* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
+ * (7) Threads are created with a reduced stack size, for sanity checking.
+ * Therefore, it's important not to allocate huge buffers on the stack.
+ *
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
typedef struct rll {
void *rll_writer;
int rll_readers;
- mutex_t rll_lock;
- cond_t rll_cv;
+ kmutex_t rll_lock;
+ kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t zd_seq;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[MAXNAMELEN];
- mutex_t zd_dirobj_lock;
+ kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
- mutex_t zcl_callbacks_lock;
- list_t zcl_callbacks;
+ kmutex_t zcl_callbacks_lock;
+ list_t zcl_callbacks;
} ztest_cb_list_t;
/*
uint64_t zs_vdev_aux;
uint64_t zs_alloc;
uint64_t zs_space;
- mutex_t zs_vdev_lock;
- rwlock_t zs_name_lock;
+ kmutex_t zs_vdev_lock;
+ krwlock_t zs_name_lock;
ztest_info_t zs_info[ZTEST_FUNCS];
uint64_t zs_splits;
uint64_t zs_mirrors;
/* Global commit callback list */
static ztest_cb_list_t zcl;
+/* Commit cb delay */
+static uint64_t zc_min_txg_delay = UINT64_MAX;
+static int zc_cb_counter = 0;
+
+/*
+ * Minimum number of commit callbacks that need to be registered for us to check
+ * whether the minimum txg delay is acceptable.
+ */
+#define ZTEST_COMMIT_CB_MIN_REG 100
+
+/*
+ * If a number of txgs equal to this threshold have been created after a commit
+ * callback has been registered but not called, then we assume there is an
+ * implementation bug.
+ */
+#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
* debugging facilities.
*/
const char *
-_umem_debug_init()
+_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
{
va_list args;
int save_errno = errno;
- char buf[FATAL_MSG_SZ];
+ char *buf;
(void) fflush(stdout);
+ buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
va_start(args, message);
(void) sprintf(buf, "ztest: ");
static nvlist_t *
make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
{
- char pathbuf[MAXPATHLEN];
+ char *pathbuf;
uint64_t vdev;
nvlist_t *file;
+ pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
if (ashift == 0)
ashift = ztest_get_ashift();
VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
+ umem_free(pathbuf, MAXPATHLEN);
return (file);
}
{
const char *propname = zfs_prop_to_name(prop);
const char *valname;
- char setpoint[MAXPATHLEN];
+ char *setpoint;
uint64_t curval;
int error;
}
ASSERT3U(error, ==, 0);
+ setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
1, &curval, setpoint), ==, 0);
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
}
+ umem_free(setpoint, MAXPATHLEN);
return (error);
}
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
- VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
+ mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}
static void
{
ASSERT(rll->rll_writer == NULL);
ASSERT(rll->rll_readers == 0);
- VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
- VERIFY(cond_destroy(&rll->rll_cv) == 0);
+ mutex_destroy(&rll->rll_lock);
+ cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
- VERIFY(mutex_lock(&rll->rll_lock) == 0);
+ mutex_enter(&rll->rll_lock);
+
+ /*
+ * Readers may share the lock but must wait out any writer; a
+ * writer needs full exclusivity, so it waits until there is no
+ * writer and no readers. Waiters are woken by the broadcast on
+ * rll_cv performed when the lock is released (ztest_rll_unlock).
+ */
 if (type == RL_READER) {
 while (rll->rll_writer != NULL)
- (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+ (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
 rll->rll_readers++;
 } else {
 while (rll->rll_writer != NULL || rll->rll_readers)
- (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+ (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
 rll->rll_writer = curthread;
 }
- VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+ mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
- VERIFY(mutex_lock(&rll->rll_lock) == 0);
+ mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT(rll->rll_readers == 0);
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
- VERIFY(cond_broadcast(&rll->rll_cv) == 0);
+ cv_broadcast(&rll->rll_cv);
- VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+ mutex_exit(&rll->rll_lock);
}
static void
dmu_objset_name(os, zd->zd_name);
int l;
- VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
+ mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
{
int l;
- VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
+ mutex_destroy(&zd->zd_dirobj_lock);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
*ip++ = value;
}
+#ifndef NDEBUG
static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
return (diff == 0);
}
+#endif
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
}
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
- NULL, /* 0 no such transaction type */
- ztest_replay_create, /* TX_CREATE */
- NULL, /* TX_MKDIR */
- NULL, /* TX_MKXATTR */
- NULL, /* TX_SYMLINK */
- ztest_replay_remove, /* TX_REMOVE */
- NULL, /* TX_RMDIR */
- NULL, /* TX_LINK */
- NULL, /* TX_RENAME */
- ztest_replay_write, /* TX_WRITE */
- ztest_replay_truncate, /* TX_TRUNCATE */
- ztest_replay_setattr, /* TX_SETATTR */
- NULL, /* TX_ACL */
- NULL, /* TX_CREATE_ACL */
- NULL, /* TX_CREATE_ATTR */
- NULL, /* TX_CREATE_ACL_ATTR */
- NULL, /* TX_MKDIR_ACL */
- NULL, /* TX_MKDIR_ATTR */
- NULL, /* TX_MKDIR_ACL_ATTR */
- NULL, /* TX_WRITE2 */
+ NULL, /* 0 no such transaction type */
+ (zil_replay_func_t *)ztest_replay_create, /* TX_CREATE */
+ NULL, /* TX_MKDIR */
+ NULL, /* TX_MKXATTR */
+ NULL, /* TX_SYMLINK */
+ (zil_replay_func_t *)ztest_replay_remove, /* TX_REMOVE */
+ NULL, /* TX_RMDIR */
+ NULL, /* TX_LINK */
+ NULL, /* TX_RENAME */
+ (zil_replay_func_t *)ztest_replay_write, /* TX_WRITE */
+ (zil_replay_func_t *)ztest_replay_truncate, /* TX_TRUNCATE */
+ (zil_replay_func_t *)ztest_replay_setattr, /* TX_SETATTR */
+ NULL, /* TX_ACL */
+ NULL, /* TX_CREATE_ACL */
+ NULL, /* TX_CREATE_ATTR */
+ NULL, /* TX_CREATE_ACL_ATTR */
+ NULL, /* TX_MKDIR_ACL */
+ NULL, /* TX_MKDIR_ATTR */
+ NULL, /* TX_MKDIR_ACL_ATTR */
+ NULL, /* TX_WRITE2 */
};
/*
int error;
int i;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(mutex_held(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
int missing = 0;
int i;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(mutex_held(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
int error;
int i;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(mutex_held(&zd->zd_dirobj_lock));
od += count - 1;
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
+ default:
+ break;
}
umem_free(data, blocksize);
int count = size / sizeof (*od);
int rv = 0;
- VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
+ mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
- VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
+ mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
- (void) rw_rdlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL));
nvlist_free(nvroot);
VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool));
spa_close(spa, FTAG);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
}
static vdev_t *
nvlist_t *nvroot;
int error;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ mutex_enter(&zs->zs_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
- VERIFY(rw_wrlock(&ztest_shared->zs_name_lock) == 0);
+ rw_enter(&ztest_shared->zs_name_lock, RW_WRITER);
error = spa_vdev_remove(spa, guid, B_FALSE);
- VERIFY(rw_unlock(&ztest_shared->zs_name_lock) == 0);
+ rw_exit(&ztest_shared->zs_name_lock);
if (error && error != EEXIST)
fatal(0, "spa_vdev_remove() = %d", error);
fatal(0, "spa_vdev_add() = %d", error);
}
- VERIFY(mutex_unlock(&ztest_shared->zs_vdev_lock) == 0);
+ mutex_exit(&ztest_shared->zs_vdev_lock);
}
/*
vdev_t *rvd = spa->spa_root_vdev;
spa_aux_vdev_t *sav;
char *aux;
+ char *path;
uint64_t guid = 0;
int error;
+ path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
if (ztest_random(2) == 0) {
sav = &spa->spa_spares;
aux = ZPOOL_CONFIG_SPARES;
aux = ZPOOL_CONFIG_L2CACHE;
}
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ mutex_enter(&zs->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
*/
zs->zs_vdev_aux = 0;
for (;;) {
- char path[MAXPATHLEN];
int c;
(void) sprintf(path, ztest_aux_template, zopt_dir,
zopt_pool, aux, zs->zs_vdev_aux);
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
+
+ umem_free(path, MAXPATHLEN);
}
/*
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ mutex_enter(&zs->zs_vdev_lock);
/* ensure we have a useable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || zopt_raidz > 1) {
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
return;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
- (void) rw_wrlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_WRITER);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
nvlist_free(config);
++zs->zs_splits;
--zs->zs_mirrors;
}
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
}
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
size_t oldsize, newsize;
- char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
+ char *oldpath, *newpath;
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+ newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
+ mutex_enter(&zs->zs_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
- return;
+ goto out;
}
/*
oldpath, (longlong_t)oldsize, newpath,
(longlong_t)newsize, replacing, error, expected_error);
}
+out:
+ mutex_exit(&zs->zs_vdev_lock);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ umem_free(oldpath, MAXPATHLEN);
+ umem_free(newpath, MAXPATHLEN);
}
/*
vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
- spa_t *spa = vd->vdev_spa;
+ ASSERTV(spa_t *spa = vd->vdev_spa);
size_t *newsize = arg;
size_t fsize;
int fd;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ mutex_enter(&zs->zs_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
top = ztest_random_vdev_top(spa, B_TRUE);
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * zopt_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
return;
}
ASSERT(psize > 0);
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
return;
}
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
return;
}
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
}
/*
ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
- ztest_ds_t zdtmp;
+ ztest_ds_t *zdtmp;
int iters;
int error;
objset_t *os, *os2;
- char name[MAXNAMELEN];
+ char *name;
zilog_t *zilog;
int i;
- (void) rw_rdlock(&zs->zs_name_lock);
+ zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
+ name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
(void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
zs->zs_pool, (u_longlong_t)id);
*/
if (ztest_random(2) == 0 &&
dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
- ztest_zd_init(&zdtmp, os);
- zil_replay(os, &zdtmp, ztest_replay_vector);
- ztest_zd_fini(&zdtmp);
+ ztest_zd_init(zdtmp, os);
+ zil_replay(os, zdtmp, ztest_replay_vector);
+ ztest_zd_fini(zdtmp);
dmu_objset_disown(os, FTAG);
}
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
- (void) rw_unlock(&zs->zs_name_lock);
- return;
+ goto out;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
}
VERIFY3U(0, ==,
dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
- ztest_zd_init(&zdtmp, os);
+ ztest_zd_init(zdtmp, os);
/*
* Open the intent log for it.
*/
iters = ztest_random(5);
for (i = 0; i < iters; i++) {
- ztest_dmu_object_alloc_free(&zdtmp, id);
+ ztest_dmu_object_alloc_free(zdtmp, id);
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
zil_close(zilog);
dmu_objset_disown(os, FTAG);
- ztest_zd_fini(&zdtmp);
+ ztest_zd_fini(zdtmp);
+out:
+ (void) rw_exit(&zs->zs_name_lock);
- (void) rw_unlock(&zs->zs_name_lock);
+ umem_free(name, MAXNAMELEN);
+ umem_free(zdtmp, sizeof (ztest_ds_t));
}
/*
{
ztest_shared_t *zs = ztest_shared;
- (void) rw_rdlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
}
/*
void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
- char snap1name[MAXNAMELEN];
- char clone1name[MAXNAMELEN];
- char snap2name[MAXNAMELEN];
- char clone2name[MAXNAMELEN];
- char snap3name[MAXNAMELEN];
+ char *snap1name;
+ char *clone1name;
+ char *snap2name;
+ char *clone2name;
+ char *snap3name;
int error;
+ snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+
(void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
osname, (u_longlong_t)id);
(void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
error = dmu_objset_destroy(snap1name, B_FALSE);
if (error && error != ENOENT)
fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
+
+ umem_free(snap1name, MAXNAMELEN);
+ umem_free(clone1name, MAXNAMELEN);
+ umem_free(snap2name, MAXNAMELEN);
+ umem_free(clone2name, MAXNAMELEN);
+ umem_free(snap3name, MAXNAMELEN);
}
/*
ztest_shared_t *zs = ztest_shared;
objset_t *clone;
dsl_dataset_t *ds;
- char snap1name[MAXNAMELEN];
- char clone1name[MAXNAMELEN];
- char snap2name[MAXNAMELEN];
- char clone2name[MAXNAMELEN];
- char snap3name[MAXNAMELEN];
+ char *snap1name;
+ char *clone1name;
+ char *snap2name;
+ char *clone2name;
+ char *snap3name;
char *osname = zd->zd_name;
int error;
- (void) rw_rdlock(&zs->zs_name_lock);
+ snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
ztest_dsl_dataset_cleanup(osname, id);
out:
ztest_dsl_dataset_cleanup(osname, id);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
+
+ umem_free(snap1name, MAXNAMELEN);
+ umem_free(clone1name, MAXNAMELEN);
+ umem_free(snap2name, MAXNAMELEN);
+ umem_free(clone2name, MAXNAMELEN);
+ umem_free(snap3name, MAXNAMELEN);
}
+#undef OD_ARRAY_SIZE
+#define OD_ARRAY_SIZE 4
+
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
{
- ztest_od_t od[4];
- int batchsize = sizeof (od) / sizeof (od[0]);
+ ztest_od_t *od;
+ int batchsize;
+ int size;
 int b;
+ /* Heap-allocate: ztest threads run with reduced stack sizes. */
+ size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
+ od = umem_alloc(size, UMEM_NOFAIL);
+ batchsize = OD_ARRAY_SIZE;
+
 for (b = 0; b < batchsize; b++)
- ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
 /*
 * Destroy the previous batch of objects, create a new batch,
 * and do some I/O on the new objects.
 */
- if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
- return;
+ if (ztest_object_init(zd, od, size, B_TRUE) != 0) {
+ umem_free(od, size);
+ return;
+ }
 while (ztest_random(4 * batchsize) != 0)
 ztest_io(zd, od[ztest_random(batchsize)].od_object,
 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
+
+ umem_free(od, size);
}
+#undef OD_ARRAY_SIZE
+#define OD_ARRAY_SIZE 2
+
/*
* Verify that dmu_{read,write} work as expected.
*/
void
ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
{
+ int size;
+ ztest_od_t *od;
+
 objset_t *os = zd->zd_os;
- ztest_od_t od[2];
 dmu_tx_t *tx;
 int i, freeit, error;
 uint64_t n, s, txg;
 /*
 * Read the directory info. If it's the first time, set things up.
 */
+ /* Heap-allocate after all declarations (C90; reduced thread stacks). */
+ size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
+ od = umem_alloc(size, UMEM_NOFAIL);
+
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
- ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
+ ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+ if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
+ umem_free(od, size);
return;
+ }
bigobj = od[0].od_object;
packobj = od[1].od_object;
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
+ umem_free(od, size);
return;
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
+ umem_free(od, size);
}
void
}
}
+#undef OD_ARRAY_SIZE
+#define OD_ARRAY_SIZE 2
+
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
- ztest_od_t od[2];
+ ztest_od_t *od;
dmu_tx_t *tx;
uint64_t i;
int error;
+ int size;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
arc_buf_t **bigbuf_arcbufs;
dmu_object_info_t doi;
+ size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
+ od = umem_alloc(size, UMEM_NOFAIL);
+
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
/*
* Read the directory info. If it's the first time, set things up.
*/
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
- ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+ ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+
+ if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
+ umem_free(od, size);
return;
+ }
bigobj = od[0].od_object;
packobj = od[1].od_object;
}
}
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
+ umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
+ umem_free(od, size);
}
/* ARGSUSED */
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
- ztest_od_t od[1];
+ ztest_od_t *od;
 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
+
+ /* Heap-allocate after all declarations (C90; reduced thread stacks). */
+ od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
 * to verify that parallel writes to an object -- even to the
 * same blocks within the object -- doesn't cause any trouble.
 */
- ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+ ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
- return;
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+ umem_free(od, sizeof (ztest_od_t));
+ return;
+ }
 while (ztest_random(10) != 0)
- ztest_io(zd, od[0].od_object, offset);
+ ztest_io(zd, od->od_object, offset);
+
+ umem_free(od, sizeof (ztest_od_t));
}
void
ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
{
- ztest_od_t od[1];
+ ztest_od_t *od;
uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
uint64_t count = ztest_random(20) + 1;
uint64_t blocksize = ztest_random_blocksize();
void *data;
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+ od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
- if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
+ umem_free(od, sizeof(ztest_od_t));
return;
+ }
- if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
+ if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
+ umem_free(od, sizeof(ztest_od_t));
return;
+ }
- ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
+ ztest_prealloc(zd, od->od_object, offset, count * blocksize);
data = umem_zalloc(blocksize, UMEM_NOFAIL);
while (ztest_random(count) != 0) {
uint64_t randoff = offset + (ztest_random(count) * blocksize);
- if (ztest_write(zd, od[0].od_object, randoff, blocksize,
+ if (ztest_write(zd, od->od_object, randoff, blocksize,
data) != 0)
break;
while (ztest_random(4) != 0)
- ztest_io(zd, od[0].od_object, randoff);
+ ztest_io(zd, od->od_object, randoff);
}
umem_free(data, blocksize);
+ umem_free(od, sizeof(ztest_od_t));
}
/*
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
- ztest_od_t od[1];
+ ztest_od_t *od;
uint64_t object;
uint64_t txg, last_txg;
uint64_t value[ZTEST_ZAP_MAX_INTS];
int error;
char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+ od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
- if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
- return;
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t),
+ !ztest_random(2)) != 0)
+ goto out;
- object = od[0].od_object;
+ object = od->od_object;
/*
* Generate a known hash collision, and verify that
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
- return;
+ goto out;
for (i = 0; i < 2; i++) {
value[i] = i;
VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
- return;
+ goto out;
if (last_txg > txg)
fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
- return;
+ goto out;
ASSERT3U(error, ==, 0);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
- return;
+ goto out;
VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
+out:
+ umem_free(od, sizeof(ztest_od_t));
}
/*
ztest_fzap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
- ztest_od_t od[1];
+ ztest_od_t *od;
uint64_t object, txg;
int i;
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
-
- if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
- return;
+ od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
- object = od[0].od_object;
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t),
+ !ztest_random(2)) != 0)
+ goto out;
+ object = od->od_object;
/*
* Add entries to this ZAP and make sure it spills over
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
- return;
+ goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,
&value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
}
+out:
+ umem_free(od, sizeof(ztest_od_t));
}
/* ARGSUSED */
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
- ztest_od_t od[1];
+ ztest_od_t *od;
uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
dmu_tx_t *tx;
int i, namelen, error;
char name[20], string_value[20];
void *data;
- ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
+ od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+ ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+ umem_free(od, sizeof(ztest_od_t));
return;
+ }
- object = od[0].od_object;
+ object = od->od_object;
/*
* Generate a random name of the form 'xxx.....' where each
if (tx != NULL)
dmu_tx_commit(tx);
+
+ umem_free(od, sizeof(ztest_od_t));
}
/*
return;
}
- /* Was this callback added to the global callback list? */
- if (!data->zcd_added)
- goto out;
-
+ ASSERT(data->zcd_added);
ASSERT3U(data->zcd_txg, !=, 0);
+ (void) mutex_enter(&zcl.zcl_callbacks_lock);
+
+ /* See if this cb was called more quickly */
+ if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
+ zc_min_txg_delay = synced_txg - data->zcd_txg;
+
/* Remove our callback from the list */
- (void) mutex_lock(&zcl.zcl_callbacks_lock);
list_remove(&zcl.zcl_callbacks, data);
- (void) mutex_unlock(&zcl.zcl_callbacks_lock);
-out:
+ (void) mutex_exit(&zcl.zcl_callbacks_lock);
+
umem_free(data, sizeof (ztest_cb_data_t));
}
cb_data->zcd_txg = txg;
cb_data->zcd_spa = dmu_objset_spa(os);
+ list_link_init(&cb_data->zcd_node);
return (cb_data);
}
/*
- * If a number of txgs equal to this threshold have been created after a commit
- * callback has been registered but not called, then we assume there is an
- * implementation bug.
- */
-#define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
-
-/*
* Commit callback test.
*/
void
ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
- ztest_od_t od[1];
+ ztest_od_t *od;
dmu_tx_t *tx;
ztest_cb_data_t *cb_data[3], *tmp_cb;
uint64_t old_txg, txg;
- int i, error;
+ int i, error = 0;
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+ od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+ umem_free(od, sizeof(ztest_od_t));
return;
+ }
tx = dmu_tx_create(os);
cb_data[0] = ztest_create_cb_data(os, 0);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
- dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
+ dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
}
+ umem_free(od, sizeof(ztest_od_t));
return;
}
/*
* Read existing data to make sure there isn't a future leak.
*/
- VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
+ VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
&old_txg, DMU_READ_PREFETCH));
if (old_txg > txg)
fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
old_txg, txg);
- dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
+ dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
- (void) mutex_lock(&zcl.zcl_callbacks_lock);
+ (void) mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
*/
tmp_cb = list_head(&zcl.zcl_callbacks);
if (tmp_cb != NULL &&
- tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
+ tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
fatal(0, "Commit callback threshold exceeded, oldest txg: %"
PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
}
tmp_cb = cb_data[i];
}
- (void) mutex_unlock(&zcl.zcl_callbacks_lock);
+ zc_cb_counter += 3;
+
+ (void) mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
+
+ umem_free(od, sizeof(ztest_od_t));
}
/* ARGSUSED */
ztest_shared_t *zs = ztest_shared;
int p;
- (void) rw_rdlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
}
/* ARGSUSED */
ztest_shared_t *zs = ztest_shared;
nvlist_t *props = NULL;
- (void) rw_rdlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
(void) ztest_spa_prop_set_uint64(zs, ZPOOL_PROP_DEDUPDITTO,
ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
nvlist_free(props);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
}
/*
char tag[100];
char osname[MAXNAMELEN];
- (void) rw_rdlock(&ztest_shared->zs_name_lock);
+ (void) rw_enter(&ztest_shared->zs_name_lock, RW_READER);
dmu_objset_name(os, osname);
- (void) snprintf(snapname, 100, "sh1_%llu", id);
+ (void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id);
(void) snprintf(fullname, 100, "%s@%s", osname, snapname);
- (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
- (void) snprintf(tag, 100, "%tag_%llu", id);
+ (void) snprintf(clonename, 100, "%s/ch1_%llu",osname,(u_longlong_t)id);
+ (void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id);
/*
* Clean up from any previous run.
VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
out:
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) rw_exit(&ztest_shared->zs_name_lock);
}
/*
uint64_t leaves;
uint64_t bad = 0x1990c0ffeedecadeull;
uint64_t top, leaf;
- char path0[MAXPATHLEN];
- char pathrand[MAXPATHLEN];
+ char *path0;
+ char *pathrand;
size_t fsize;
int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
int iters = 1000;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+ pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
+ mutex_enter(&zs->zs_vdev_lock);
maxfaults = MAXFAULTS();
leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz;
mirror_save = zs->zs_mirrors;
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
ASSERT(leaves >= 1);
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
- return;
+ goto out;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
* leaving the dataset in an inconsistent state.
*/
if (islog)
- (void) rw_wrlock(&ztest_shared->zs_name_lock);
+ (void) rw_enter(&ztest_shared->zs_name_lock,
+ RW_WRITER);
VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
- (void) rw_unlock(&ztest_shared->zs_name_lock);
+ (void) rw_exit(&ztest_shared->zs_name_lock);
} else {
(void) vdev_online(spa, guid0, 0, NULL);
}
}
if (maxfaults == 0)
- return;
+ goto out;
/*
* We have at least single-fault tolerance, so inject data corruption.
fd = open(pathrand, O_RDWR);
if (fd == -1) /* we hit a gap in the device namespace */
- return;
+ goto out;
fsize = lseek(fd, 0, SEEK_END);
if (offset >= fsize)
continue;
- VERIFY(mutex_lock(&zs->zs_vdev_lock) == 0);
+ mutex_enter(&zs->zs_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
(void) close(fd);
- return;
+ goto out;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
offset, pathrand);
- VERIFY(mutex_unlock(&zs->zs_vdev_lock) == 0);
+ mutex_exit(&zs->zs_vdev_lock);
if (zopt_verbose >= 7)
(void) printf("injected bad word into %s,"
}
(void) close(fd);
+out:
+ umem_free(path0, MAXPATHLEN);
+ umem_free(pathrand, MAXPATHLEN);
}
/*
ztest_shared_t *zs = ztest_shared;
spa_t *spa = zs->zs_spa;
objset_t *os = zd->zd_os;
- ztest_od_t od[1];
+ ztest_od_t *od;
uint64_t object, blocksize, txg, pattern, psize;
enum zio_checksum checksum = spa_dedup_checksum(spa);
dmu_buf_t *db;
blocksize = ztest_random_blocksize();
blocksize = MIN(blocksize, 2048); /* because we write so many */
- ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+ od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
+ ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
- if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+ if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+ umem_free(od, sizeof (ztest_od_t));
return;
+ }
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properies we need to maintain during this test.
*/
- (void) rw_wrlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_WRITER);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
B_FALSE) != 0 ||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
B_FALSE) != 0) {
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
+ umem_free(od, sizeof (ztest_od_t));
return;
}
dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
+ umem_free(od, sizeof (ztest_od_t));
return;
}
zio_buf_free(buf, psize);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
+ umem_free(od, sizeof (ztest_od_t));
}
/*
char *oldname, *newname;
spa_t *spa;
- (void) rw_wrlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_WRITER);
oldname = zs->zs_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
umem_free(newname, strlen(newname) + 1);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
}
/*
ztest_run_zdb(char *pool)
{
int status;
- char zdb[MAXPATHLEN + MAXNAMELEN + 20];
- char zbuf[1024];
char *bin;
- char *ztest;
- char *isa;
- int isalen;
+ char *zdb;
+ char *zbuf;
FILE *fp;
- (void) realpath(getexecname(), zdb);
+ bin = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
+ zdb = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
+ zbuf = umem_alloc(1024, UMEM_NOFAIL);
- /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
- bin = strstr(zdb, "/usr/bin/");
- ztest = strstr(bin, "/ztest");
- isa = bin + 8;
- isalen = ztest - isa;
- isa = strdup(isa);
- /* LINTED */
- (void) sprintf(bin,
- "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
- isalen,
- isa,
+ VERIFY(realpath(getexecname(), bin) != NULL);
+ if (strncmp(bin, "/usr/sbin/ztest", 15) == 0) {
+ strcpy(bin, "/usr/sbin/zdb"); /* Installed */
+ } else {
+ strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
+ strcat(bin, "/zdb/zdb");
+ }
+
+ (void) snprintf(zdb, MAXPATHLEN + MAXNAMELEN + 20,
+ "%s -bcc%s%s -U %s %s",
+ bin,
zopt_verbose >= 3 ? "s" : "",
zopt_verbose >= 4 ? "v" : "",
spa_config_path,
pool);
- free(isa);
if (zopt_verbose >= 5)
(void) printf("Executing %s\n", strstr(zdb, "zdb "));
status = pclose(fp);
if (status == 0)
- return;
+ goto out;
ztest_dump_core = 0;
if (WIFEXITED(status))
fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
else
fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
+out:
+ umem_free(bin, MAXPATHLEN + MAXNAMELEN + 20);
+ umem_free(zdb, MAXPATHLEN + MAXNAMELEN + 20);
+ umem_free(zbuf, 1024);
}
static void
ztest_resume(spa);
(void) poll(NULL, 0, 100);
}
- return (NULL);
-}
-static void *
-ztest_deadman_thread(void *arg)
-{
- ztest_shared_t *zs = arg;
- int grace = 300;
- hrtime_t delta;
-
- delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
+ thread_exit();
- (void) poll(NULL, 0, (int)(1000 * delta));
+ return (NULL);
+}
- fatal(0, "failed to complete within %d seconds of deadline", grace);
+#define GRACE 300
- return (NULL);
+static void
+ztest_deadman_alarm(int sig)
+{
+ fatal(0, "failed to complete within %d seconds of deadline", GRACE);
}
static void
ztest_execute(zi, id);
}
+ thread_exit();
+
return (NULL);
}
ztest_dataset_name(name, zs->zs_pool, d);
- (void) rw_rdlock(&zs->zs_name_lock);
+ (void) rw_enter(&zs->zs_name_lock, RW_READER);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
- (void) rw_unlock(&zs->zs_name_lock);
+ (void) rw_exit(&zs->zs_name_lock);
ztest_zd_init(zd, os);
static void
ztest_run(ztest_shared_t *zs)
{
- thread_t *tid;
+ kt_did_t *tid;
spa_t *spa;
- thread_t resume_tid;
+ kthread_t *resume_thread;
+ uint64_t object;
int error;
int t, d;
/*
* Initialize parent/child shared state.
*/
- VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0);
+ mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+ rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop = zs->zs_thread_start + zopt_passtime * NANOSEC;
if (ztest_random(100) < zopt_killrate)
zs->zs_thread_kill -= ztest_random(zopt_passtime * NANOSEC);
- (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
+ mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
* Create a thread to periodically resume suspended I/O.
*/
- VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
- &resume_tid) == 0);
+ VERIFY3P((resume_thread = thread_create(NULL, 0, ztest_resume_thread,
+ spa, TS_RUN, NULL, 0, 0)), !=, NULL);
/*
- * Create a deadman thread to abort() if we hang.
+ * Set a deadman alarm to abort() if we hang.
*/
- VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
- NULL) == 0);
+ signal(SIGALRM, ztest_deadman_alarm);
+ alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE);
/*
* Verify that we can safely inquire about about any object,
}
zs->zs_enospc_count = 0;
- tid = umem_zalloc(zopt_threads * sizeof (thread_t), UMEM_NOFAIL);
+ tid = umem_zalloc(zopt_threads * sizeof (kt_did_t), UMEM_NOFAIL);
if (zopt_verbose >= 4)
(void) printf("starting main threads...\n");
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < zopt_threads; t++) {
+ kthread_t *thread;
+
if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0)
return;
- VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
- THR_BOUND, &tid[t]) == 0);
+
+ VERIFY3P(thread = thread_create(NULL, 0, ztest_thread,
+ (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0), !=, NULL);
+ tid[t] = thread->t_tid; /* NOTE(review): racy if thread exits first — confirm kthread_t lifetime */
}
/*
* so we don't close datasets while threads are still using them.
*/
for (t = zopt_threads - 1; t >= 0; t--) {
- VERIFY(thr_join(tid[t], NULL, NULL) == 0);
+ thread_join(tid[t]);
if (t < zopt_datasets)
ztest_dataset_close(zs, t);
}
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
- umem_free(tid, zopt_threads * sizeof (thread_t));
+ umem_free(tid, zopt_threads * sizeof (kt_did_t));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
- VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
+ thread_join(resume_thread->t_tid); /* NOTE(review): ztest_exiting already set; resume thread may have exited and freed its kthread_t — save t_tid at create instead? */
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
- for (uint64_t object = 1; object < 50; object++)
+ for (object = 1; object < 50; object++)
dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
+ /* Verify that at least one commit cb was called in a timely fashion */
+ if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
+ VERIFY3U(zc_min_txg_delay, ==, 0);
+
spa_close(spa, FTAG);
/*
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
-
- (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
-
- (void) rwlock_destroy(&zs->zs_name_lock);
- (void) _mutex_destroy(&zs->zs_vdev_lock);
+ mutex_destroy(&zcl.zcl_callbacks_lock);
+ rw_destroy(&zs->zs_name_lock);
+ mutex_destroy(&zs->zs_vdev_lock);
}
static void
}
static nvlist_t *
-make_random_props()
+make_random_props(void)
{
nvlist_t *props;
spa_t *spa;
nvlist_t *nvroot, *props;
- VERIFY(_mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL) == 0);
+ mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+ rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
kernel_init(FREAD | FWRITE);
ztest_run_zdb(zs->zs_pool);
- (void) rwlock_destroy(&zs->zs_name_lock);
- (void) _mutex_destroy(&zs->zs_vdev_lock);
+ rw_destroy(&zs->zs_name_lock);
+ mutex_destroy(&zs->zs_vdev_lock);
}
int
ztest_random_fd = open("/dev/urandom", O_RDONLY);
+ dprintf_setup(&argc, argv);
process_options(argc, argv);
/* Override location of zpool.cache */