X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=cmd%2Fztest%2Fztest.c;h=09d6e9526aa306a27f1fab8f78763c50eb9250d3;hb=d13524579162b35189804c357a63993be758b84c;hp=6acba5290e7e672cf47d51629a36137e94ecb44f;hpb=341b5f1d4c03c0f318218346154e2fb79a8acb92;p=zfs.git diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index 6acba52..09d6e95 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -20,6 +20,8 @@ */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright 2011 Nexenta Systems, Inc. All rights reserved. */ /* @@ -205,6 +207,7 @@ typedef struct ztest_od { */ typedef struct ztest_ds { objset_t *zd_os; + krwlock_t zd_zilog_lock; zilog_t *zd_zilog; uint64_t zd_seq; ztest_od_t *zd_od; /* debugging aid */ @@ -238,6 +241,7 @@ ztest_func_t ztest_dmu_commit_callbacks; ztest_func_t ztest_zap; ztest_func_t ztest_zap_parallel; ztest_func_t ztest_zil_commit; +ztest_func_t ztest_zil_remount; ztest_func_t ztest_dmu_read_write_zcopy; ztest_func_t ztest_dmu_objset_create_destroy; ztest_func_t ztest_dmu_prealloc; @@ -257,6 +261,7 @@ ztest_func_t ztest_vdev_LUN_growth; ztest_func_t ztest_vdev_add_remove; ztest_func_t ztest_vdev_aux_add_remove; ztest_func_t ztest_split_pool; +ztest_func_t ztest_reguid; uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */ uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */ @@ -273,6 +278,7 @@ ztest_info_t ztest_info[] = { { ztest_zap_parallel, 100, &zopt_always }, { ztest_split_pool, 1, &zopt_always }, { ztest_zil_commit, 1, &zopt_incessant }, + { ztest_zil_remount, 1, &zopt_sometimes }, { ztest_dmu_read_write_zcopy, 1, &zopt_often }, { ztest_dmu_objset_create_destroy, 1, &zopt_often }, { ztest_dsl_prop_get_set, 1, &zopt_often }, @@ -286,6 +292,13 @@ ztest_info_t ztest_info[] = { { ztest_fault_inject, 1, &zopt_sometimes }, { ztest_ddt_repair, 1, &zopt_sometimes }, { ztest_dmu_snapshot_hold, 1, &zopt_sometimes }, + /* + * The reguid test is currently broken. Disable it until + * we get around to fixing it. 
+ */ +#if 0 + { ztest_reguid, 1, &zopt_sometimes }, +#endif { ztest_spa_rename, 1, &zopt_rarely }, { ztest_scrub, 1, &zopt_rarely }, { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely }, @@ -322,6 +335,7 @@ typedef struct ztest_shared { uint64_t zs_vdev_aux; uint64_t zs_alloc; uint64_t zs_space; + uint64_t zs_guid; kmutex_t zs_vdev_lock; krwlock_t zs_name_lock; ztest_info_t zs_info[ZTEST_FUNCS]; @@ -1006,6 +1020,7 @@ ztest_zd_init(ztest_ds_t *zd, objset_t *os) dmu_objset_name(os, zd->zd_name); int l; + rw_init(&zd->zd_zilog_lock, NULL, RW_DEFAULT, NULL); mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL); for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) @@ -1021,6 +1036,7 @@ ztest_zd_fini(ztest_ds_t *zd) int l; mutex_destroy(&zd->zd_dirobj_lock); + rw_destroy(&zd->zd_zilog_lock); for (l = 0; l < ZTEST_OBJECT_LOCKS; l++) ztest_rll_destroy(&zd->zd_object_lock[l]); @@ -1992,6 +2008,8 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) if (ztest_random(2) == 0) io_type = ZTEST_IO_WRITE_TAG; + (void) rw_enter(&zd->zd_zilog_lock, RW_READER); + switch (io_type) { case ZTEST_IO_WRITE_TAG: @@ -2029,6 +2047,8 @@ ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset) break; } + (void) rw_exit(&zd->zd_zilog_lock); + umem_free(data, blocksize); } @@ -2083,6 +2103,8 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id) { zilog_t *zilog = zd->zd_zilog; + (void) rw_enter(&zd->zd_zilog_lock, RW_READER); + zil_commit(zilog, ztest_random(ZTEST_OBJECTS)); /* @@ -2094,6 +2116,31 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id) ASSERT(zd->zd_seq <= zilog->zl_commit_lr_seq); zd->zd_seq = zilog->zl_commit_lr_seq; mutex_exit(&zilog->zl_lock); + + (void) rw_exit(&zd->zd_zilog_lock); +} + +/* + * This function is designed to simulate the operations that occur during a + * mount/unmount operation. We hold the dataset across these operations in an + * attempt to expose any implicit assumptions about ZIL management. + */ +/* ARGSUSED */ +void +ztest_zil_remount(ztest_ds_t *zd, uint64_t id) +{ + objset_t *os = zd->zd_os; + + (void) rw_enter(&zd->zd_zilog_lock, RW_WRITER); + + /* zfs_sb_teardown() */ + zil_close(zd->zd_zilog); + + /* zfsvfs_setup() */ + VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog); + zil_replay(os, zd, ztest_replay_vector); + + (void) rw_exit(&zd->zd_zilog_lock); } /* @@ -2526,7 +2573,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id) newvd_is_spare = B_TRUE; (void) strcpy(newpath, newvd->vdev_path); } else { - (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, + (void) snprintf(newpath, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf); if (ztest_random(2) == 0) newpath[strlen(newpath) - 1] = 'b'; @@ -2882,7 +2929,8 @@ ztest_dataset_create(char *dsname) if (err || zilset < 80) return (err); - (void) printf("Setting dataset %s to sync always\n", dsname); + if (zopt_verbose >= 5) + (void) printf("Setting dataset %s to sync always\n", dsname); return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC, ZFS_SYNC_ALWAYS, B_FALSE)); } @@ -4591,9 +4639,9 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id) * write failures and random online/offline activity on leaf 0, * and we'll write random garbage to the randomly chosen leaf. 
*/ - (void) snprintf(path0, sizeof (path0), ztest_dev_template, + (void) snprintf(path0, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + zs->zs_splits); - (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, + (void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template, zopt_dir, zopt_pool, top * leaves + leaf); vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); @@ -4767,7 +4815,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id) object = od[0].od_object; blocksize = od[0].od_blocksize; - pattern = spa_guid(spa) ^ dmu_objset_fsid_guid(os); + pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os); ASSERT(object != 0); @@ -4840,6 +4888,31 @@ ztest_scrub(ztest_ds_t *zd, uint64_t id) } /* + * Change the guid for the pool. + */ +/* ARGSUSED */ +void +ztest_reguid(ztest_ds_t *zd, uint64_t id) +{ + ztest_shared_t *zs = ztest_shared; + spa_t *spa = zs->zs_spa; + uint64_t orig, load; + + orig = spa_guid(spa); + load = spa_load_guid(spa); + if (spa_change_guid(spa) != 0) + return; + + if (zopt_verbose >= 3) { + (void) printf("Changed guid old %llu -> %llu\n", + (u_longlong_t)orig, (u_longlong_t)spa_guid(spa)); + } + + VERIFY3U(orig, !=, spa_guid(spa)); + VERIFY3U(load, ==, spa_load_guid(spa)); +} + +/* * Rename the pool to a different name and then rename it back. */ /* ARGSUSED */ @@ -4932,7 +5005,7 @@ ztest_run_zdb(char *pool) fp = popen(zdb, "r"); - while (fgets(zbuf, sizeof (zbuf), fp) != NULL) + while (fgets(zbuf, 1024, fp) != NULL) if (zopt_verbose >= 3) (void) printf("%s", zbuf); @@ -5270,6 +5343,7 @@ ztest_run(ztest_shared_t *zs) { kt_did_t *tid; spa_t *spa; + objset_t *os; kthread_t *resume_thread; uint64_t object; int error; @@ -5300,8 +5374,13 @@ ztest_run(ztest_shared_t *zs) */ kernel_init(FREAD | FWRITE); VERIFY(spa_open(zs->zs_pool, &spa, FTAG) == 0); + spa->spa_debug = B_TRUE; zs->zs_spa = spa; + VERIFY3U(0, ==, dmu_objset_hold(zs->zs_pool, FTAG, &os)); + zs->zs_guid = dmu_objset_fsid_guid(os); + dmu_objset_rele(os, FTAG); + spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN; /* @@ -5317,8 +5396,9 @@ ztest_run(ztest_shared_t *zs) /* * Create a thread to periodically resume suspended I/O. */ - VERIFY3P((resume_thread = thread_create(NULL, 0, ztest_resume_thread, - spa, TS_RUN, NULL, 0, 0)), !=, NULL); + VERIFY3P((resume_thread = zk_thread_create(NULL, 0, + (thread_func_t)ztest_resume_thread, spa, TS_RUN, NULL, 0, 0, + PTHREAD_CREATE_JOINABLE)), !=, NULL); /* * Set a deadman alarm to abort() if we hang. @@ -5364,8 +5444,10 @@ ztest_run(ztest_shared_t *zs) if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0) return; - VERIFY3P(thread = thread_create(NULL, 0, ztest_thread, - (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0), !=, NULL); + VERIFY3P(thread = zk_thread_create(NULL, 0, + (thread_func_t)ztest_thread, + (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0, + PTHREAD_CREATE_JOINABLE), !=, NULL); tid[t] = thread->t_tid; }
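
The core of this change is the new zd_zilog_lock: ztest_io() and ztest_zil_commit() now take it as a reader around their use of zd_zilog, while ztest_zil_remount() takes it as a writer across zil_close()/zil_open()/zil_replay(), so no I/O or commit ever races with a zilog that is being torn down and reopened. Below is a minimal, self-contained sketch of that reader/writer pattern using POSIX rwlocks in place of the kernel-style krwlock_t shim; the struct and function names are illustrative only, not ztest's.

/*
 * Sketch of the zd_zilog_lock pattern with POSIX rwlocks.  Readers model
 * ztest_io()/ztest_zil_commit(); the writer models ztest_zil_remount().
 * Illustrative names only; the real code uses libzpool's krwlock_t.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct fake_zilog { int zl_open; } fake_zilog_t;

typedef struct fake_ds {
	pthread_rwlock_t ds_zilog_lock;		/* stands in for zd_zilog_lock */
	fake_zilog_t *ds_zilog;			/* stands in for zd_zilog */
} fake_ds_t;

static fake_zilog_t the_zilog;

/* I/O or commit path: a shared hold keeps the zilog open underneath us. */
static void
fake_commit(fake_ds_t *ds)
{
	(void) pthread_rwlock_rdlock(&ds->ds_zilog_lock);
	printf("commit against zilog %p (open=%d)\n",
	    (void *)ds->ds_zilog, ds->ds_zilog->zl_open);
	(void) pthread_rwlock_unlock(&ds->ds_zilog_lock);
}

/* Remount path: exclusive hold while the zilog is closed and reopened. */
static void
fake_remount(fake_ds_t *ds)
{
	(void) pthread_rwlock_wrlock(&ds->ds_zilog_lock);
	ds->ds_zilog->zl_open = 0;		/* zil_close() */
	ds->ds_zilog->zl_open = 1;		/* zil_open() + zil_replay() */
	(void) pthread_rwlock_unlock(&ds->ds_zilog_lock);
}

int
main(void)
{
	fake_ds_t ds = { .ds_zilog = &the_zilog };

	the_zilog.zl_open = 1;
	(void) pthread_rwlock_init(&ds.ds_zilog_lock, NULL);
	fake_commit(&ds);
	fake_remount(&ds);
	fake_commit(&ds);
	(void) pthread_rwlock_destroy(&ds.ds_zilog_lock);
	return (0);
}

In the patch itself the writer side additionally asserts that zil_open() hands back the same zilog_t (VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog)), since the objset stays held across the simulated unmount/mount.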
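
Several hunks also replace sizeof (path0), sizeof (newpath), and sizeof (zbuf) with explicit sizes (MAXPATHLEN, 1024). That substitution only matters if those buffers are no longer stack arrays; assuming this port allocates them from the heap (an assumption, since the allocations are not visible in the hunks shown here), sizeof applied to the resulting pointer yields the pointer size rather than the buffer size, and snprintf()/fgets() would silently truncate. A small stand-alone illustration of the pitfall, not taken from ztest.c:

/*
 * Why "sizeof (buf)" stops working once buf becomes a pointer.
 * Hypothetical example for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define	PATH_BUFLEN	1024	/* stand-in for MAXPATHLEN */

int
main(void)
{
	char array[PATH_BUFLEN];
	char *heap = malloc(PATH_BUFLEN);

	if (heap == NULL)
		return (1);

	/* The array reports its full size; the pointer reports only itself. */
	printf("sizeof (array) = %zu\n", sizeof (array));	/* 1024 */
	printf("sizeof (heap)  = %zu\n", sizeof (heap));	/* 4 or 8 */

	/* sizeof (array) is fine here ... */
	(void) snprintf(array, sizeof (array), "%s.%d", "/tmp/vdev", 3);
	/* ... but a heap buffer needs its allocation size passed explicitly. */
	(void) snprintf(heap, PATH_BUFLEN, "%s.%d", "/tmp/vdev", 3);

	free(heap);
	return (0);
}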