diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index dacc57c..7795d80 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -19,14 +19,17 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/dsl_pool.h>
 #include <sys/dsl_dataset.h>
+#include <sys/dsl_prop.h>
 #include <sys/dsl_dir.h>
 #include <sys/dsl_synctask.h>
+#include <sys/dsl_scan.h>
+#include <sys/dnode.h>
 #include <sys/dmu_tx.h>
 #include <sys/dmu_objset.h>
 #include <sys/arc.h>
@@ -36,21 +39,220 @@
 #include <sys/fs/zfs.h>
 #include <sys/zfs_znode.h>
 #include <sys/spa_impl.h>
+#include <sys/dsl_deadlist.h>
+#include <sys/bptree.h>
+#include <sys/zfeature.h>
+#include <sys/zil_impl.h>
 
 int zfs_no_write_throttle = 0;
 int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
-int zfs_txg_synctime = 5;			/* target secs to sync a txg */
+int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */
+int zfs_txg_history = 60;		/* statistics for the last N txgs */
 
-uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
-uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
-uint64_t zfs_write_limit_inflated = 0;
-uint64_t zfs_write_limit_override = 0;
+unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
+unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
+unsigned long zfs_write_limit_inflated = 0;
+unsigned long zfs_write_limit_override = 0;
 
 kmutex_t zfs_write_limit_lock;
 
 static pgcnt_t old_physmem = 0;
 
+static void
+dsl_pool_tx_assign_init(dsl_pool_t *dp, unsigned int ndata)
+{
+	kstat_named_t *ks;
+	char name[KSTAT_STRLEN];
+	int i, data_size = ndata * sizeof(kstat_named_t);
+
+	(void) snprintf(name, KSTAT_STRLEN, "dmu_tx_assign-%s",
+	    spa_name(dp->dp_spa));
+
+	dp->dp_tx_assign_size = ndata;
+
+	if (data_size)
+		dp->dp_tx_assign_buckets = kmem_alloc(data_size, KM_SLEEP);
+	else
+		dp->dp_tx_assign_buckets = NULL;
+
+	for (i = 0; i < dp->dp_tx_assign_size; i++) {
+		ks = &dp->dp_tx_assign_buckets[i];
+		ks->data_type = KSTAT_DATA_UINT64;
+		ks->value.ui64 = 0;
+		(void) snprintf(ks->name, KSTAT_STRLEN, "%u us", 1 << i);
+	}
+
+	dp->dp_tx_assign_kstat = kstat_create("zfs", 0, name, "misc",
+	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);
+
+	if (dp->dp_tx_assign_kstat) {
+		dp->dp_tx_assign_kstat->ks_data = dp->dp_tx_assign_buckets;
+		dp->dp_tx_assign_kstat->ks_ndata = dp->dp_tx_assign_size;
+		dp->dp_tx_assign_kstat->ks_data_size = data_size;
+		kstat_install(dp->dp_tx_assign_kstat);
+	}
+}
+
+static void
+dsl_pool_tx_assign_destroy(dsl_pool_t *dp)
+{
+	if (dp->dp_tx_assign_buckets)
+		kmem_free(dp->dp_tx_assign_buckets,
+		    dp->dp_tx_assign_size * sizeof(kstat_named_t));
+
+	if (dp->dp_tx_assign_kstat)
+		kstat_delete(dp->dp_tx_assign_kstat);
+}
+
+void
+dsl_pool_tx_assign_add_usecs(dsl_pool_t *dp, uint64_t usecs)
+{
+	uint64_t idx = 0;
+
+	while (((1 << idx) < usecs) && (idx < dp->dp_tx_assign_size - 1))
+		idx++;
+
+	atomic_inc_64(&dp->dp_tx_assign_buckets[idx].value.ui64);
+}
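Note: the dmu_tx_assign-<pool> kstat declared above is a latency histogram. Bucket i counts assignments that completed in at most 1<<i microseconds, and the last bucket absorbs everything slower. Below is a minimal user-space sketch of the same bucket selection; the 32-bucket count matches the dsl_pool_tx_assign_init(dp, 32) call made later in this patch, and everything else (names, sample values) is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define	TX_ASSIGN_BUCKETS	32	/* matches dsl_pool_tx_assign_init(dp, 32) */

static uint64_t buckets[TX_ASSIGN_BUCKETS];

/* Smallest power-of-two bucket that holds 'usecs', capped at the last one. */
static void
tx_assign_add_usecs(uint64_t usecs)
{
	uint64_t idx = 0;

	while (((1ULL << idx) < usecs) && (idx < TX_ASSIGN_BUCKETS - 1))
		idx++;
	buckets[idx]++;		/* the kernel version uses atomic_inc_64() */
}

int
main(void)
{
	uint64_t samples[] = { 1, 3, 900, 1000000 };
	int i;

	for (i = 0; i < 4; i++)
		tx_assign_add_usecs(samples[i]);
	for (i = 0; i < TX_ASSIGN_BUCKETS; i++)
		if (buckets[i] != 0)
			printf("%u us: %llu\n", 1U << i,
			    (unsigned long long)buckets[i]);
	return (0);
}

Compiled and run, the four samples land in the "1 us", "4 us", "1024 us" and "1048576 us" buckets, which is exactly the layout the kstat exposes.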
+
 static int
+dsl_pool_txg_history_update(kstat_t *ksp, int rw)
+{
+	dsl_pool_t *dp = ksp->ks_private;
+	txg_history_t *th;
+	int i = 0;
+
+	if (rw == KSTAT_WRITE)
+		return (EACCES);
+
+	if (ksp->ks_data)
+		kmem_free(ksp->ks_data, ksp->ks_data_size);
+
+	mutex_enter(&dp->dp_lock);
+
+	ksp->ks_ndata = dp->dp_txg_history_size;
+	ksp->ks_data_size = dp->dp_txg_history_size * sizeof(kstat_txg_t);
+	if (ksp->ks_data_size > 0)
+		ksp->ks_data = kmem_alloc(ksp->ks_data_size, KM_PUSHPAGE);
+
+	/* Traversed oldest to youngest for the most readable kstat output */
+	for (th = list_tail(&dp->dp_txg_history); th != NULL;
+	    th = list_prev(&dp->dp_txg_history, th)) {
+		mutex_enter(&th->th_lock);
+		ASSERT3S(i + sizeof(kstat_txg_t), <=, ksp->ks_data_size);
+		memcpy(ksp->ks_data + i, &th->th_kstat, sizeof(kstat_txg_t));
+		i += sizeof(kstat_txg_t);
+		mutex_exit(&th->th_lock);
+	}
+
+	mutex_exit(&dp->dp_lock);
+
+	return (0);
+}
+
+static void
+dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
+{
+	char name[KSTAT_STRLEN];
+
+	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
+	    offsetof(txg_history_t, th_link));
+	dsl_pool_txg_history_add(dp, txg);
+
+	(void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
+	dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
+	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
+	if (dp->dp_txg_kstat) {
+		dp->dp_txg_kstat->ks_data = NULL;
+		dp->dp_txg_kstat->ks_private = dp;
+		dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
+		kstat_install(dp->dp_txg_kstat);
+	}
+}
+
+static void
+dsl_pool_txg_history_destroy(dsl_pool_t *dp)
+{
+	txg_history_t *th;
+
+	if (dp->dp_txg_kstat) {
+		if (dp->dp_txg_kstat->ks_data)
+			kmem_free(dp->dp_txg_kstat->ks_data,
+			    dp->dp_txg_kstat->ks_data_size);
+
+		kstat_delete(dp->dp_txg_kstat);
+	}
+
+	mutex_enter(&dp->dp_lock);
+	while ((th = list_remove_head(&dp->dp_txg_history))) {
+		dp->dp_txg_history_size--;
+		mutex_destroy(&th->th_lock);
+		kmem_free(th, sizeof(txg_history_t));
+	}
+
+	ASSERT3U(dp->dp_txg_history_size, ==, 0);
+	list_destroy(&dp->dp_txg_history);
+	mutex_exit(&dp->dp_lock);
+}
+
+txg_history_t *
+dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg)
+{
+	txg_history_t *th, *rm;
+
+	th = kmem_zalloc(sizeof(txg_history_t), KM_PUSHPAGE);
+	mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL);
+	th->th_kstat.txg = txg;
+	th->th_kstat.state = TXG_STATE_OPEN;
+	th->th_kstat.birth = gethrtime();
+
+	mutex_enter(&dp->dp_lock);
+
+	list_insert_head(&dp->dp_txg_history, th);
+	dp->dp_txg_history_size++;
+
+	while (dp->dp_txg_history_size > zfs_txg_history) {
+		dp->dp_txg_history_size--;
+		rm = list_remove_tail(&dp->dp_txg_history);
+		mutex_destroy(&rm->th_lock);
+		kmem_free(rm, sizeof(txg_history_t));
+	}
+
+	mutex_exit(&dp->dp_lock);
+
+	return (th);
+}
+
+/*
+ * Traversed youngest to oldest because lookups are only done for open
+ * or syncing txgs which are guaranteed to be at the head of the list.
+ * The txg_history_t structure will be returned locked.
+ */
+txg_history_t *
+dsl_pool_txg_history_get(dsl_pool_t *dp, uint64_t txg)
+{
+	txg_history_t *th;
+
+	mutex_enter(&dp->dp_lock);
+	for (th = list_head(&dp->dp_txg_history); th != NULL;
+	    th = list_next(&dp->dp_txg_history, th)) {
+		if (th->th_kstat.txg == txg) {
+			mutex_enter(&th->th_lock);
+			break;
+		}
+	}
+	mutex_exit(&dp->dp_lock);
+
+	return (th);
+}
+
+void
+dsl_pool_txg_history_put(txg_history_t *th)
+{
+	mutex_exit(&th->th_lock);
+}
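Note: dsl_pool_txg_history_get() above returns its entry with th_lock already held, and dsl_pool_txg_history_put() is the matching unlock; dp_lock protects only the list walk. The sketch below reproduces that hand-over-hand pattern in user space with pthreads. All names and types here are invented for illustration; only the locking order mirrors the patch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct entry {
	uint64_t	txg;
	pthread_mutex_t	lock;
	struct entry	*next;
} entry_t;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static entry_t *list_head;

/*
 * Like dsl_pool_txg_history_get(): take the entry lock before dropping
 * the list lock, so the entry cannot be torn down under the caller.
 */
static entry_t *
entry_get(uint64_t txg)
{
	entry_t *e;

	pthread_mutex_lock(&list_lock);
	for (e = list_head; e != NULL; e = e->next) {
		if (e->txg == txg) {
			pthread_mutex_lock(&e->lock);
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return (e);		/* NULL, or returned locked */
}

static void
entry_put(entry_t *e)		/* like dsl_pool_txg_history_put() */
{
	pthread_mutex_unlock(&e->lock);
}

int
main(void)
{
	static entry_t e = { 42, PTHREAD_MUTEX_INITIALIZER, NULL };
	entry_t *found;

	list_head = &e;
	if ((found = entry_get(42)) != NULL) {
		printf("txg %llu found locked\n",
		    (unsigned long long)found->txg);
		entry_put(found);
	}
	return (0);
}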
+
+int
 dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
 {
 	uint64_t obj;
@@ -80,34 +282,49 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
 	txg_list_create(&dp->dp_dirty_datasets,
 	    offsetof(dsl_dataset_t, ds_dirty_link));
+	txg_list_create(&dp->dp_dirty_zilogs,
+	    offsetof(zilog_t, zl_dirty_link));
 	txg_list_create(&dp->dp_dirty_dirs,
 	    offsetof(dsl_dir_t, dd_dirty_link));
 	txg_list_create(&dp->dp_sync_tasks,
 	    offsetof(dsl_sync_task_group_t, dstg_node));
-	list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t),
-	    offsetof(dsl_dataset_t, ds_synced_link));
 
 	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&dp->dp_scrub_cancel_lock, NULL, MUTEX_DEFAULT, NULL);
+
+	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
+	    1, 4, 0);
+
+	dsl_pool_txg_history_init(dp, txg);
+	dsl_pool_tx_assign_init(dp, 32);
 
 	return (dp);
 }
 
 int
-dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
+dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
 {
 	int err;
 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
+
+	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
+	    &dp->dp_meta_objset);
+	if (err != 0)
+		dsl_pool_close(dp);
+	else
+		*dpp = dp;
+
+	return (err);
+}
+
+int
+dsl_pool_open(dsl_pool_t *dp)
+{
+	int err;
 	dsl_dir_t *dd;
 	dsl_dataset_t *ds;
-	objset_impl_t *osi;
+	uint64_t obj;
 
 	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
-	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, &osi);
-	if (err)
-		goto out;
-	dp->dp_meta_objset = &osi->os;
-
 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
 	    &dp->dp_root_dir_obj);
@@ -123,76 +340,67 @@ dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
 	if (err)
 		goto out;
 
-	if (spa_version(spa) >= SPA_VERSION_ORIGIN) {
+	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
 		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
 		if (err)
 			goto out;
 		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
 		    FTAG, &ds);
+		if (err == 0) {
+			err = dsl_dataset_hold_obj(dp,
+			    ds->ds_phys->ds_prev_snap_obj, dp,
+			    &dp->dp_origin_snap);
+			dsl_dataset_rele(ds, FTAG);
+		}
+		dsl_dir_close(dd, dp);
 		if (err)
 			goto out;
-		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
-		    dp, &dp->dp_origin_snap);
-		if (err)
-			goto out;
-		dsl_dataset_rele(ds, FTAG);
-		dsl_dir_close(dd, dp);
 	}
 
-	/* get scrub status */
-	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
-	    &dp->dp_scrub_func);
-	if (err == 0) {
-		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-		    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
-		    &dp->dp_scrub_queue_obj);
-		if (err)
-			goto out;
-		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-		    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
-		    &dp->dp_scrub_min_txg);
+	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
+		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
+		    &dp->dp_free_dir);
 		if (err)
 			goto out;
+
 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-		    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
-		    &dp->dp_scrub_max_txg);
+		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
 		if (err)
 			goto out;
+		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
+		    dp->dp_meta_objset, obj));
+	}
+
+	if (spa_feature_is_active(dp->dp_spa,
+	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-		    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
-		    &dp->dp_scrub_bookmark);
-		if (err)
+		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
+		    &dp->dp_bptree_obj);
+		if (err != 0)
 			goto out;
+	}
+
+	if (spa_feature_is_active(dp->dp_spa,
+	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
 		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
-		    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
-		    &spa->spa_scrub_errors);
-		if (err)
+		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
+		    &dp->dp_empty_bpobj);
+		if (err != 0)
 			goto out;
-		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
-			/*
-			 * A new-type scrub was in progress on an old
-			 * pool.  Restart from the beginning, since the
-			 * old software may have changed the pool in the
-			 * meantime.
-			 */
-			dsl_pool_scrub_restart(dp);
-		}
-	} else {
-		/*
-		 * It's OK if there is no scrub in progress (and if
-		 * there was an I/O error, ignore it).
-		 */
-		err = 0;
 	}
 
-out:
-	rw_exit(&dp->dp_config_rwlock);
+	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
+	    &dp->dp_tmp_userrefs_obj);
+	if (err == ENOENT)
+		err = 0;
 	if (err)
-		dsl_pool_close(dp);
-	else
-		*dpp = dp;
+		goto out;
+
+	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
+out:
+	rw_exit(&dp->dp_config_rwlock);
 
 	return (err);
 }
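Note: this hunk splits the old dsl_pool_open(spa, txg, dpp) into two phases. dsl_pool_init() only builds the in-core pool and opens the meta-objset, while dsl_pool_open() later looks up the root dataset, the special dirs, feature objects and scan state once more of the import has completed. A hedged sketch of the resulting caller contract follows; the wrapper function below is hypothetical and does not appear in this patch (the real caller lives in spa.c), it only illustrates the required ordering and cleanup.

/* Sketch of the two-phase caller contract implied by the split above. */
static int
example_load_pool(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;

	/* Phase 1: allocate the dsl_pool and open the meta-objset. */
	err = dsl_pool_init(spa, txg, dpp);
	if (err != 0)
		return (err);	/* dsl_pool_init() already cleaned up */

	/* ... config and vdev setup would happen between the phases ... */

	/* Phase 2: look up directory objects, features, scan state. */
	err = dsl_pool_open(*dpp);
	if (err != 0)
		dsl_pool_close(*dpp);

	return (err);
}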
@@ -210,22 +418,30 @@ dsl_pool_close(dsl_pool_t *dp)
 		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
 	if (dp->dp_mos_dir)
 		dsl_dir_close(dp->dp_mos_dir, dp);
+	if (dp->dp_free_dir)
+		dsl_dir_close(dp->dp_free_dir, dp);
 	if (dp->dp_root_dir)
 		dsl_dir_close(dp->dp_root_dir, dp);
 
+	bpobj_close(&dp->dp_free_bpobj);
+
 	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
 	if (dp->dp_meta_objset)
-		dmu_objset_evict(NULL, dp->dp_meta_objset->os);
+		dmu_objset_evict(dp->dp_meta_objset);
 
 	txg_list_destroy(&dp->dp_dirty_datasets);
+	txg_list_destroy(&dp->dp_dirty_zilogs);
+	txg_list_destroy(&dp->dp_sync_tasks);
 	txg_list_destroy(&dp->dp_dirty_dirs);
-	list_destroy(&dp->dp_synced_datasets);
 
 	arc_flush(dp->dp_spa);
 	txg_fini(dp);
+	dsl_scan_fini(dp);
+	dsl_pool_tx_assign_destroy(dp);
+	dsl_pool_txg_history_destroy(dp);
 	rw_destroy(&dp->dp_config_rwlock);
 	mutex_destroy(&dp->dp_lock);
-	mutex_destroy(&dp->dp_scrub_cancel_lock);
+	taskq_destroy(dp->dp_iput_taskq);
 	if (dp->dp_blkstats)
 		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
 	kmem_free(dp, sizeof (dsl_pool_t));
@@ -237,18 +453,21 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
 	int err;
 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
 	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
-	objset_impl_t *osip;
+	objset_t *os;
 	dsl_dataset_t *ds;
-	uint64_t dsobj;
+	uint64_t obj;
 
 	/* create and open the MOS (meta-objset) */
-	dp->dp_meta_objset = &dmu_objset_create_impl(spa,
-	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx)->os;
+	dp->dp_meta_objset = dmu_objset_create_impl(spa,
+	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
 
 	/* create the pool directory */
 	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
-	ASSERT3U(err, ==, 0);
+	ASSERT0(err);
+
+	/* Initialize scan structures */
+	VERIFY3U(0, ==, dsl_scan_init(dp, txg));
 
 	/* create and open the root dir */
 	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
@@ -260,18 +479,33 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
 	VERIFY(0 == dsl_pool_open_special_dir(dp,
 	    MOS_DIR_NAME, &dp->dp_mos_dir));
 
+	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
+		/* create and open the free dir */
+		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
+		    FREE_DIR_NAME, tx);
+		VERIFY(0 == dsl_pool_open_special_dir(dp,
+		    FREE_DIR_NAME, &dp->dp_free_dir));
+
+		/* create and open the free_bplist */
+		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
+		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
+		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
+		    dp->dp_meta_objset, obj));
+	}
+
 	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
 		dsl_pool_create_origin(dp, tx);
 
 	/* create the root dataset */
-	dsobj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);
+	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);
 
 	/* create the root objset */
-	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
-	osip = dmu_objset_create_impl(dp->dp_spa, ds,
-	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
+	VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
+	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
+	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
 #ifdef _KERNEL
-	zfs_create_fs(&osip->os, kcred, zplprops, tx);
+	zfs_create_fs(os, kcred, zplprops, tx);
 #endif
 	dsl_dataset_rele(ds, FTAG);
 
@@ -280,6 +514,32 @@
 	dmu_tx_commit(tx);
 
 	return (dp);
 }
 
+/*
+ * Account for the meta-objset space in its placeholder dsl_dir.
+ */
+void
+dsl_pool_mos_diduse_space(dsl_pool_t *dp,
+    int64_t used, int64_t comp, int64_t uncomp)
+{
+	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
+	mutex_enter(&dp->dp_lock);
+	dp->dp_mos_used_delta += used;
+	dp->dp_mos_compressed_delta += comp;
+	dp->dp_mos_uncompressed_delta += uncomp;
+	mutex_exit(&dp->dp_lock);
+}
+
+static int
+deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+	dsl_deadlist_t *dl = arg;
+	dsl_pool_t *dp = dmu_objset_pool(dl->dl_os);
+	rw_enter(&dp->dp_config_rwlock, RW_READER);
+	dsl_deadlist_insert(dl, bp, tx);
+	rw_exit(&dp->dp_config_rwlock);
+	return (0);
+}
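Note: deadlist_enqueue_cb() above is written to the callback signature that bplist_iterate() expects, so that dsl_pool_sync() below can drain each dataset's pending deadlist into its on-disk deadlist one blkptr at a time. A self-contained sketch of that iterate-with-callback shape follows; the types and the fake_bplist_iterate() helper are stand-ins for illustration, not the real bplist API (which also threads a dmu_tx_t through).

#include <stdio.h>

typedef struct blkptr { unsigned long blk_birth; } blkptr_t;

typedef int (*bplist_cb_t)(void *arg, const blkptr_t *bp);

/* Stand-in iterator: call 'func' with 'arg' on every entry in order. */
static int
fake_bplist_iterate(blkptr_t *bps, int nbps, bplist_cb_t func, void *arg)
{
	int i, err;

	for (i = 0; i < nbps; i++)
		if ((err = func(arg, &bps[i])) != 0)
			return (err);
	return (0);
}

static int
print_cb(void *arg, const blkptr_t *bp)
{
	const char *label = arg;

	printf("%s: blkptr born in txg %lu\n", label, bp->blk_birth);
	return (0);
}

int
main(void)
{
	blkptr_t pending[] = { { 100 }, { 101 } };

	return (fake_bplist_iterate(pending, 2, print_cb, "deadlist"));
}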
+
 void
 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 {
@@ -287,48 +547,111 @@
 	dmu_tx_t *tx;
 	dsl_dir_t *dd;
 	dsl_dataset_t *ds;
-	dsl_sync_task_group_t *dstg;
-	objset_impl_t *mosi = dp->dp_meta_objset->os;
+	objset_t *mos = dp->dp_meta_objset;
 	hrtime_t start, write_time;
 	uint64_t data_written;
 	int err;
+	list_t synced_datasets;
+
+	list_create(&synced_datasets, sizeof (dsl_dataset_t),
+	    offsetof(dsl_dataset_t, ds_synced_link));
+
+	/*
+	 * We need to copy dp_space_towrite() before doing
+	 * dsl_sync_task_group_sync(), because
+	 * dsl_dataset_snapshot_reserve_space() will increase
+	 * dp_space_towrite but not actually write anything.
+	 */
+	data_written = dp->dp_space_towrite[txg & TXG_MASK];
 
 	tx = dmu_tx_create_assigned(dp, txg);
 
 	dp->dp_read_overhead = 0;
+	start = gethrtime();
+
 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
-		if (!list_link_active(&ds->ds_synced_link))
-			list_insert_tail(&dp->dp_synced_datasets, ds);
-		else
-			dmu_buf_rele(ds->ds_dbuf, ds);
+	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
+		/*
+		 * We must not sync any non-MOS datasets twice, because
+		 * we may have taken a snapshot of them.  However, we
+		 * may sync newly-created datasets on pass 2.
+		 */
+		ASSERT(!list_link_active(&ds->ds_synced_link));
+		list_insert_tail(&synced_datasets, ds);
 		dsl_dataset_sync(ds, zio, tx);
 	}
 	DTRACE_PROBE(pool_sync__1setup);
-
-	start = gethrtime();
 	err = zio_wait(zio);
+
 	write_time = gethrtime() - start;
 	ASSERT(err == 0);
 	DTRACE_PROBE(pool_sync__2rootzio);
 
-	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg))
-		dsl_sync_task_group_sync(dstg, tx);
-	DTRACE_PROBE(pool_sync__3task);
+	/*
+	 * After the data blocks have been written (ensured by the zio_wait()
+	 * above), update the user/group space accounting.
+	 */
+	for (ds = list_head(&synced_datasets); ds;
+	    ds = list_next(&synced_datasets, ds))
+		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
+
+	/*
+	 * Sync the datasets again to push out the changes due to
+	 * userspace updates.  This must be done before we process the
+	 * sync tasks, so that any snapshots will have the correct
+	 * user accounting information (and we won't get confused
+	 * about which blocks are part of the snapshot).
+	 */
+	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
+	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
+		ASSERT(list_link_active(&ds->ds_synced_link));
+		dmu_buf_rele(ds->ds_dbuf, ds);
+		dsl_dataset_sync(ds, zio, tx);
+	}
+	err = zio_wait(zio);
+
+	/*
+	 * Now that the datasets have been completely synced, we can
+	 * clean up our in-memory structures accumulated while syncing:
+	 *
+	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
+	 *  - clean up zil records
+	 *  - release hold from dsl_dataset_dirty()
+	 */
+	while ((ds = list_remove_head(&synced_datasets))) {
+		ASSERTV(objset_t *os = ds->ds_objset);
+		bplist_iterate(&ds->ds_pending_deadlist,
+		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
+		ASSERT(!dmu_objset_is_dirty(os, txg));
+		dmu_buf_rele(ds->ds_dbuf, ds);
+	}
 
 	start = gethrtime();
-	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
+	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
 		dsl_dir_sync(dd, tx);
 	write_time += gethrtime() - start;
 
-	if (spa_sync_pass(dp->dp_spa) == 1)
-		dsl_pool_scrub_sync(dp, tx);
-
+	/*
+	 * The MOS's space is accounted for in the pool/$MOS
+	 * (dp_mos_dir).  We can't modify the mos while we're syncing
+	 * it, so we remember the deltas and apply them here.
+	 */
+	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
+	    dp->dp_mos_uncompressed_delta != 0) {
+		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
+		    dp->dp_mos_used_delta,
+		    dp->dp_mos_compressed_delta,
+		    dp->dp_mos_uncompressed_delta, tx);
+		dp->dp_mos_used_delta = 0;
+		dp->dp_mos_compressed_delta = 0;
+		dp->dp_mos_uncompressed_delta = 0;
+	}
 
 	start = gethrtime();
-	if (list_head(&mosi->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
-	    list_head(&mosi->os_free_dnodes[txg & TXG_MASK]) != NULL) {
+	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
+	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
 		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-		dmu_objset_sync(mosi, zio, tx);
+		dmu_objset_sync(mos, zio, tx);
 		err = zio_wait(zio);
 		ASSERT(err == 0);
 		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
@@ -339,9 +662,29 @@
 	    hrtime_t, dp->dp_read_overhead);
 	write_time -= dp->dp_read_overhead;
 
+	/*
+	 * If we modify a dataset in the same txg that we want to destroy it,
+	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
+	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
+	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
+	 * and clearing the hold on it) before we process the sync_tasks.
+	 * The MOS data dirtied by the sync_tasks will be synced on the next
+	 * pass.
+	 */
+	DTRACE_PROBE(pool_sync__3task);
+	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
+		dsl_sync_task_group_t *dstg;
+		/*
+		 * No more sync tasks should have been added while we
+		 * were syncing.
+		 */
+		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
+		while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg)))
+			dsl_sync_task_group_sync(dstg, tx);
+	}
+
 	dmu_tx_commit(tx);
 
-	data_written = dp->dp_space_towrite[txg & TXG_MASK];
 	dp->dp_space_towrite[txg & TXG_MASK] = 0;
 	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);
 
@@ -366,10 +709,14 @@
 	 * amount of write traffic allowed into each transaction group.
 	 * Weight the throughput calculation towards the current value:
 	 * 	thru = 3/4 old_thru + 1/4 new_thru
+	 *
+	 * Note: write_time is in nanosecs, so write_time/MICROSEC
+	 * yields millisecs
 	 */
 	ASSERT(zfs_write_limit_min > 0);
-	if (data_written > zfs_write_limit_min / 8 && write_time > 0) {
-		uint64_t throughput = (data_written * NANOSEC) / write_time;
+	if (data_written > zfs_write_limit_min / 8 && write_time > MICROSEC) {
+		uint64_t throughput = data_written / (write_time / MICROSEC);
+
 		if (dp->dp_throughput)
 			dp->dp_throughput = throughput / 4 +
 			    3 * dp->dp_throughput / 4;
 		else
 			dp->dp_throughput = throughput;
 		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
 		    MAX(zfs_write_limit_min,
-		    dp->dp_throughput * zfs_txg_synctime));
+		    dp->dp_throughput * zfs_txg_synctime_ms));
 	}
 }
 
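Note: in the throttle update just above, write_time is in nanoseconds, so write_time / MICROSEC is milliseconds and throughput comes out in bytes per millisecond; multiplying the smoothed throughput by zfs_txg_synctime_ms gives the bytes that should fit in one target sync interval. A standalone trace of one update, with all sample values invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define	MICROSEC	1000000ULL
#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	uint64_t zfs_write_limit_min = 32ULL << 20;	/* 32MB floor */
	uint64_t zfs_write_limit_inflated = 16ULL << 30; /* example cap */
	uint64_t zfs_txg_synctime_ms = 1000;		/* 1s target */

	uint64_t data_written = 256ULL << 20;	/* 256MB in this txg */
	uint64_t write_time = 2000000000ULL;	/* 2s of write time, in ns */
	uint64_t dp_throughput = 131072;	/* prior estimate, bytes/ms */

	/* bytes per millisecond, as computed in dsl_pool_sync() */
	uint64_t throughput = data_written / (write_time / MICROSEC);

	/* thru = 3/4 old_thru + 1/4 new_thru */
	dp_throughput = throughput / 4 + 3 * dp_throughput / 4;

	uint64_t dp_write_limit = MIN(zfs_write_limit_inflated,
	    MAX(zfs_write_limit_min, dp_throughput * zfs_txg_synctime_ms));

	printf("throughput %llu B/ms, next txg write limit %llu bytes\n",
	    (unsigned long long)dp_throughput,
	    (unsigned long long)dp_write_limit);
	return (0);
}

With these numbers the smoothed estimate is 131858 B/ms and the next write limit lands at about 126MB, squarely between the 32MB floor and the inflated cap.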
 void
-dsl_pool_zil_clean(dsl_pool_t *dp)
+dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
 {
+	zilog_t *zilog;
 	dsl_dataset_t *ds;
 
-	while (ds = list_head(&dp->dp_synced_datasets)) {
-		list_remove(&dp->dp_synced_datasets, ds);
-		ASSERT(ds->ds_user_ptr != NULL);
-		zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
-		dmu_buf_rele(ds->ds_dbuf, ds);
+	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
+		ds = dmu_objset_ds(zilog->zl_os);
+		zil_clean(zilog, txg);
+		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
+		dmu_buf_rele(ds->ds_dbuf, zilog);
 	}
+	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
 }
 
 /*
@@ -402,7 +751,7 @@
 int
 dsl_pool_sync_context(dsl_pool_t *dp)
 {
 	return (curthread == dp->dp_tx.tx_sync_thread ||
-	    spa_get_dsl(dp->dp_spa) == NULL);
+	    spa_is_initializing(dp->dp_spa));
 }
 
 uint64_t
@@ -453,8 +802,10 @@ dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
 		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
 		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;
 
-		if (reserved && reserved > write_limit)
+		if (reserved && reserved > write_limit) {
+			DMU_TX_STAT_BUMP(dmu_tx_write_limit);
 			return (ERESTART);
+		}
 	}
 
 	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);
 
@@ -568,6 +919,7 @@ upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
 	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);
 
 	if (prev->ds_phys->ds_next_clones_obj == 0) {
+		dmu_buf_will_dirty(prev->ds_dbuf, tx);
 		prev->ds_phys->ds_next_clones_obj =
 		    zap_create(dp->dp_meta_objset,
 		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
@@ -587,8 +939,68 @@ dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
 	ASSERT(dmu_tx_is_syncing(tx));
 	ASSERT(dp->dp_origin_snap != NULL);
 
-	(void) dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
-	    tx, DS_FIND_CHILDREN);
+	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
+	    tx, DS_FIND_CHILDREN));
+}
+
+/* ARGSUSED */
+static int
+upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
+{
+	dmu_tx_t *tx = arg;
+	dsl_dataset_t *ds;
+	dsl_pool_t *dp = spa_get_dsl(spa);
+	objset_t *mos = dp->dp_meta_objset;
+
+	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
+
+	if (ds->ds_dir->dd_phys->dd_origin_obj) {
+		dsl_dataset_t *origin;
+
+		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
+		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));
+
+		if (origin->ds_dir->dd_phys->dd_clones == 0) {
+			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
+			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
+			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
+		}
+
+		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
+		    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
+
+		dsl_dataset_rele(origin, FTAG);
+	}
+
+	dsl_dataset_rele(ds, FTAG);
+	return (0);
+}
+
+void
+dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+	uint64_t obj;
+
+	ASSERT(dmu_tx_is_syncing(tx));
+
+	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
+	VERIFY(0 == dsl_pool_open_special_dir(dp,
+	    FREE_DIR_NAME, &dp->dp_free_dir));
+
+	/*
+	 * We can't use bpobj_alloc(), because spa_version() still
+	 * returns the old version, and we need a new-version bpobj with
+	 * subobj support.  So call dmu_object_alloc() directly.
+	 */
+	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
+	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
+	VERIFY3U(0, ==, zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
+	VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
+	    dp->dp_meta_objset, obj));
+
+	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL,
+	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
 }
 
 void
@@ -605,9 +1017,143 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
 	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
 	    NULL, 0, kcred, tx);
 	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
-	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, kcred, tx);
+	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, tx);
 	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
 	    dp, &dp->dp_origin_snap));
 	dsl_dataset_rele(ds, FTAG);
 	rw_exit(&dp->dp_config_rwlock);
 }
+
+taskq_t *
+dsl_pool_iput_taskq(dsl_pool_t *dp)
+{
+	return (dp->dp_iput_taskq);
+}
+
+/*
+ * Walk through the pool-wide zap object of temporary snapshot user holds
+ * and release them.
+ */
+void
+dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
+{
+	zap_attribute_t za;
+	zap_cursor_t zc;
+	objset_t *mos = dp->dp_meta_objset;
+	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
+
+	if (zapobj == 0)
+		return;
+	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
+
+	for (zap_cursor_init(&zc, mos, zapobj);
+	    zap_cursor_retrieve(&zc, &za) == 0;
+	    zap_cursor_advance(&zc)) {
+		char *htag;
+		uint64_t dsobj;
+
+		htag = strchr(za.za_name, '-');
+		*htag = '\0';
+		++htag;
+		dsobj = strtonum(za.za_name, NULL);
+		(void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_FALSE);
+	}
+	zap_cursor_fini(&zc);
+}
+
+/*
+ * Create the pool-wide zap object for storing temporary snapshot holds.
+ */
+void
+dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+	objset_t *mos = dp->dp_meta_objset;
+
+	ASSERT(dp->dp_tmp_userrefs_obj == 0);
+	ASSERT(dmu_tx_is_syncing(tx));
+
+	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
+	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
+}
+
+static int
+dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
+    const char *tag, uint64_t *now, dmu_tx_t *tx, boolean_t holding)
+{
+	objset_t *mos = dp->dp_meta_objset;
+	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
+	char *name;
+	int error;
+
+	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
+	ASSERT(dmu_tx_is_syncing(tx));
+
+	/*
+	 * If the pool was created prior to SPA_VERSION_USERREFS, the
+	 * zap object for temporary holds might not exist yet.
+	 */
+	if (zapobj == 0) {
+		if (holding) {
+			dsl_pool_user_hold_create_obj(dp, tx);
+			zapobj = dp->dp_tmp_userrefs_obj;
+		} else {
+			return (ENOENT);
+		}
+	}
+
+	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
+	if (holding)
+		error = zap_add(mos, zapobj, name, 8, 1, now, tx);
+	else
+		error = zap_remove(mos, zapobj, name, tx);
+	strfree(name);
+
+	return (error);
+}
+
+/*
+ * Add a temporary hold for the given dataset object and tag.
+ */
+int
+dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
+    uint64_t *now, dmu_tx_t *tx)
+{
+	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
+}
+
+/*
+ * Release a temporary hold for the given dataset object and tag.
+ */
+int
+dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
+    dmu_tx_t *tx)
+{
+	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
+	    tx, B_FALSE));
}
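Note: the hold names built by dsl_pool_user_hold_rele_impl() are "<dsobj in hex>-<tag>", and dsl_pool_clean_tmp_userrefs() splits them back apart at the first '-'. The hex half can never contain a dash, so the split stays unambiguous even when the tag itself has dashes. A user-space round-trip of that encoding follows (values invented; the kernel uses kmem_asprintf() and its own strtonum()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	unsigned long long dsobj = 0x1234;	/* invented dataset obj # */
	const char *tag = "zfs-hold-example";	/* tags may contain '-' */
	char name[64];
	char *htag;

	/* Encode, with the same format as dsl_pool_user_hold_rele_impl(). */
	(void) snprintf(name, sizeof (name), "%llx-%s", dsobj, tag);

	/* Decode, with the same split as dsl_pool_clean_tmp_userrefs(). */
	htag = strchr(name, '-');
	*htag = '\0';
	++htag;

	printf("dsobj=%llx tag=%s\n", strtoull(name, NULL, 16), htag);
	return (0);
}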
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_no_write_throttle, int, 0644);
+MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");
+
+module_param(zfs_write_limit_shift, int, 0444);
+MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");
+
+module_param(zfs_txg_synctime_ms, int, 0644);
+MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");
+
+module_param(zfs_txg_history, int, 0644);
+MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs");
+
+module_param(zfs_write_limit_min, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");
+
+module_param(zfs_write_limit_max, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit");
+
+module_param(zfs_write_limit_inflated, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit");
+
+module_param(zfs_write_limit_override, ulong, 0444);
+MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit");
+#endif
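Note: with the module loaded, the two kstats added by this patch are normally read through the SPL's /proc interface, e.g. /proc/spl/kstat/zfs/dmu_tx_assign-<pool> and /proc/spl/kstat/zfs/txgs-<pool>; those paths are an assumption from SPL conventions of this era, not something this patch itself shows. A trivial reader, with the path hard-coded as that assumption:

#include <stdio.h>

int
main(void)
{
	/* Assumed SPL kstat path; substitute your pool's name. */
	const char *path = "/proc/spl/kstat/zfs/dmu_tx_assign-tank";
	char line[256];
	FILE *fp = fopen(path, "r");

	if (fp == NULL) {
		perror(path);
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL)
		fputs(line, stdout);	/* one "<bucket> us <count>" per row */
	(void) fclose(fp);
	return (0);
}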