X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fdsl_pool.c;h=85c745e8ae1579b5a9ca849002eed9586d1bda73;hb=9ae529ec5dbdc828ff8326beae58062971d74b2e;hp=700cc962865da79b18a5876e86725715144502d9;hpb=572e285762521df27fe5b026f409ba1a21abb7ac;p=zfs.git
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 700cc96..85c745e 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -20,6 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/dsl_pool.h>
@@ -39,20 +40,160 @@
 #include <sys/zfs_znode.h>
 #include <sys/spa_impl.h>
 #include <sys/dsl_deadlist.h>
+#include <sys/bptree.h>
+#include <sys/zfeature.h>
 
 int zfs_no_write_throttle = 0;
 int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
 int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */
+int zfs_txg_history = 60;			/* statistics for the last N txgs */
 
-uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
-uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
-uint64_t zfs_write_limit_inflated = 0;
-uint64_t zfs_write_limit_override = 0;
+unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
+unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
+unsigned long zfs_write_limit_inflated = 0;
+unsigned long zfs_write_limit_override = 0;
 
 kmutex_t zfs_write_limit_lock;
 
 static pgcnt_t old_physmem = 0;
 
+static int
+dsl_pool_txg_history_update(kstat_t *ksp, int rw)
+{
+	dsl_pool_t *dp = ksp->ks_private;
+	txg_history_t *th;
+	int i = 0;
+
+	if (rw == KSTAT_WRITE)
+		return (EACCES);
+
+	if (ksp->ks_data)
+		kmem_free(ksp->ks_data, ksp->ks_data_size);
+
+	mutex_enter(&dp->dp_lock);
+
+	ksp->ks_ndata = dp->dp_txg_history_size;
+	ksp->ks_data_size = dp->dp_txg_history_size * sizeof(kstat_txg_t);
+	if (ksp->ks_data_size > 0)
+		ksp->ks_data = kmem_alloc(ksp->ks_data_size, KM_PUSHPAGE);
+
+	/* Traversed oldest to youngest for the most readable kstat output */
+	for (th = list_tail(&dp->dp_txg_history); th != NULL;
+	    th = list_prev(&dp->dp_txg_history, th)) {
+		mutex_enter(&th->th_lock);
+		ASSERT3S(i + sizeof(kstat_txg_t), <=, ksp->ks_data_size);
+		memcpy(ksp->ks_data + i, &th->th_kstat, sizeof(kstat_txg_t));
+		i += sizeof(kstat_txg_t);
+		mutex_exit(&th->th_lock);
+	}
+
+	mutex_exit(&dp->dp_lock);
+
+	return (0);
+}
+
+static void
+dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
+{
+	char name[KSTAT_STRLEN];
+
+	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
+	    offsetof(txg_history_t, th_link));
+	dsl_pool_txg_history_add(dp, txg);
+
+	(void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
+	dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
+	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
+	if (dp->dp_txg_kstat) {
+		dp->dp_txg_kstat->ks_data = NULL;
+		dp->dp_txg_kstat->ks_private = dp;
+		dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
+		kstat_install(dp->dp_txg_kstat);
+	}
+}
+
+static void
+dsl_pool_txg_history_destroy(dsl_pool_t *dp)
+{
+	txg_history_t *th;
+
+	if (dp->dp_txg_kstat) {
+		if (dp->dp_txg_kstat->ks_data)
+			kmem_free(dp->dp_txg_kstat->ks_data,
+			    dp->dp_txg_kstat->ks_data_size);
+
+		kstat_delete(dp->dp_txg_kstat);
+	}
+
+	mutex_enter(&dp->dp_lock);
+	while ((th = list_remove_head(&dp->dp_txg_history))) {
+		dp->dp_txg_history_size--;
+		mutex_destroy(&th->th_lock);
+		kmem_free(th, sizeof(txg_history_t));
+	}
+
+	ASSERT3U(dp->dp_txg_history_size, ==, 0);
+	list_destroy(&dp->dp_txg_history);
+	mutex_exit(&dp->dp_lock);
+}
+
+txg_history_t *
+dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg) +{ + txg_history_t *th, *rm; + + th = kmem_zalloc(sizeof(txg_history_t), KM_SLEEP); + mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL); + th->th_kstat.txg = txg; + th->th_kstat.state = TXG_STATE_OPEN; + th->th_kstat.birth = gethrtime(); + + mutex_enter(&dp->dp_lock); + + list_insert_head(&dp->dp_txg_history, th); + dp->dp_txg_history_size++; + + while (dp->dp_txg_history_size > zfs_txg_history) { + dp->dp_txg_history_size--; + rm = list_remove_tail(&dp->dp_txg_history); + mutex_destroy(&rm->th_lock); + kmem_free(rm, sizeof(txg_history_t)); + } + + mutex_exit(&dp->dp_lock); + + return (th); +} + +/* + * Traversed youngest to oldest because lookups are only done for open + * or syncing txgs which are guaranteed to be at the head of the list. + * The txg_history_t structure will be returned locked. + */ +txg_history_t * +dsl_pool_txg_history_get(dsl_pool_t *dp, uint64_t txg) +{ + txg_history_t *th; + + mutex_enter(&dp->dp_lock); + for (th = list_head(&dp->dp_txg_history); th != NULL; + th = list_next(&dp->dp_txg_history, th)) { + if (th->th_kstat.txg == txg) { + mutex_enter(&th->th_lock); + break; + } + } + mutex_exit(&dp->dp_lock); + + return (th); +} + +void +dsl_pool_txg_history_put(txg_history_t *th) +{ + mutex_exit(&th->th_lock); +} + int dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp) { @@ -92,27 +233,39 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg) mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL); - dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri, + dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri, 1, 4, 0); + dsl_pool_txg_history_init(dp, txg); + return (dp); } int -dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) +dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) { int err; dsl_pool_t *dp = dsl_pool_open_impl(spa, txg); + + err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, + &dp->dp_meta_objset); + if (err != 0) + dsl_pool_close(dp); + else + *dpp = dp; + + return (err); +} + +int +dsl_pool_open(dsl_pool_t *dp) +{ + int err; dsl_dir_t *dd; dsl_dataset_t *ds; uint64_t obj; rw_enter(&dp->dp_config_rwlock, RW_WRITER); - err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, - &dp->dp_meta_objset); - if (err) - goto out; - err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &dp->dp_root_dir_obj); @@ -128,7 +281,7 @@ dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) if (err) goto out; - if (spa_version(spa) >= SPA_VERSION_ORIGIN) { + if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) { err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd); if (err) goto out; @@ -145,7 +298,7 @@ dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) goto out; } - if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { + if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) { err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME, &dp->dp_free_dir); if (err) @@ -159,6 +312,15 @@ dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) dp->dp_meta_objset, obj)); } + if (spa_feature_is_active(dp->dp_spa, + &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) { + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1, + &dp->dp_bptree_obj); + if (err != 0) + goto out; + } + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1, &dp->dp_tmp_userrefs_obj); @@ -167,15 +329,10 @@ 
dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) if (err) goto out; - err = dsl_scan_init(dp, txg); + err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg); out: rw_exit(&dp->dp_config_rwlock); - if (err) - dsl_pool_close(dp); - else - *dpp = dp; - return (err); } @@ -212,9 +369,10 @@ dsl_pool_close(dsl_pool_t *dp) arc_flush(dp->dp_spa); txg_fini(dp); dsl_scan_fini(dp); + dsl_pool_txg_history_destroy(dp); rw_destroy(&dp->dp_config_rwlock); mutex_destroy(&dp->dp_lock); - taskq_destroy(dp->dp_vnrele_taskq); + taskq_destroy(dp->dp_iput_taskq); if (dp->dp_blkstats) kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); kmem_free(dp, sizeof (dsl_pool_t)); @@ -275,8 +433,8 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg) /* create the root objset */ VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds)); - os = dmu_objset_create_impl(dp->dp_spa, ds, - dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx); + VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds, + dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx))); #ifdef _KERNEL zfs_create_fs(os, kcred, zplprops, tx); #endif @@ -291,7 +449,10 @@ static int deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) { dsl_deadlist_t *dl = arg; + dsl_pool_t *dp = dmu_objset_pool(dl->dl_os); + rw_enter(&dp->dp_config_rwlock, RW_READER); dsl_deadlist_insert(dl, bp, tx); + rw_exit(&dp->dp_config_rwlock); return (0); } @@ -322,7 +483,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) start = gethrtime(); zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); - while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) { + while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) { /* * We must not sync any non-MOS datasets twice, because * we may have taken a snapshot of them. However, we @@ -350,7 +511,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) * whose ds_bp will be rewritten when we do this 2nd sync. */ zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); - while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) { + while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) { ASSERT(list_link_active(&ds->ds_synced_link)); dmu_buf_rele(ds->ds_dbuf, ds); dsl_dataset_sync(ds, zio, tx); @@ -367,7 +528,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) deadlist_enqueue_cb, &ds->ds_deadlist, tx); } - while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg)) { + while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg))) { /* * No more sync tasks should have been added while we * were syncing. 
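The hunks above and below that only add a second set of parentheses around the loop conditions are warning fixes, not behavior changes: under gcc -Wall, -Wparentheses flags an assignment used directly as a truth value as a possible typo for "==", and doubling the parentheses is the conventional way to mark the assignment as intentional. A minimal standalone sketch of the warning and the idiom (the node list here is hypothetical, not from this patch):

	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *next;
		int val;
	};

	int
	main(void)
	{
		struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
		struct node *n = &a;

		/*
		 * "while (n = n->next)" draws a -Wparentheses warning
		 * under gcc -Wall, since the assignment may be a typo
		 * for "==".  The doubled parentheses below are the
		 * idiom this patch adopts to mark it intentional.
		 */
		while ((n = n->next))
			printf("%d\n", n->val);

		return (0);
	}
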
@@ -378,7 +539,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) DTRACE_PROBE(pool_sync__3task); start = gethrtime(); - while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) + while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg))) dsl_dir_sync(dd, tx); write_time += gethrtime() - start; @@ -448,7 +609,7 @@ dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg) dsl_dataset_t *ds; objset_t *os; - while (ds = list_head(&dp->dp_synced_datasets)) { + while ((ds = list_head(&dp->dp_synced_datasets))) { list_remove(&dp->dp_synced_datasets, ds); os = ds->ds_objset; zil_clean(os->os_zil, txg); @@ -466,7 +627,7 @@ int dsl_pool_sync_context(dsl_pool_t *dp) { return (curthread == dp->dp_tx.tx_sync_thread || - spa_get_dsl(dp->dp_spa) == NULL); + spa_is_initializing(dp->dp_spa)); } uint64_t @@ -517,8 +678,10 @@ dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx) reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK] + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2; - if (reserved && reserved > write_limit) + if (reserved && reserved > write_limit) { + DMU_TX_STAT_BUMP(dmu_tx_write_limit); return (ERESTART); + } } atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space); @@ -692,9 +855,10 @@ upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx) { - ASSERT(dmu_tx_is_syncing(tx)); uint64_t obj; + ASSERT(dmu_tx_is_syncing(tx)); + (void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx); VERIFY(0 == dsl_pool_open_special_dir(dp, FREE_DIR_NAME, &dp->dp_free_dir)); @@ -737,9 +901,9 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx) } taskq_t * -dsl_pool_vnrele_taskq(dsl_pool_t *dp) +dsl_pool_iput_taskq(dsl_pool_t *dp) { - return (dp->dp_vnrele_taskq); + return (dp->dp_iput_taskq); } /* @@ -784,11 +948,8 @@ dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx) ASSERT(dp->dp_tmp_userrefs_obj == 0); ASSERT(dmu_tx_is_syncing(tx)); - dp->dp_tmp_userrefs_obj = zap_create(mos, DMU_OT_USERREFS, - DMU_OT_NONE, 0, tx); - - VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, - sizeof (uint64_t), 1, &dp->dp_tmp_userrefs_obj, tx) == 0); + dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx); } static int @@ -846,3 +1007,29 @@ dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag, return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL, tx, B_FALSE)); } + +#if defined(_KERNEL) && defined(HAVE_SPL) +module_param(zfs_no_write_throttle, int, 0644); +MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling"); + +module_param(zfs_write_limit_shift, int, 0444); +MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg"); + +module_param(zfs_txg_synctime_ms, int, 0644); +MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync"); + +module_param(zfs_txg_history, int, 0644); +MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs"); + +module_param(zfs_write_limit_min, ulong, 0444); +MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit"); + +module_param(zfs_write_limit_max, ulong, 0444); +MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit"); + +module_param(zfs_write_limit_inflated, ulong, 0444); +MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit"); + +module_param(zfs_write_limit_override, ulong, 0444); +MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit"); +#endif
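
For context on the txg history interface added at the top of this patch: dsl_pool_txg_history_get() returns the matching entry with th_lock held, and dsl_pool_txg_history_put() drops that lock, so a caller can read or update the per-txg kstat fields without racing against the kstat ks_update handler. A minimal usage sketch, relying only on names visible in this patch; the surrounding example_txg_age() function is hypothetical, not part of the change:

	/*
	 * Illustrative only: report how long the given txg has been
	 * alive.  dsl_pool_txg_history_get() returns the entry locked,
	 * or NULL once it has aged out of the zfs_txg_history window;
	 * dsl_pool_txg_history_put() drops th_lock.
	 */
	static hrtime_t
	example_txg_age(dsl_pool_t *dp, uint64_t txg)
	{
		txg_history_t *th;
		hrtime_t age = 0;

		th = dsl_pool_txg_history_get(dp, txg);
		if (th != NULL) {
			age = gethrtime() - th->th_kstat.birth;
			dsl_pool_txg_history_put(th);
		}

		return (age);
	}

Since dsl_pool_txg_history_init() registers the kstat under module "zfs" with the name "txgs-<pool>", the history should be visible from userspace through the SPL kstat interface, presumably as /proc/spl/kstat/zfs/txgs-<pool> on Linux.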