X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fspa_misc.c;h=c82dca6c5693225200e5f384743f6f364aece0a1;hb=3541dc6d02592bd0939ea2d35b50c2bbdcc4cd0e;hp=485e83fcea00bbe61bd13fdfecdd7a13bf306d12;hpb=fb5f0bc83330c8a0236c4d34a23723ac1974971a;p=zfs.git

diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 485e83f..c82dca6 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -19,8 +19,9 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  */

 #include
@@ -41,10 +42,12 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
-#include
 #include
+#include
 #include "zfs_prop.h"

 /*
@@ -186,7 +189,7 @@
  *
  * SCL_VDEV
  *	Held as reader to prevent changes to the vdev tree during trivial
- *	inquiries such as bp_get_dasize().  SCL_VDEV is distinct from the
+ *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
  *	other locks, and lower than all of them, to ensure that it's safe
  *	to acquire regardless of caller context.
  *
@@ -232,21 +235,6 @@ static avl_tree_t spa_l2cache_avl;
 kmem_cache_t *spa_buffer_pool;
 int spa_mode_global;

-#ifdef ZFS_DEBUG
-/* Everything except dprintf is on by default in debug builds */
-int zfs_flags = ~ZFS_DEBUG_DPRINTF;
-#else
-int zfs_flags = 0;
-#endif
-
-/*
- * zfs_recover can be set to nonzero to attempt to recover from
- * otherwise-fatal errors, typically caused by on-disk corruption.  When
- * set, calls to zfs_panic_recover() will turn into warning messages.
- */
-int zfs_recover = 0;
-
-
 /*
  * ==========================================================================
  * SPA config locking
  * ==========================================================================
@@ -255,7 +243,9 @@ int zfs_recover = 0;
 static void
 spa_config_lock_init(spa_t *spa)
 {
-    for (int i = 0; i < SCL_LOCKS; i++) {
+    int i;
+
+    for (i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
         cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
@@ -268,7 +258,9 @@ spa_config_lock_init(spa_t *spa)
 static void
 spa_config_lock_destroy(spa_t *spa)
 {
-    for (int i = 0; i < SCL_LOCKS; i++) {
+    int i;
+
+    for (i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         mutex_destroy(&scl->scl_lock);
         cv_destroy(&scl->scl_cv);
@@ -281,7 +273,9 @@ spa_config_lock_destroy(spa_t *spa)
 int
 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 {
-    for (int i = 0; i < SCL_LOCKS; i++) {
+    int i;
+
+    for (i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
@@ -310,8 +304,13 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 void
 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
 {
-    for (int i = 0; i < SCL_LOCKS; i++) {
+    int wlocks_held = 0;
+    int i;
+
+    for (i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
+        if (scl->scl_writer == curthread)
+            wlocks_held |= (1 << i);
         if (!(locks & (1 << i)))
             continue;
         mutex_enter(&scl->scl_lock);
@@ -331,12 +330,15 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
         (void) refcount_add(&scl->scl_count, tag);
         mutex_exit(&scl->scl_lock);
     }
+    ASSERT(wlocks_held <= locks);
 }

 void
 spa_config_exit(spa_t *spa, int locks, void *tag)
 {
-    for (int i = SCL_LOCKS - 1; i >= 0; i--) {
+    int i;
+
+    for (i = SCL_LOCKS - 1; i >= 0; i--) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
@@ -355,9 +357,9 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
 int
 spa_config_held(spa_t *spa, int locks, krw_t rw)
 {
-    int locks_held = 0;
+    int i, locks_held = 0;

-    for (int i = 0; i < SCL_LOCKS; i++) {
+    for (i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
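The loops above are the heart of the SPA config locking scheme: each bit in `locks` selects one SCL_* lock, spa_config_enter() takes the selected locks in ascending bit order, and spa_config_exit() drops them in descending order so the lock ordering is never violated. A minimal userland sketch of that pattern, assuming pthreads stand in for the kernel mutex/cv primitives (the refcount and reader/writer bookkeeping are elided):

#include <pthread.h>

enum { N_LOCKS = 7 };                   /* stands in for SCL_LOCKS */

static pthread_mutex_t lock[N_LOCKS];

static void
locks_init(void)                        /* cf. spa_config_lock_init() */
{
    int i;

    for (i = 0; i < N_LOCKS; i++)
        pthread_mutex_init(&lock[i], NULL);
}

static void
locks_enter(int which)                  /* cf. spa_config_enter() */
{
    int i;

    for (i = 0; i < N_LOCKS; i++)       /* ascending bit order */
        if (which & (1 << i))
            pthread_mutex_lock(&lock[i]);
}

static void
locks_exit(int which)                   /* cf. spa_config_exit() */
{
    int i;

    for (i = N_LOCKS - 1; i >= 0; i--)  /* release in reverse order */
        if (which & (1 << i))
            pthread_mutex_unlock(&lock[i]);
}

int
main(void)
{
    locks_init();
    locks_enter(0x05);                  /* take locks 0 and 2 */
    locks_exit(0x05);
    return (0);
}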
@@ -385,7 +387,7 @@ spa_lookup(const char *name)
     static spa_t search;    /* spa_t is large; don't allocate on stack */
     spa_t *spa;
     avl_index_t where;
-    char c;
+    char c = 0;
     char *cp;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));
@@ -415,41 +417,47 @@ spa_lookup(const char *name)
  * exist by calling spa_lookup() first.
  */
 spa_t *
-spa_add(const char *name, const char *altroot)
+spa_add(const char *name, nvlist_t *config, const char *altroot)
 {
     spa_t *spa;
     spa_config_dirent_t *dp;
+    int t;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));

-    spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
+    spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP | KM_NODEBUG);

     mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
-    mutex_init(&spa->spa_async_root_lock, NULL, MUTEX_DEFAULT, NULL);
-    mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
-    mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
-    mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
+    mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
+    mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
+    mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
+    mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
+    mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

     cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
-    cv_init(&spa->spa_async_root_cv, NULL, CV_DEFAULT, NULL);
+    cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
     cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
     cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

+    for (t = 0; t < TXG_SIZE; t++)
+        bplist_create(&spa->spa_free_bplist[t]);
+
     (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
     spa->spa_state = POOL_STATE_UNINITIALIZED;
     spa->spa_freeze_txg = UINT64_MAX;
     spa->spa_final_txg = UINT64_MAX;
+    spa->spa_load_max_txg = UINT64_MAX;
+    spa->spa_proc = &p0;
+    spa->spa_proc_state = SPA_PROC_NONE;

     refcount_create(&spa->spa_refcount);
     spa_config_lock_init(spa);

     avl_add(&spa_namespace_avl, spa);

-    mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
-
     /*
      * Set the alternate root, if there is one.
      */
@@ -465,9 +473,15 @@ spa_add(const char *name, const char *altroot)
         offsetof(spa_config_dirent_t, scd_link));

     dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
-    dp->scd_path = spa_strdup(spa_config_path);
+    dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
     list_insert_head(&spa->spa_config_list, dp);

+    VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
+        KM_SLEEP) == 0);
+
+    if (config != NULL)
+        VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+
     return (spa);
 }

@@ -480,10 +494,13 @@ void
 spa_remove(spa_t *spa)
 {
     spa_config_dirent_t *dp;
+    int t;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));
     ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

+    nvlist_free(spa->spa_config_splitting);
+
     avl_remove(&spa_namespace_avl, spa);
     cv_broadcast(&spa_namespace_cv);

@@ -501,26 +518,30 @@ spa_remove(spa_t *spa)

     list_destroy(&spa->spa_config_list);

+    nvlist_free(spa->spa_load_info);
     spa_config_set(spa, NULL);

     refcount_destroy(&spa->spa_refcount);

     spa_config_lock_destroy(spa);

+    for (t = 0; t < TXG_SIZE; t++)
+        bplist_destroy(&spa->spa_free_bplist[t]);
+
     cv_destroy(&spa->spa_async_cv);
-    cv_destroy(&spa->spa_async_root_cv);
+    cv_destroy(&spa->spa_proc_cv);
     cv_destroy(&spa->spa_scrub_io_cv);
     cv_destroy(&spa->spa_suspend_cv);

     mutex_destroy(&spa->spa_async_lock);
-    mutex_destroy(&spa->spa_async_root_lock);
-    mutex_destroy(&spa->spa_scrub_lock);
-    mutex_destroy(&spa->spa_errlog_lock);
     mutex_destroy(&spa->spa_errlist_lock);
-    mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
+    mutex_destroy(&spa->spa_errlog_lock);
     mutex_destroy(&spa->spa_history_lock);
+    mutex_destroy(&spa->spa_proc_lock);
     mutex_destroy(&spa->spa_props_lock);
+    mutex_destroy(&spa->spa_scrub_lock);
     mutex_destroy(&spa->spa_suspend_lock);
+    mutex_destroy(&spa->spa_vdev_top_lock);

     kmem_free(spa, sizeof (spa_t));
 }
@@ -814,12 +835,6 @@ spa_l2cache_activate(vdev_t *vd)
     mutex_exit(&spa_l2cache_lock);
 }

-void
-spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
-{
-    vdev_space_update(vd, space, alloc, B_FALSE);
-}
-
 /*
  * ==========================================================================
  * SPA vdev locking
@@ -834,7 +849,20 @@ spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
 uint64_t
 spa_vdev_enter(spa_t *spa)
 {
+    mutex_enter(&spa->spa_vdev_top_lock);
     mutex_enter(&spa_namespace_lock);
+    return (spa_vdev_config_enter(spa));
+}
+
+/*
+ * Internal implementation for spa_vdev_enter().  Used when a vdev
+ * operation requires multiple syncs (i.e. removing a device) while
+ * keeping the spa_namespace_lock held.
+ */
+uint64_t
+spa_vdev_config_enter(spa_t *spa)
+{
+    ASSERT(MUTEX_HELD(&spa_namespace_lock));

     spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

@@ -842,16 +870,15 @@ spa_vdev_enter(spa_t *spa)
 }

 /*
- * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
- * locking of spa_vdev_enter(), we also want make sure the transactions have
- * synced to disk, and then update the global configuration cache with the new
- * information.
+ * Used in combination with spa_vdev_config_enter() to allow the syncing
+ * of multiple transactions without releasing the spa_namespace_lock.
  */
-int
-spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
+void
+spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
 {
     int config_changed = B_FALSE;

+    ASSERT(MUTEX_HELD(&spa_namespace_lock));
     ASSERT(txg > spa_last_synced_txg(spa));

     spa->spa_pending_vdev = NULL;
@@ -861,17 +888,28 @@ spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
      */
     vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

-    /*
-     * If the config changed, notify the scrub thread that it must restart.
-     */
     if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
-        dsl_pool_scrub_restart(spa->spa_dsl_pool);
         config_changed = B_TRUE;
+        spa->spa_config_generation++;
     }

+    /*
+     * Verify the metaslab classes.
+     */
+    ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
+    ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
+
     spa_config_exit(spa, SCL_ALL, spa);

     /*
+     * Panic the system if the specified tag requires it.  This
+     * is useful for ensuring that configurations are updated
+     * transactionally.
+     */
+    if (zio_injection_enabled)
+        zio_handle_panic_injection(spa, tag, 0);
+
+    /*
      * Note: this txg_wait_synced() is important because it ensures
      * that there won't be more than one config change per txg.
      * This allows us to use the txg as the generation number.
@@ -891,8 +929,20 @@ spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
      */
     if (config_changed)
         spa_config_sync(spa, B_FALSE, B_TRUE);
+}

+/*
+ * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
+ * locking of spa_vdev_enter(), we also want to make sure the transactions
+ * have synced to disk, and then update the global configuration cache with
+ * the new information.
+ */
+int
+spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
+{
+    spa_vdev_config_exit(spa, vd, txg, error, FTAG);
     mutex_exit(&spa_namespace_lock);
+    mutex_exit(&spa->spa_vdev_top_lock);

     return (error);
 }
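The spa_vdev_config_enter()/spa_vdev_config_exit() split introduced above exists so one vdev operation can sync several transactions without ever dropping spa_namespace_lock. A hedged sketch of the intended call pattern, with a hypothetical two-txg operation and error handling elided (this is schematic, not an actual caller from the tree):

/* Hypothetical two-txg vdev operation using the entry points above. */
static int
example_vdev_op(spa_t *spa)
{
    uint64_t txg;

    txg = spa_vdev_enter(spa);      /* vdev_top + namespace + SCL_ALL */
    /* ... first config change ... */

    /* sync this txg but keep spa_namespace_lock held */
    spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);

    txg = spa_vdev_config_enter(spa);   /* retake SCL_ALL */
    /* ... second config change ... */

    /* final sync; drops SCL_ALL, namespace, and vdev_top locks */
    return (spa_vdev_exit(spa, NULL, txg, 0));
}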
@@ -901,18 +951,52 @@ spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
  * Lock the given spa_t for the purpose of changing vdev state.
  */
 void
-spa_vdev_state_enter(spa_t *spa)
+spa_vdev_state_enter(spa_t *spa, int oplocks)
 {
-    spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
+    int locks = SCL_STATE_ALL | oplocks;
+
+    /*
+     * Root pools may need to read from the underlying devfs filesystem
+     * when opening up a vdev.  Unfortunately if we're holding the
+     * SCL_ZIO lock it will result in a deadlock when we try to issue
+     * the read from the root filesystem.  Instead we "prefetch"
+     * the associated vnodes that we need prior to opening the
+     * underlying devices and cache them so that we can prevent
+     * any I/O when we are doing the actual open.
+     */
+    if (spa_is_root(spa)) {
+        int low = locks & ~(SCL_ZIO - 1);
+        int high = locks & ~low;
+
+        spa_config_enter(spa, high, spa, RW_WRITER);
+        vdev_hold(spa->spa_root_vdev);
+        spa_config_enter(spa, low, spa, RW_WRITER);
+    } else {
+        spa_config_enter(spa, locks, spa, RW_WRITER);
+    }
+    spa->spa_vdev_locks = locks;
 }

 int
 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
 {
-    if (vd != NULL)
+    boolean_t config_changed = B_FALSE;
+
+    if (vd != NULL || error == 0)
+        vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
+            0, 0, B_FALSE);
+
+    if (vd != NULL) {
         vdev_state_dirty(vd->vdev_top);
+        config_changed = B_TRUE;
+        spa->spa_config_generation++;
+    }

-    spa_config_exit(spa, SCL_STATE_ALL, spa);
+    if (spa_is_root(spa))
+        vdev_rele(spa->spa_root_vdev);
+
+    ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
+    spa_config_exit(spa, spa->spa_vdev_locks, spa);

     /*
      * If anything changed, wait for it to sync.  This ensures that,
@@ -923,6 +1007,15 @@ spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
     if (vd != NULL)
         txg_wait_synced(spa->spa_dsl_pool, 0);

+    /*
+     * If the config changed, update the config cache.
+     */
+    if (config_changed) {
+        mutex_enter(&spa_namespace_lock);
+        spa_config_sync(spa, B_FALSE, B_TRUE);
+        mutex_exit(&spa_namespace_lock);
+    }
+
     return (error);
 }
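The low/high split in spa_vdev_state_enter() is plain bit arithmetic on the SCL_* values. A standalone check of what it computes, using the bit assignments from sys/spa.h (SCL_CONFIG = 0x01 through SCL_ZIO = 0x10; SCL_STATE_ALL = SCL_STATE | SCL_L2ARC | SCL_ZIO):

#include <assert.h>

enum {  /* SCL_* bit values as defined in sys/spa.h */
    SCL_CONFIG    = 0x01,
    SCL_STATE     = 0x02,
    SCL_L2ARC     = 0x04,
    SCL_ALLOC     = 0x08,
    SCL_ZIO       = 0x10,
    SCL_STATE_ALL = SCL_STATE | SCL_L2ARC | SCL_ZIO
};

int
main(void)
{
    int locks = SCL_STATE_ALL;          /* oplocks == 0 */
    int low = locks & ~(SCL_ZIO - 1);   /* bits at or above SCL_ZIO */
    int high = locks & ~low;            /* bits below SCL_ZIO */

    assert(low == SCL_ZIO);
    assert(high == (SCL_STATE | SCL_L2ARC));
    return (0);
}

So the locks below SCL_ZIO are taken first, vdev_hold() can still issue reads, and SCL_ZIO (and anything above it) is taken last.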
@@ -982,14 +1075,13 @@ spa_rename(const char *name, const char *newname)
     return (0);
 }

-
 /*
- * Determine whether a pool with given pool_guid exists.  If device_guid is
- * non-zero, determine whether the pool exists *and* contains a device with the
- * specified device_guid.
+ * Return the spa_t associated with given pool_guid, if it exists.  If
+ * device_guid is non-zero, determine whether the pool exists *and* contains
+ * a device with the specified device_guid.
  */
-boolean_t
-spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+spa_t *
+spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
 {
     spa_t *spa;
     avl_tree_t *t = &spa_namespace_avl;
@@ -1020,7 +1112,16 @@ spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
         }
     }

-    return (spa != NULL);
+    return (spa);
+}
+
+/*
+ * Determine whether a pool with the given pool_guid exists.
+ */
+boolean_t
+spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+{
+    return (spa_by_guid(pool_guid, device_guid) != NULL);
 }

 char *
@@ -1055,48 +1156,36 @@ spa_get_random(uint64_t range)
     return (r % range);
 }

-void
-sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
+uint64_t
+spa_generate_guid(spa_t *spa)
 {
-    int d;
+    uint64_t guid = spa_get_random(-1ULL);

-    if (bp == NULL) {
-        (void) snprintf(buf, len, "<NULL>");
-        return;
+    if (spa != NULL) {
+        while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
+            guid = spa_get_random(-1ULL);
+    } else {
+        while (guid == 0 || spa_guid_exists(guid, 0))
+            guid = spa_get_random(-1ULL);
     }

-    if (BP_IS_HOLE(bp)) {
-        (void) snprintf(buf, len, "<hole>");
-        return;
-    }
+    return (guid);
+}

-    (void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
-        (u_longlong_t)BP_GET_LEVEL(bp),
-        dmu_ot[BP_GET_TYPE(bp)].ot_name,
-        (u_longlong_t)BP_GET_LSIZE(bp),
-        (u_longlong_t)BP_GET_PSIZE(bp));
-
-    for (d = 0; d < BP_GET_NDVAS(bp); d++) {
-        const dva_t *dva = &bp->blk_dva[d];
-        (void) snprintf(buf + strlen(buf), len - strlen(buf),
-            "DVA[%d]=<%llu:%llx:%llx> ", d,
-            (u_longlong_t)DVA_GET_VDEV(dva),
-            (u_longlong_t)DVA_GET_OFFSET(dva),
-            (u_longlong_t)DVA_GET_ASIZE(dva));
+void
+sprintf_blkptr(char *buf, const blkptr_t *bp)
+{
+    char *type = NULL;
+    char *checksum = NULL;
+    char *compress = NULL;
+
+    if (bp != NULL) {
+        type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
+        checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
+        compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
     }

-    (void) snprintf(buf + strlen(buf), len - strlen(buf),
-        "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
-        zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
-        zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
-        BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
-        BP_IS_GANG(bp) ? "gang" : "contiguous",
-        (u_longlong_t)bp->blk_birth,
-        (u_longlong_t)bp->blk_fill,
-        (u_longlong_t)bp->blk_cksum.zc_word[0],
-        (u_longlong_t)bp->blk_cksum.zc_word[1],
-        (u_longlong_t)bp->blk_cksum.zc_word[2],
-        (u_longlong_t)bp->blk_cksum.zc_word[3]);
+    SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
 }

 void
@@ -1114,14 +1203,35 @@ spa_freeze(spa_t *spa)
     txg_wait_synced(spa_get_dsl(spa), freeze_txg);
 }

-void
-zfs_panic_recover(const char *fmt, ...)
+/*
+ * This is a stripped-down version of strtoull, suitable only for converting
+ * lowercase hexadecimal numbers that don't overflow.
+ */
+uint64_t
+strtonum(const char *str, char **nptr)
 {
-    va_list adx;
+    uint64_t val = 0;
+    char c;
+    int digit;

-    va_start(adx, fmt);
-    vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
-    va_end(adx);
+    while ((c = *str) != '\0') {
+        if (c >= '0' && c <= '9')
+            digit = c - '0';
+        else if (c >= 'a' && c <= 'f')
+            digit = 10 + c - 'a';
+        else
+            break;
+
+        val *= 16;
+        val += digit;
+
+        str++;
+    }
+
+    if (nptr)
+        *nptr = (char *)str;
+
+    return (val);
 }

 /*
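For illustration, a small userland exercise of the strtonum() added above (the extern declaration is only for a standalone build; the real definition lives in spa_misc.c). It shows the two documented limits: only digits and lowercase a-f are consumed, and the function reports where parsing stopped:

#include <assert.h>
#include <stdint.h>

extern uint64_t strtonum(const char *str, char **nptr);

int
main(void)
{
    char *end;
    uint64_t val = strtonum("1a2bXYZ", &end);

    assert(val == 0x1a2b);  /* lowercase hex digits consumed */
    assert(*end == 'X');    /* 'X' is not lowercase hex; parsing stops */
    return (0);
}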
@@ -1181,13 +1291,24 @@ spa_guid(spa_t *spa)
     /*
      * If we fail to parse the config during spa_load(), we can go through
      * the error path (which posts an ereport) and end up here with no root
-     * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
+     * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
      * this case.
      */
     if (spa->spa_root_vdev != NULL)
         return (spa->spa_root_vdev->vdev_guid);
     else
-        return (spa->spa_load_guid);
+        return (spa->spa_config_guid);
+}
+
+uint64_t
+spa_load_guid(spa_t *spa)
+{
+    /*
+     * This is a GUID that exists solely as a reference for the
+     * purposes of the arc.  It is generated at load time, and
+     * is never written to persistent storage.
+     */
+    return (spa->spa_load_guid);
 }

 uint64_t
@@ -1202,59 +1323,55 @@ spa_first_txg(spa_t *spa)
     return (spa->spa_first_txg);
 }

+uint64_t
+spa_syncing_txg(spa_t *spa)
+{
+    return (spa->spa_syncing_txg);
+}
+
 pool_state_t
 spa_state(spa_t *spa)
 {
     return (spa->spa_state);
 }

-uint64_t
-spa_freeze_txg(spa_t *spa)
+spa_load_state_t
+spa_load_state(spa_t *spa)
 {
-    return (spa->spa_freeze_txg);
+    return (spa->spa_load_state);
 }

-/*
- * Return how much space is allocated in the pool (ie. sum of all asize)
- */
 uint64_t
-spa_get_alloc(spa_t *spa)
+spa_freeze_txg(spa_t *spa)
 {
-    return (spa->spa_root_vdev->vdev_stat.vs_alloc);
+    return (spa->spa_freeze_txg);
 }

-/*
- * Return how much (raid-z inflated) space there is in the pool.
- */
+/* ARGSUSED */
 uint64_t
-spa_get_space(spa_t *spa)
+spa_get_asize(spa_t *spa, uint64_t lsize)
 {
-    return (spa->spa_root_vdev->vdev_stat.vs_space);
+    /*
+     * The worst case is single-sector max-parity RAID-Z blocks, in which
+     * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
+     * times the size; so just assume that.  Add to this the fact that
+     * we can have up to 3 DVAs per bp, and one more factor of 2 because
+     * the block may be dittoed with up to 3 DVAs by ddt_sync().
+     */
+    return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
 }

-/*
- * Return the amount of raid-z-deflated space in the pool.
- */
 uint64_t
 spa_get_dspace(spa_t *spa)
 {
-    if (spa->spa_deflate)
-        return (spa->spa_root_vdev->vdev_stat.vs_dspace);
-    else
-        return (spa->spa_root_vdev->vdev_stat.vs_space);
+    return (spa->spa_dspace);
 }

-/* ARGSUSED */
-uint64_t
-spa_get_asize(spa_t *spa, uint64_t lsize)
+void
+spa_update_dspace(spa_t *spa)
 {
-    /*
-     * For now, the worst case is 512-byte RAID-Z blocks, in which
-     * case the space requirement is exactly 2x; so just assume that.
-     * Add to this the fact that we can have up to 3 DVAs per bp, and
-     * we have to multiply by a total of 6x.
-     */
-    return (lsize * 6);
+    spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
+        ddt_get_dedup_dspace(spa);
 }

 /*
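The new spa_get_asize() bound is easy to sanity-check numerically: with VDEV_RAIDZ_MAXPARITY = 3 and SPA_DVAS_PER_BP = 3, the multiplier is (3 + 1) * 3 * 2 = 24, so for example a 4 KiB logical block reserves at most 96 KiB. A standalone check, with the constants copied from the zfs headers:

#include <assert.h>
#include <stdint.h>

enum { VDEV_RAIDZ_MAXPARITY = 3, SPA_DVAS_PER_BP = 3 };

static uint64_t
worst_case_asize(uint64_t lsize)    /* the spa_get_asize() formula */
{
    return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}

int
main(void)
{
    assert(worst_case_asize(4096) == 24 * 4096);    /* 96 KiB */
    return (0);
}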
@@ -1279,6 +1396,24 @@ spa_version(spa_t *spa)
     return (spa->spa_ubsync.ub_version);
 }

+boolean_t
+spa_deflate(spa_t *spa)
+{
+    return (spa->spa_deflate);
+}
+
+metaslab_class_t *
+spa_normal_class(spa_t *spa)
+{
+    return (spa->spa_normal_class);
+}
+
+metaslab_class_t *
+spa_log_class(spa_t *spa)
+{
+    return (spa->spa_log_class);
+}
+
 int
 spa_max_replication(spa_t *spa)
 {
@@ -1292,24 +1427,54 @@ spa_max_replication(spa_t *spa)
     return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
 }

+int
+spa_prev_software_version(spa_t *spa)
+{
+    return (spa->spa_prev_software_version);
+}
+
 uint64_t
-bp_get_dasize(spa_t *spa, const blkptr_t *bp)
+dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
 {
-    int sz = 0, i;
+    uint64_t asize = DVA_GET_ASIZE(dva);
+    uint64_t dsize = asize;

-    if (!spa->spa_deflate)
-        return (BP_GET_ASIZE(bp));
+    ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

-    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
-    for (i = 0; i < SPA_DVAS_PER_BP; i++) {
-        vdev_t *vd =
-            vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
-        if (vd)
-            sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
-                SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
+    if (asize != 0 && spa->spa_deflate) {
+        vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
+        dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
     }
+
+    return (dsize);
+}
+
+uint64_t
+bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
+{
+    uint64_t dsize = 0;
+    int d;
+
+    for (d = 0; d < SPA_DVAS_PER_BP; d++)
+        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
+
+    return (dsize);
+}
+
+uint64_t
+bp_get_dsize(spa_t *spa, const blkptr_t *bp)
+{
+    uint64_t dsize = 0;
+    int d;
+
+    spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+    for (d = 0; d < SPA_DVAS_PER_BP; d++)
+        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
+
     spa_config_exit(spa, SCL_VDEV, FTAG);
-    return (sz);
+
+    return (dsize);
 }

 /*
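dva_get_dsize_sync() converts an allocated size into 512-byte sectors (SPA_MINBLOCKSHIFT is 9) and scales by the top-level vdev's deflate ratio, which is normalized so that a ratio of 512 means no deflation at all. A standalone sketch of just that arithmetic; the 384 ratio below is a made-up example value, not taken from a real vdev:

#include <assert.h>
#include <stdint.h>

enum { SPA_MINBLOCKSHIFT = 9 };     /* 512-byte sectors */

static uint64_t
deflated_size(uint64_t asize, uint64_t deflate_ratio)
{
    /* the per-DVA computation from dva_get_dsize_sync() */
    return ((asize >> SPA_MINBLOCKSHIFT) * deflate_ratio);
}

int
main(void)
{
    assert(deflated_size(8192, 512) == 8192);   /* ratio 512: dsize == asize */
    assert(deflated_size(8192, 384) == 6144);   /* hypothetical raidz ratio */
    return (0);
}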
@@ -1333,14 +1498,8 @@ spa_name_compare(const void *a1, const void *a2)
     return (0);
 }

-int
-spa_busy(void)
-{
-    return (spa_active_count);
-}
-
 void
-spa_boot_init()
+spa_boot_init(void)
 {
     spa_config_load();
 }
@@ -1364,6 +1523,7 @@ spa_init(int mode)

     spa_mode_global = mode;

+    fm_init();
     refcount_init();
     unique_init();
     zio_init();
@@ -1389,6 +1549,7 @@ spa_fini(void)
     zio_fini();
     unique_fini();
     refcount_fini();
+    fm_fini();

     avl_destroy(&spa_namespace_avl);
     avl_destroy(&spa_spare_avl);
@@ -1411,9 +1572,18 @@ spa_has_slogs(spa_t *spa)
     return (spa->spa_log_class->mc_rotor != NULL);
 }

-/*
- * Return whether this pool is the root pool.
- */
+spa_log_state_t
+spa_get_log_state(spa_t *spa)
+{
+    return (spa->spa_log_state);
+}
+
+void
+spa_set_log_state(spa_t *spa, spa_log_state_t state)
+{
+    spa->spa_log_state = state;
+}
+
 boolean_t
 spa_is_root(spa_t *spa)
 {
@@ -1431,3 +1601,155 @@ spa_mode(spa_t *spa)
 {
     return (spa->spa_mode);
 }
+
+uint64_t
+spa_bootfs(spa_t *spa)
+{
+    return (spa->spa_bootfs);
+}
+
+uint64_t
+spa_delegation(spa_t *spa)
+{
+    return (spa->spa_delegation);
+}
+
+objset_t *
+spa_meta_objset(spa_t *spa)
+{
+    return (spa->spa_meta_objset);
+}
+
+enum zio_checksum
+spa_dedup_checksum(spa_t *spa)
+{
+    return (spa->spa_dedup_checksum);
+}
+
+/*
+ * Reset pool scan stats per scan pass (or reboot).
+ */
+void
+spa_scan_stat_init(spa_t *spa)
+{
+    /* data not stored on disk */
+    spa->spa_scan_pass_start = gethrestime_sec();
+    spa->spa_scan_pass_exam = 0;
+    vdev_scan_stat_init(spa->spa_root_vdev);
+}
+
+/*
+ * Get scan stats for zpool status reports
+ */
+int
+spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
+{
+    dsl_scan_t *scn = spa->spa_dsl_pool ?
+        spa->spa_dsl_pool->dp_scan : NULL;
+
+    if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
+        return (ENOENT);
+    bzero(ps, sizeof (pool_scan_stat_t));
+
+    /* data stored on disk */
+    ps->pss_func = scn->scn_phys.scn_func;
+    ps->pss_start_time = scn->scn_phys.scn_start_time;
+    ps->pss_end_time = scn->scn_phys.scn_end_time;
+    ps->pss_to_examine = scn->scn_phys.scn_to_examine;
+    ps->pss_examined = scn->scn_phys.scn_examined;
+    ps->pss_to_process = scn->scn_phys.scn_to_process;
+    ps->pss_processed = scn->scn_phys.scn_processed;
+    ps->pss_errors = scn->scn_phys.scn_errors;
+    ps->pss_state = scn->scn_phys.scn_state;
+
+    /* data not stored on disk */
+    ps->pss_pass_start = spa->spa_scan_pass_start;
+    ps->pss_pass_exam = spa->spa_scan_pass_exam;
+
+    return (0);
+}
+
+boolean_t
+spa_debug_enabled(spa_t *spa)
+{
+    return (spa->spa_debug);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+/* Namespace manipulation */
+EXPORT_SYMBOL(spa_lookup);
+EXPORT_SYMBOL(spa_add);
+EXPORT_SYMBOL(spa_remove);
+EXPORT_SYMBOL(spa_next);
+
+/* Refcount functions */
+EXPORT_SYMBOL(spa_open_ref);
+EXPORT_SYMBOL(spa_close);
+EXPORT_SYMBOL(spa_refcount_zero);
+
+/* Pool configuration lock */
+EXPORT_SYMBOL(spa_config_tryenter);
+EXPORT_SYMBOL(spa_config_enter);
+EXPORT_SYMBOL(spa_config_exit);
+EXPORT_SYMBOL(spa_config_held);
+
+/* Pool vdev add/remove lock */
+EXPORT_SYMBOL(spa_vdev_enter);
+EXPORT_SYMBOL(spa_vdev_exit);
+
+/* Pool vdev state change lock */
+EXPORT_SYMBOL(spa_vdev_state_enter);
+EXPORT_SYMBOL(spa_vdev_state_exit);
+
+/* Accessor functions */
+EXPORT_SYMBOL(spa_shutting_down);
+EXPORT_SYMBOL(spa_get_dsl);
+EXPORT_SYMBOL(spa_get_rootblkptr);
+EXPORT_SYMBOL(spa_set_rootblkptr);
+EXPORT_SYMBOL(spa_altroot);
+EXPORT_SYMBOL(spa_sync_pass);
+EXPORT_SYMBOL(spa_name);
+EXPORT_SYMBOL(spa_guid);
+EXPORT_SYMBOL(spa_last_synced_txg);
+EXPORT_SYMBOL(spa_first_txg);
+EXPORT_SYMBOL(spa_syncing_txg);
+EXPORT_SYMBOL(spa_version);
+EXPORT_SYMBOL(spa_state);
+EXPORT_SYMBOL(spa_load_state);
+EXPORT_SYMBOL(spa_freeze_txg);
+EXPORT_SYMBOL(spa_get_asize);
+EXPORT_SYMBOL(spa_get_dspace);
+EXPORT_SYMBOL(spa_update_dspace);
+EXPORT_SYMBOL(spa_deflate);
+EXPORT_SYMBOL(spa_normal_class);
+EXPORT_SYMBOL(spa_log_class);
+EXPORT_SYMBOL(spa_max_replication);
+EXPORT_SYMBOL(spa_prev_software_version);
+EXPORT_SYMBOL(spa_get_failmode);
+EXPORT_SYMBOL(spa_suspended);
+EXPORT_SYMBOL(spa_bootfs);
+EXPORT_SYMBOL(spa_delegation);
+EXPORT_SYMBOL(spa_meta_objset);
+
+/* Miscellaneous support routines */
+EXPORT_SYMBOL(spa_rename);
+EXPORT_SYMBOL(spa_guid_exists);
+EXPORT_SYMBOL(spa_strdup);
+EXPORT_SYMBOL(spa_strfree);
+EXPORT_SYMBOL(spa_get_random);
+EXPORT_SYMBOL(spa_generate_guid);
+EXPORT_SYMBOL(sprintf_blkptr);
+EXPORT_SYMBOL(spa_freeze);
+EXPORT_SYMBOL(spa_upgrade);
+EXPORT_SYMBOL(spa_evict_all);
+EXPORT_SYMBOL(spa_lookup_by_guid);
+EXPORT_SYMBOL(spa_has_spare);
+EXPORT_SYMBOL(dva_get_dsize_sync);
+EXPORT_SYMBOL(bp_get_dsize_sync);
+EXPORT_SYMBOL(bp_get_dsize);
+EXPORT_SYMBOL(spa_has_slogs);
+EXPORT_SYMBOL(spa_is_root);
+EXPORT_SYMBOL(spa_writeable);
+EXPORT_SYMBOL(spa_mode);
+
+EXPORT_SYMBOL(spa_namespace_lock);
+#endif
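As a usage note, spa_scan_get_stats() above is what the zpool-status ioctl path consumes. A hedged in-kernel sketch of deriving overall scrub/resilver progress from it (the helper name is hypothetical; a held spa_t is assumed):

/* Hypothetical consumer of spa_scan_get_stats(); assumes a held spa_t. */
static int
scan_progress_pct(spa_t *spa)
{
    pool_scan_stat_t ps;

    if (spa_scan_get_stats(spa, &ps) != 0 || ps.pss_to_examine == 0)
        return (-1);    /* no scan has run, or nothing to examine */

    return ((int)(ps.pss_examined * 100 / ps.pss_to_examine));
}

The pss_pass_* fields, by contrast, reset on every pass (see spa_scan_stat_init() above), so they are the ones to use for per-pass rate estimates.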