/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"@(#)spa_misc.c	1.31	08/04/01 SMI"
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
/*
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against SPA_MINREF, but present the image of a zero/non-zero value
 *	to consumers.
 * spa_config_lock (per-spa read-priority rwlock)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or non-zero refcount to have any kind
 *	of spa_t pointer at all.
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *	NEVER reverse this lock order.
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly
 * and are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 * The spa_config_lock is a form of rwlock.  It must be held as RW_READER
 * to perform I/O to the pool, and as RW_WRITER to change the vdev config.
 * The spa_config_lock is manipulated with spa_config_{enter,exit,held}().
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */
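
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a consumer that honors the locking rules described above.  The function
 * name and the SPA_LOCKING_EXAMPLE guard are hypothetical; spa_open(),
 * spa_close(), spa_config_enter(), and spa_config_exit() are the real
 * entry points used elsewhere in this file.
 */
#ifdef SPA_LOCKING_EXAMPLE
static int
example_read_pool(const char *name)
{
	spa_t *spa;
	int err;

	/* spa_open() takes spa_namespace_lock and adds a refcount hold. */
	if ((err = spa_open(name, &spa, FTAG)) != 0)
		return (err);

	/* With a hold in place, take the config lock as reader for I/O. */
	spa_config_enter(spa, RW_READER, FTAG);

	/* ... perform I/O against the pool ... */

	spa_config_exit(spa, FTAG);
	spa_close(spa, FTAG);

	return (0);
}
#endif	/* SPA_LOCKING_EXAMPLE */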
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;
#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_config_lock_t *scl)
{
	mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
	scl->scl_writer = NULL;
	cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
	refcount_create(&scl->scl_count);
}

static void
spa_config_lock_destroy(spa_config_lock_t *scl)
{
	mutex_destroy(&scl->scl_lock);
	ASSERT(scl->scl_writer == NULL);
	cv_destroy(&scl->scl_cv);
	refcount_destroy(&scl->scl_count);
}
void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (rw == RW_READER) {
		while (scl->scl_writer != NULL && scl->scl_writer != curthread)
			cv_wait(&scl->scl_cv, &scl->scl_lock);
	} else {
		while (!refcount_is_zero(&scl->scl_count) &&
		    scl->scl_writer != curthread)
			cv_wait(&scl->scl_cv, &scl->scl_lock);
		scl->scl_writer = curthread;
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}
void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));

	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL || scl->scl_writer == curthread);
		scl->scl_writer = NULL;	/* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}
boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	if (rw == RW_READER)
		return (!refcount_is_zero(&scl->scl_count));
	else
		return (scl->scl_writer == curthread);
}
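
/*
 * Illustrative note (editor's addition): spa_config_held() is meant for
 * ASSERTs that document locking requirements rather than for runtime
 * decisions, as in spa_name() later in this file:
 *
 *	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
 *	    spa_config_held(spa, RW_READER));
 */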
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */
/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}
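
/*
 * Illustrative note (editor's addition): because the name is truncated at
 * the first '/' or '@', spa_lookup("tank/home@snap") and spa_lookup("tank")
 * resolve to the same spa_t.
 */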
/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_uberblock_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(&spa->spa_config_lock);

	avl_add(&spa_namespace_avl, spa);

	mutex_init(&spa->spa_zio_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	return (spa);
}
/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	spa_strfree(spa->spa_name);

	if (spa->spa_config_dir)
		spa_strfree(spa->spa_config_dir);
	if (spa->spa_config_file)
		spa_strfree(spa->spa_config_file);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(&spa->spa_config_lock);

	rw_destroy(&spa->spa_traverse_lock);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_scrub_cv);
	cv_destroy(&spa->spa_scrub_io_cv);

	mutex_destroy(&spa->spa_uberblock_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_config_cache_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_zio_lock);

	kmem_free(spa, sizeof (spa_t));
}
/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
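
/*
 * Illustrative sketch (editor's addition): spa_next() supports simple
 * iteration over the namespace while spa_namespace_lock is held:
 *
 *	spa_t *spa = NULL;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	while ((spa = spa_next(spa)) != NULL)
 *		...examine spa...
 *	mutex_exit(&spa_namespace_lock);
 */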
/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */
/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}
/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against SPA_MINREF, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
}
/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */
/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */
typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;
static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = guid;
	found = avl_find(avl, &search, &where);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a
 * reference counted AVL tree.  When a vdev is added as a spare, or used
 * as a replacement spare, we bump the reference count in the AVL tree.
 * In addition, we set the 'vdev_isspare' member to indicate that the
 * device is a spare (active or inactive).  When a spare is made active
 * (used to replace a device in the pool), we also keep track of which
 * pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
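
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * import/label code can use spa_spare_exists() to check whether a device
 * guid is already known as a spare and, if so, whether some pool has it
 * active (aux_pool is 0 for an inactive spare):
 *
 *	uint64_t pool;
 *
 *	if (spa_spare_exists(vd->vdev_guid, &pool) && pool != 0ULL)
 *		... the spare is currently active in another pool ...
 */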
static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently support only one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */
static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
{
	vdev_space_update(vd, space, alloc, B_FALSE);
}
/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */
/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	/*
	 * Suspend scrub activity while we mess with the config.  We must do
	 * this after acquiring the namespace lock to avoid a 3-way deadlock
	 * with spa_scrub_stop() and the scrub thread.
	 */
	spa_scrub_suspend(spa);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}
/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (error);
}
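
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical vdev reconfiguration brackets its work with spa_vdev_enter()
 * and spa_vdev_exit().  The function name and the SPA_VDEV_EXAMPLE guard
 * are hypothetical.
 */
#ifdef SPA_VDEV_EXAMPLE
static int
example_vdev_change(spa_t *spa)
{
	uint64_t txg = spa_vdev_enter(spa);

	/* ... modify the vdev tree; dirtied configs pick up this txg ... */

	return (spa_vdev_exit(spa, NULL, txg, 0));
}
#endif	/* SPA_VDEV_EXAMPLE */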
/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */
/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;
	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync();

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}
char *
spa_strdup(const char *s)
{
	size_t len = strlen(s);
	char *new = kmem_alloc(len + 1, KM_SLEEP);

	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}
void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}
void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
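
/*
 * Illustrative usage (editor's addition; the message text is hypothetical):
 * callers pass printf-style arguments describing the inconsistency, and
 * setting zfs_recover downgrades the panic to a warning:
 *
 *	zfs_panic_recover("freeing free segment (offset=%llx size=%llx)",
 *	    (longlong_t)offset, (longlong_t)size);
 */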
/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */
krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

boolean_t
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or the
	 * config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER));

	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}
/*
 * Return how much space is allocated in the pool (i.e., the sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}
/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}
/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}
/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
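
/*
 * Editor's note (illustrative): under this assumption a 128K logical block
 * may require up to 6 * 128K = 768K of worst-case allocated space.
 */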
/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}
uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}
int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
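
/*
 * Editor's note (illustrative): bp_get_dasize() below converts a blkptr's
 * allocated size into "deflated" units by scaling each DVA's asize by the
 * deflate ratio of its top-level vdev; pools without spa_deflate set
 * simply report the raw allocated size.
 */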
uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t sz = 0;
	int i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	spa_config_enter(spa, RW_READER, FTAG);
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		if (vd)
			sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
			    SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}
	spa_config_exit(spa, FTAG);

	return (sz);
}
/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */
static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}
void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	spa_config_load();
}
void
spa_fini(void)
{
	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}