/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing
 * a pool.
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>

#include "zfs_comutil.h"
int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/*	ISSUE	INTR	*/
	{	1,	1	},	/* ZIO_TYPE_NULL */
	{	1,	8	},	/* ZIO_TYPE_READ */
	{	8,	1	},	/* ZIO_TYPE_WRITE */
	{	1,	1	},	/* ZIO_TYPE_FREE */
	{	1,	1	},	/* ZIO_TYPE_CLAIM */
	{	1,	1	},	/* ZIO_TYPE_IOCTL */
};
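/*
 * Each row above gives the ISSUE and INTR taskq thread counts for one zio
 * type; spa_activate() below turns every entry into a taskq via
 * taskq_create().  Reads issue from a single thread but are completed by
 * eight interrupt threads, and writes are the mirror image.
 */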
static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */
/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
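/*
 * The entry built above has the shape (a sketch, not literal nvlist syntax):
 *
 *	propname -> { ZPROP_SOURCE = src, ZPROP_VALUE = strval or intval }
 *
 * Both string- and integer-valued properties go through this one helper;
 * a non-NULL strval takes precedence.
 */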
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	uint64_t size, used;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (spa->spa_root_vdev != NULL) {
		size = spa_get_space(spa);
		used = spa_get_alloc(spa);
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
		    size - used, src);

		cap = (size == 0) ? 0 : (used * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
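		/*
		 * cap above is integer percent full: e.g. used = 30G on a
		 * size = 120G pool yields cap = 30 * 100 / 120 = 25.
		 */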
		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    spa->spa_root_vdev->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}
/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos = spa->spa_meta_objset;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more props to get. */
	if (spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				rw_enter(&dp->dp_config_rwlock, RW_READER);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					rw_exit(&dp->dp_config_rwlock);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				rw_exit(&dp->dp_config_rwlock);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);

	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
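/*
 * Note on ownership: on success the caller gets *nvp and is expected to
 * nvlist_free() it; on any error other than ENOENT the list is freed here
 * and *nvp is NULLed (inferred from the cleanup above rather than a
 * documented contract).
 */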
/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		zpool_prop_t prop;
		char *propname, *strval;
		uint64_t intval;
		objset_t *os;
		char *slash;

		propname = nvpair_name(elem);

		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL)
			return (EINVAL);

		switch (prop) {
		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) || intval > SPA_VERSION))
				error = EINVAL;
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = EINVAL;
			break;

		case ZPOOL_PROP_BOOTFS:
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = ENOTSUP;
				break;
			}

			/*
			 * Make sure the vdev config is bootable.
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = ENOTSUP;
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_open(strval, DMU_OST_ZFS,
				    DS_MODE_USER | DS_MODE_READONLY, &os))
					break;

				/* We don't support gzip bootable datasets */
				if ((error = dsl_prop_get_integer(strval,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress, NULL)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = ENOTSUP;
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_close(os);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = EINVAL;

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = EIO;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = EINVAL;
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = EINVAL;
			break;
		}

		if (error)
			break;
	}
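	/*
	 * bootfs reaches us from userland as a dataset name string, but it
	 * is stored in the pool properties object as a dataset object
	 * number, so swap the validated string pair for the objnum computed
	 * above.
	 */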
	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
static void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	zpool_prop_t prop;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		if ((prop = zpool_name_to_prop(
		    nvpair_name(elem))) == ZPROP_INVAL)
			return (EINVAL);

		if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
			continue;

		need_sync = B_TRUE;
		break;
	}

	if (need_sync)
		return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
		    spa, nvp, 3));

	return (0);
}
/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */
static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
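/*
 * After spa_get_errlists() returns, the caller owns the populated trees
 * (and presumably must drain and free their entries), while the spa is
 * left holding two fresh, empty error lists.
 */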
/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create();
	spa->spa_log_class = metaslab_class_create();

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
			    zio_taskq_threads[t][q], maxclsyspri, 50,
			    INT_MAX, TASKQ_PREPOPULATE);
		}
	}
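	/*
	 * All of the queues above share the name "spa_zio" and run at
	 * maxclsyspri; the 50/INT_MAX pair is taskq_create()'s
	 * minalloc/maxalloc for task structures, with TASKQ_PREPOPULATE
	 * pre-allocating the minimum up front.
	 */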
	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}
/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);

	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			taskq_destroy(spa->spa_zio_taskq[t][q]);
			spa->spa_zio_taskq[t][q] = NULL;
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;
}
/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t c, children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (EINVAL);
	}

	for (c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	mutex_enter(&spa->spa_async_root_lock);
	while (spa->spa_async_root_count != 0)
		cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
	mutex_exit(&spa->spa_async_root_lock);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache.
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++)
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_TRUE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
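/*
 * Note the round trip above: sav_config (an nvlist) is parsed into
 * sav_vdevs (vdev_t's), each vdev is opened and validated, and then
 * sav_config is regenerated from the vdevs so it reflects current status.
 * spa_load_l2cache() below follows the same pattern.
 */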
/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool. When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid, size;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev.
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd)) {
				size = vdev_get_rsize(vd);
				l2arc_add_vdev(spa, vd,
				    VDEV_LABEL_START_SIZE,
				    size - VDEV_LABEL_START_SIZE);
			}
		}
	}

	/*
	 * Purge vdevs that were dropped.
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			(void) vdev_close(vd);
			spa_l2cache_remove(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
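/*
 * On-disk layout assumed by load_nvlist(): the object's bonus buffer holds
 * the packed size as a uint64_t, and the object data itself is the packed
 * nvlist of exactly that size.
 */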
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}
/*
 * Check for missing log devices.
 */
static int
spa_check_logs(spa_t *spa)
{
	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL,
		    DS_FIND_CHILDREN)) {
			spa->spa_log_state = SPA_LOG_MISSING;
			return (1);
		}
		break;

	case SPA_LOG_CLEAR:
		(void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
		    DS_FIND_CHILDREN);
		break;
	}
	spa->spa_log_state = SPA_LOG_GOOD;
	return (0);
}
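/*
 * A nonzero return from spa_check_logs() means an intent log device is
 * missing; spa_load() below turns that into ENXIO with VDEV_AUX_BAD_LOG
 * and an FM_EREPORT_ZFS_LOG_REPLAY ereport.
 */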
/*
 * Load an existing storage pool, using the pool's builtin spa_config as a
 * source of configuration information.
 */
static int
spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
{
	int error = 0;
	nvlist_t *nvroot = NULL;
	vdev_t *rvd;
	uberblock_t *ub = &spa->spa_uberblock;
	uint64_t config_cache_txg = spa->spa_config_txg;
	uint64_t pool_guid;
	uint64_t version;
	uint64_t autoreplace = 0;
	int orig_mode = spa->spa_mode;
	char *ereport = FM_EREPORT_ZFS_POOL;

	/*
	 * If this is an untrusted config, access the pool in read-only mode.
	 * This prevents things like resilvering recently removed devices.
	 */
	if (!mosconfig)
		spa->spa_mode = FREAD;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa->spa_load_state = state;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Versioning wasn't explicitly added to the label until later, so if
	 * it's not present treat it as the initial version.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0)
		version = SPA_VERSION_INITIAL;

	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &spa->spa_config_txg);

	if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
	    spa_guid_exists(pool_guid, 0)) {
		error = EEXIST;
		goto out;
	}

	spa->spa_load_guid = pool_guid;

	/*
	 * Parse the configuration into a vdev tree. We explicitly set the
	 * value that will be returned by spa_version() since parsing the
	 * configuration requires knowing the version number.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	spa->spa_ubsync.ub_version = version;
	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0)
		goto out;

	ASSERT(spa->spa_root_vdev == rvd);
	ASSERT(spa_guid(spa) == pool_guid);

	/*
	 * Try to open all vdevs, loading each label in the process.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_open(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	/*
	 * Validate the labels for all leaf vdevs. We need to grab the config
	 * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	error = vdev_validate(rvd);
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (error != 0)
		goto out;

	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	/*
	 * Find the best uberblock.
	 */
	vdev_uberblock_load(NULL, rvd, ub);

	/*
	 * If we weren't able to find a single valid uberblock, return failure.
	 */
	if (ub->ub_txg == 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = ENXIO;
		goto out;
	}

	/*
	 * If the pool is newer than the code, we can't open it.
	 */
	if (ub->ub_version > SPA_VERSION) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_VERSION_NEWER);
		error = ENOTSUP;
		goto out;
	}

	/*
	 * If the vdev guid sum doesn't match the uberblock, we have an
	 * incomplete configuration.
	 */
	if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_GUID_SUM);
		error = ENXIO;
		goto out;
	}

	/*
	 * Initialize internal SPA structures.
	 */
	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_ubsync = spa->spa_uberblock;
	spa->spa_first_txg = spa_last_synced_txg(spa) + 1;
	error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
	if (error) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		goto out;
	}
	spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (!mosconfig) {
		nvlist_t *newconfig;
		uint64_t hostid;

		if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig,
		    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
			char *hostname;
			unsigned long myhostid = 0;

			VERIFY(nvlist_lookup_string(newconfig,
			    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);

#ifdef	_KERNEL
			myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
			/*
			 * We're emulating the system's hostid in userland, so
			 * we can't use zone_get_hostid().
			 */
			(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */
			if (hostid != 0 && myhostid != 0 &&
			    hostid != myhostid) {
				cmn_err(CE_WARN, "pool '%s' could not be "
				    "loaded as it was last accessed by "
				    "another system (host: %s hostid: 0x%lx). "
				    "See: http://www.sun.com/msg/ZFS-8000-EY",
				    spa_name(spa), hostname,
				    (unsigned long)hostid);
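				/*
				 * (This hostid comparison is a best-effort
				 * guard against importing a pool last written
				 * by another live system; it is a sanity
				 * check, not true multi-host fencing.)
				 */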
				error = EBADF;
				goto out;
			}
		}

		spa_config_set(spa, newconfig);
		spa_unload(spa);
		spa_deactivate(spa);
		spa_activate(spa, orig_mode);

		return (spa_load(spa, newconfig, state, B_TRUE));
	}

	if (zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the bit that tells us to use the new accounting function
	 * (raid-z deflation). If we have an older pool, this will not
	 * be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
	    sizeof (uint64_t), 1, &spa->spa_deflate);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the persistent error log. If we have an older pool, this will
	 * not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
	    sizeof (uint64_t), 1, &spa->spa_errlog_last);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
	    sizeof (uint64_t), 1, &spa->spa_errlog_scrub);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load the history object. If we have an older pool, this
	 * will not be present.
	 */
	error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY,
	    sizeof (uint64_t), 1, &spa->spa_history);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	/*
	 * Load any hot spares for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
		if (load_nvlist(spa, spa->spa_spares.sav_object,
		    &spa->spa_spares.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	/*
	 * Load any level 2 ARC devices for this pool.
	 */
	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_L2CACHE, sizeof (uint64_t), 1,
	    &spa->spa_l2cache.sav_object);
	if (error != 0 && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}
	if (error == 0) {
		ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
		if (load_nvlist(spa, spa->spa_l2cache.sav_object,
		    &spa->spa_l2cache.sav_config) != 0) {
			vdev_set_state(rvd, B_TRUE,
			    VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_CORRUPT_DATA);
			error = EIO;
			goto out;
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
	}

	if (spa_check_logs(spa)) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_BAD_LOG);
		error = ENXIO;
		ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		goto out;
	}
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object);

	if (error && error != ENOENT) {
		vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_CORRUPT_DATA);
		error = EIO;
		goto out;
	}

	if (error == 0) {
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
		    sizeof (uint64_t), 1, &spa->spa_bootfs);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
		    sizeof (uint64_t), 1, &autoreplace);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
		    sizeof (uint64_t), 1, &spa->spa_delegation);
		(void) zap_lookup(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
		    sizeof (uint64_t), 1, &spa->spa_failmode);
	}

	/*
	 * If the 'autoreplace' property is set, then post a resource notifying
	 * the ZFS DE that it should not issue any faults for unopenable
	 * devices. We also iterate over the vdevs, and post a sysevent for any
	 * unopenable vdevs so that the normal autoreplace handler can take
	 * over.
	 */
	if (autoreplace && state != SPA_LOAD_TRYIMPORT)
		spa_check_removed(spa->spa_root_vdev);

	/*
	 * Load the vdev state for all toplevel vdevs.
	 */
	vdev_load(rvd);

	/*
	 * Propagate the leaf DTLs we just loaded all the way up the tree.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Check the state of the root vdev. If it can't be opened, it
	 * indicates one or more toplevel vdevs are faulted.
	 */
	if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
		error = ENXIO;
		goto out;
	}

	if (spa_writeable(spa)) {
		dmu_tx_t *tx;
		int need_update = B_FALSE;

		ASSERT(state != SPA_LOAD_TRYIMPORT);

		/*
		 * Claim log blocks that haven't been committed yet.
		 * This must all happen in a single txg.
		 */
		tx = dmu_tx_create_assigned(spa_get_dsl(spa),
		    spa_first_txg(spa));
		(void) dmu_objset_find(spa_name(spa),
		    zil_claim, tx, DS_FIND_CHILDREN);
		dmu_tx_commit(tx);

		spa->spa_sync_on = B_TRUE;
		txg_sync_start(spa->spa_dsl_pool);

		/*
		 * Wait for all claims to sync.
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);

		/*
		 * If the config cache is stale, or we have uninitialized
		 * metaslabs (see spa_vdev_add()), then update the config.
		 */
		if (config_cache_txg != spa->spa_config_txg ||
		    state == SPA_LOAD_IMPORT)
			need_update = B_TRUE;

		for (int c = 0; c < rvd->vdev_children; c++)
			if (rvd->vdev_child[c]->vdev_ms_array == 0)
				need_update = B_TRUE;

		/*
		 * Update the config cache asynchronously in case we're the
		 * root pool, in which case the config cache isn't writable yet.
		 */
		if (need_update)
			spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		/*
		 * Check all DTLs to see if anything needs resilvering.
		 */
		if (vdev_resilver_needed(rvd, NULL, NULL))
			spa_async_request(spa, SPA_ASYNC_RESILVER);
	}

	error = 0;
out:
	spa->spa_minref = refcount_count(&spa->spa_refcount);
	if (error && error != EBADF)
		zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0);
	spa->spa_load_state = SPA_LOAD_NONE;

	return (error);
}
/*
 * Pool Open/Import
 *
 * The import case is identical to an open except that the configuration is
 * sent down from userland, instead of grabbed from the configuration cache.
 * For the case of an open, the pool configuration will exist in the
 * POOL_STATE_UNINITIALIZED state.
 *
 * The stats information (gen/count/ustats) is used to gather vdev statistics
 * at the same time we open the pool, without having to keep around the spa_t
 * in some inconsistent state.
 */
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config)
{
	spa_t *spa;
	int error;
	int locked = B_FALSE;

	*spapp = NULL;

	/*
	 * As disgusting as this is, we need to support recursive calls to this
	 * function because dsl_dir_open() is called during spa_load(), and ends
	 * up calling spa_open() again. The real fix is to figure out how to
	 * avoid dsl_dir_open() calling this in the first place.
	 */
	if (mutex_owner(&spa_namespace_lock) != curthread) {
		mutex_enter(&spa_namespace_lock);
		locked = B_TRUE;
	}

	if ((spa = spa_lookup(pool)) == NULL) {
		if (locked)
			mutex_exit(&spa_namespace_lock);
		return (ENOENT);
	}
	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {

		spa_activate(spa, spa_mode_global);

		error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);

		if (error == EBADF) {
			/*
			 * If vdev_validate() returns failure (indicated by
			 * EBADF), it indicates that one of the vdevs indicates
			 * that the pool has been exported or destroyed. If
			 * this is the case, the config cache is out of sync and
			 * we should remove the pool from the namespace.
			 */
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_TRUE, B_TRUE);
			spa_remove(spa);
			if (locked)
				mutex_exit(&spa_namespace_lock);
			return (ENOENT);
		}

		if (error) {
			/*
			 * We can't open the pool, but we still have useful
			 * information: the state of each vdev after the
			 * attempted vdev_open(). Return this to the user.
			 */
			if (config != NULL && spa->spa_root_vdev != NULL)
				*config = spa_config_generate(spa, NULL, -1ULL,
				    B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa->spa_last_open_failed = B_TRUE;
			if (locked)
				mutex_exit(&spa_namespace_lock);
			*spapp = NULL;
			return (error);
		}

		spa->spa_last_open_failed = B_FALSE;
	}

	spa_open_ref(spa, tag);

	if (locked)
		mutex_exit(&spa_namespace_lock);

	*spapp = spa;

	if (config != NULL)
		*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);

	return (0);
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
	return (spa_open_common(name, spapp, tag, NULL));
}

/*
 * Lookup the given spa_t, incrementing the inject count in the process,
 * preventing it from being exported or destroyed.
 */
spa_t *
spa_inject_addref(char *name)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (NULL);
	}
	spa->spa_inject_ref++;
	mutex_exit(&spa_namespace_lock);

	return (spa);
}

void
spa_inject_delref(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa->spa_inject_ref--;
	mutex_exit(&spa_namespace_lock);
}
/*
 * Add spares device information to the nvlist.
 */
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
	nvlist_t **spares;
	uint_t i, nspares;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t pool;

	if (spa->spa_spares.sav_count == 0)
		return;

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if (nspares != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

		/*
		 * Go through and find any spares which have since been
		 * repurposed as an active spare. If this is the case, update
		 * their status appropriately.
		 */
		for (i = 0; i < nspares; i++) {
			VERIFY(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (spa_spare_exists(guid, &pool, NULL) &&
			    pool != 0ULL) {
				VERIFY(nvlist_lookup_uint64_array(
				    spares[i], ZPOOL_CONFIG_STATS,
				    (uint64_t **)&vs, &vsc) == 0);
				vs->vs_state = VDEV_STATE_CANT_OPEN;
				vs->vs_aux = VDEV_AUX_SPARED;
			}
		}
	}
}
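/*
 * (A shared spare that is currently in use is thus reported to userland as
 * CANT_OPEN/AUX_SPARED, presumably so tools such as zpool(1M) can display
 * it as "in use" rather than failed.)
 */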
/*
 * Add l2cache device information to the nvlist, including vdev stats.
 */
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
	nvlist_t **l2cache;
	uint_t i, j, nl2cache;
	nvlist_t *nvroot;
	uint64_t guid;
	vdev_t *vd;
	vdev_stat_t *vs;
	uint_t vsc;

	if (spa->spa_l2cache.sav_count == 0)
		return;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	VERIFY(nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
	if (nl2cache != 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvroot,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);

		/*
		 * Update level 2 cache device stats.
		 */
		for (i = 0; i < nl2cache; i++) {
			VERIFY(nvlist_lookup_uint64(l2cache[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);

			vd = NULL;
			for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
				if (guid ==
				    spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
					vd = spa->spa_l2cache.sav_vdevs[j];
					break;
				}
			}
			ASSERT(vd != NULL);

			VERIFY(nvlist_lookup_uint64_array(l2cache[i],
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			vdev_get_stats(vd, vs);
		}
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
}

int
spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, config);

	if (spa && *config != NULL) {
		VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
		    spa_get_errlog_size(spa)) == 0);

		if (spa_suspended(spa))
			VERIFY(nvlist_add_uint64(*config,
			    ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);

		spa_add_spares(spa, *config);
		spa_add_l2cache(spa, *config);
	}

	/*
	 * We want to get the alternate root even for faulted pools, so we cheat
	 * and call spa_lookup() directly.
	 */
	if (altroot) {
		if (spa == NULL) {
			mutex_enter(&spa_namespace_lock);
			spa = spa_lookup(name);
			if (spa)
				spa_altroot(spa, altroot, buflen);
			else
				altroot[0] = '\0';
			spa = NULL;
			mutex_exit(&spa_namespace_lock);
		} else {
			spa_altroot(spa, altroot, buflen);
		}
	}

	if (spa != NULL)
		spa_close(spa, FTAG);

	return (error);
}
/*
 * Validate that the auxiliary device array is well formed. We must have an
 * array of nvlists, each of which describes a valid leaf vdev. If this is an
 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
 * specified, as long as they are well-formed.
 */
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
    spa_aux_vdev_t *sav, const char *config, uint64_t version,
    vdev_labeltype_t label)
{
	nvlist_t **dev;
	uint_t i, ndev;
	vdev_t *vd;
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * It's acceptable to have no devs specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
		return (0);

	if (ndev == 0)
		return (EINVAL);

	/*
	 * Make sure the pool is formatted with a version that supports this
	 * device type.
	 */
	if (spa_version(spa) < version)
		return (ENOTSUP);

	/*
	 * Set the pending device list so we correctly handle device in-use
	 * checking.
	 */
	sav->sav_pending = dev;
	sav->sav_npending = ndev;

	for (i = 0; i < ndev; i++) {
		if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
		    mode)) != 0)
			goto out;

		if (!vd->vdev_ops->vdev_op_leaf) {
			vdev_free(vd);
			error = EINVAL;
			goto out;
		}

		/*
		 * The L2ARC currently only supports disk devices in
		 * kernel context. For user-level testing, we allow it.
		 */
#ifdef	_KERNEL
		if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
		    strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
			error = ENOTBLK;
			goto out;
		}
#endif
		vd->vdev_top = vd;

		if ((error = vdev_open(vd)) == 0 &&
		    (error = vdev_label_init(vd, crtxg, label)) == 0) {
			VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
			    vd->vdev_guid) == 0);
		}

		vdev_free(vd);

		if (error &&
		    (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
			goto out;
		else
			error = 0;
	}

out:
	sav->sav_pending = NULL;
	sav->sav_npending = 0;
	return (error);
}

static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
	int error;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
	    VDEV_LABEL_SPARE)) != 0) {
		return (error);
	}

	return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
	    &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
	    VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
    const char *config)
{
	int i;

	if (sav->sav_config != NULL) {
		nvlist_t **olddevs;
		uint_t oldndevs;
		nvlist_t **newdevs;

		/*
		 * Generate a new dev list by concatenating with the
		 * current dev list.
		 */
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
		    &olddevs, &oldndevs) == 0);

		newdevs = kmem_alloc(sizeof (void *) *
		    (ndevs + oldndevs), KM_SLEEP);
		for (i = 0; i < oldndevs; i++)
			VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
			    KM_SLEEP) == 0);
		for (i = 0; i < ndevs; i++)
			VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
			    KM_SLEEP) == 0);

		VERIFY(nvlist_remove(sav->sav_config, config,
		    DATA_TYPE_NVLIST_ARRAY) == 0);

		VERIFY(nvlist_add_nvlist_array(sav->sav_config,
		    config, newdevs, ndevs + oldndevs) == 0);
		for (i = 0; i < oldndevs + ndevs; i++)
			nvlist_free(newdevs[i]);
		kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
	} else {
		/*
		 * Generate a new dev list.
		 */
		VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
		    devs, ndevs) == 0);
	}
}
/*
 * Stop and drop level 2 ARC devices.
 */
void
spa_l2cache_drop(spa_t *spa)
{
	vdev_t *vd;
	int i;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	for (i = 0; i < sav->sav_count; i++) {
		uint64_t pool;

		vd = sav->sav_vdevs[i];
		ASSERT(vd != NULL);

		if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
		    pool != 0ULL && l2arc_vdev_present(vd))
			l2arc_remove_vdev(vd);
		if (vd->vdev_isl2cache)
			spa_l2cache_remove(vd);
		vdev_clear_stats(vd);
		(void) vdev_close(vd);
	}
}
/*
 * Pool Create
 */
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
    const char *history_str, nvlist_t *zplprops)
{
	spa_t *spa;
	char *altroot = NULL;
	vdev_t *rvd;
	dsl_pool_t *dp;
	dmu_tx_t *tx;
	int c, error = 0;
	uint64_t txg = TXG_INITIAL;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;
	uint64_t version;

	/*
	 * If this pool already exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if (spa_lookup(pool) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/*
	 * Allocate a new spa_t structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	spa->spa_uberblock.ub_txg = txg - 1;

	if (props && (error = spa_prop_validate(spa, props))) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION),
	    &version) != 0)
		version = SPA_VERSION;
	ASSERT(version <= SPA_VERSION);
	spa->spa_uberblock.ub_version = version;
	spa->spa_ubsync = spa->spa_uberblock;

	/*
	 * Create the root vdev.
	 */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);

	ASSERT(error != 0 || rvd != NULL);
	ASSERT(error != 0 || spa->spa_root_vdev == rvd);

	if (error == 0 && !zfs_allocatable_devs(nvroot))
		error = EINVAL;

	if (error == 0 &&
	    (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
	    (error = spa_validate_aux(spa, nvroot, txg,
	    VDEV_ALLOC_ADD)) == 0) {
		for (c = 0; c < rvd->vdev_children; c++)
			vdev_init(rvd->vdev_child[c], txg);
		vdev_config_dirty(rvd);
	}

	spa_config_exit(spa, SCL_ALL, FTAG);

	if (error != 0) {
		spa_unload(spa);
		spa_deactivate(spa);
		spa_remove(spa);
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Get the list of spares, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}

	/*
	 * Get the list of level 2 cache devices, if specified.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
		    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg);
	spa->spa_meta_objset = dp->dp_meta_objset;

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Create the pool config object.
	 */
	spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
	    DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
	    DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
	    sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add pool config");
	}

	/* Newly created pools with the right version are always deflated. */
	if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
		spa->spa_deflate = TRUE;
		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bplist object. Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset,
	    1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj,
	    ZIO_COMPRESS_OFF, tx);

	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST,
	    sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bplist");
	}

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	if (props != NULL) {
		spa_configfile_set(spa, props, B_FALSE);
		spa_sync_props(spa, props, CRED(), tx);
	}

	dmu_tx_commit(tx);

	spa->spa_sync_on = B_TRUE;
	txg_sync_start(spa->spa_dsl_pool);

	/*
	 * We explicitly wait for the first transaction to complete so that our
	 * bean counters are appropriately updated.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	spa_config_sync(spa, B_FALSE, B_TRUE);

	if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
		(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);

	spa->spa_minref = refcount_count(&spa->spa_refcount);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Import the given pool into the system. We set up the necessary spa_t and
 * then call spa_load() to do the dirty work.
 */
static int
spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
    boolean_t isroot, boolean_t allowfaulted)
{
	spa_t *spa;
	char *altroot = NULL;
	int error, loaderr;
	nvlist_t *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	/*
	 * If a pool with this name exists, return failure.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(pool)) != NULL) {
		if (isroot) {
			/*
			 * Remove the existing root pool from the
			 * namespace so that we can replace it with
			 * the correct config we just read in.
			 */
			ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
			spa_remove(spa);
		} else {
			mutex_exit(&spa_namespace_lock);
			return (EEXIST);
		}
	}

	/*
	 * Create and initialize the spa structure.
	 */
	(void) nvlist_lookup_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
	spa = spa_add(pool, altroot);
	spa_activate(spa, spa_mode_global);

	if (allowfaulted)
		spa->spa_import_faulted = B_TRUE;
	spa->spa_is_root = isroot;

	/*
	 * Pass off the heavy lifting to spa_load().
	 * Pass TRUE for mosconfig (unless this is a root pool) because
	 * the user-supplied config is actually the one to trust when
	 * doing an import.
	 */
	loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/*
	 * Toss any existing sparelist, as it doesn't have any validity
	 * anymore, and conflicts with spa_has_spare().
	 */
	if (!isroot && spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
		spa_load_spares(spa);
	}
	if (!isroot && spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
		spa_load_l2cache(spa);
	}

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
	if (error == 0)
		error = spa_validate_aux(spa, nvroot, -1ULL,
		    VDEV_ALLOC_L2CACHE);
	spa_config_exit(spa, SCL_ALL, FTAG);

	if (props != NULL)
		spa_configfile_set(spa, props, B_FALSE);

	if (error != 0 || (props && spa_writeable(spa) &&
	    (error = spa_prop_set(spa, props)))) {
		if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
			/*
			 * If we failed to load the pool, but 'allowfaulted' is
			 * set, then manually set the config as if the config
			 * passed in was specified in the cache file.
			 */
			error = 0;
			spa->spa_import_faulted = B_FALSE;
			if (spa->spa_config == NULL)
				spa->spa_config = spa_config_generate(spa,
				    NULL, -1ULL, B_TRUE);
			spa_unload(spa);
			spa_deactivate(spa);
			spa_config_sync(spa, B_FALSE, B_TRUE);
		} else {
			spa_unload(spa);
			spa_deactivate(spa);
			spa_remove(spa);
		}
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	/*
	 * Override any spares and level 2 cache devices as specified by
	 * the user, as these may have correct device names/devids, etc.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (spa->spa_spares.sav_config)
			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_spares(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_spares.sav_sync = B_TRUE;
	}
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (spa->spa_l2cache.sav_config)
			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
		else
			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
		spa_load_l2cache(spa);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa->spa_l2cache.sav_sync = B_TRUE;
	}

	if (spa_writeable(spa)) {
		/*
		 * Update the config cache to include the newly-imported pool.
		 */
		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
	}

	spa->spa_import_faulted = B_FALSE;
	mutex_exit(&spa_namespace_lock);

	return (0);
}
/*
 * Build a "root" vdev for a top level vdev read in from a rootpool
 * device label.
 */
static void
spa_build_rootpool_config(nvlist_t *config)
{
	nvlist_t *nvtop, *nvroot;
	uint64_t pgid;

	/*
	 * Add this top-level vdev to the child array.
	 */
	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
	    == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
	    == 0);

	/*
	 * Put this pool's top-level vdevs into a root vdev.
	 */
	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
	    == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
	VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &nvtop, 1) == 0);

	/*
	 * Replace the existing vdev_tree with the new root vdev in
	 * this pool's configuration (remove the old, add the new).
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);
}
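/*
 * The resulting vdev_tree looks roughly like this (sketch):
 *
 *	type = "root", id = 0, guid = <pool guid>
 *	    children[0] = <the top-level vdev read from the label>
 */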
/*
 * Get the root pool information from the root disk, then import the root pool
 * during system boot.
 */
extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);

static int
spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
    uint64_t *besttxg)
{
	nvlist_t *config;
	uint64_t txg;
	int error;

	if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
		return (error);

	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (bestconf != NULL)
		*bestconf = config;
	else
		nvlist_free(config);
	*besttxg = txg;
	return (0);
}
static boolean_t
spa_rootdev_validate(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Given the boot device's physical path or devid, check if the device
 * is in a valid state. If so, return the configuration from the vdev
 * label.
 */
int
spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
{
	nvlist_t *conf = NULL;
	uint64_t txg = 0;
	nvlist_t *nvtop, **child;
	char *type;
	char *bootpath = NULL;
	uint_t children, c;
	char *tmp;
	int error;

	if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
		*tmp = '\0';
	if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
		cmn_err(CE_NOTE, "error reading device label");
		return (error);
	}
	if (txg == 0) {
		cmn_err(CE_NOTE, "this device is detached");
		nvlist_free(conf);
		return (EINVAL);
	}

	VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
	    &nvtop) == 0);
	VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		if (spa_rootdev_validate(nvtop)) {
			goto out;
		} else {
			nvlist_free(conf);
			return (EINVAL);
		}
	}

	ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);

	VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);

	/*
	 * Go through vdevs in the mirror to see if the given device
	 * has the most recent txg. Only the device with the most
	 * recent txg has valid information and should be booted.
	 */
	for (c = 0; c < children; c++) {
		char *cdevid, *cpath;
		uint64_t tmptxg;

		if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
		    &cpath) != 0 && nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_DEVID, &cdevid) != 0)
			return (EINVAL);
		if ((spa_check_rootconf(cpath, cdevid, NULL,
		    &tmptxg) == 0) && (tmptxg > txg)) {
			txg = tmptxg;
			VERIFY(nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_PATH, &bootpath) == 0);
		}
	}

	/* Does the best device match the one we've booted from? */
	if (bootpath != NULL) {
		cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
		nvlist_free(conf);
		return (EINVAL);
	}
out:
	*bestconf = conf;
	return (0);
}
/*
 * Import a root pool.
 *
 * For x86, devpath_list will consist of devid and/or physpath name of
 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a").
 * The GRUB "findroot" command will return the vdev we should boot.
 *
 * For Sparc, devpath_list consists of the physpath name of the booting device
 * no matter whether the rootpool is a single device pool or a mirrored pool,
 * e.g.
 *	"/pci@1f,0/ide@d/disk@0,0:a"
 */
int
spa_import_rootpool(char *devpath, char *devid)
{
	nvlist_t *conf = NULL;
	char *pname;
	int error;

	/*
	 * Get the vdev pathname and configuration from the most
	 * recently updated vdev (highest txg).
	 */
	if (error = spa_get_rootconf(devpath, devid, &conf))
		goto msg_out;

	/*
	 * Add type "root" vdev to the config.
	 */
	spa_build_rootpool_config(conf);

	VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);

	/*
	 * We specify 'allowfaulted' for this to be treated like spa_open()
	 * instead of spa_import(). This prevents us from marking vdevs as
	 * persistently unavailable, and generates FMA ereports as if it were a
	 * pool open, not import.
	 */
	error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
	ASSERT(error != EEXIST);

	nvlist_free(conf);
	return (error);

msg_out:
	cmn_err(CE_NOTE, "\n"
	    "  ***************************************************  \n"
	    "  *  This device is not bootable!                   *  \n"
	    "  *  It is either offlined or detached or faulted.  *  \n"
	    "  *  Please try to boot from a different device.    *  \n"
	    "  ***************************************************  ");

	return (error);
}
2530 * Import a non-root pool into the system.
2533 spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
2535 return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
2539 spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
2541 return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
2546 * This (illegal) pool name is used when temporarily importing a spa_t in order
2547 * to get the vdev stats associated with the imported devices.
2549 #define TRYIMPORT_NAME "$import"
2552 spa_tryimport(nvlist_t *tryconfig)
2554 nvlist_t *config = NULL;
2560	if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
2561		return (NULL);
2563	if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
2564		return (NULL);
2567 * Create and initialize the spa structure.
2569 mutex_enter(&spa_namespace_lock);
2570 spa = spa_add(TRYIMPORT_NAME, NULL);
2571 spa_activate(spa, FREAD);
2574 * Pass off the heavy lifting to spa_load().
2575 * Pass TRUE for mosconfig because the user-supplied config
2576 * is actually the one to trust when doing an import.
2578 error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
2581 * If 'tryconfig' was at least parsable, return the current config.
2583 if (spa->spa_root_vdev != NULL) {
2584 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
2585		VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
2586		    poolname) == 0);
2587		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2588		    state) == 0);
2589 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
2590 spa->spa_uberblock.ub_timestamp) == 0);
2593 * If the bootfs property exists on this pool then we
2594 * copy it out so that external consumers can tell which
2595 * pools are bootable.
2597 if ((!error || error == EEXIST) && spa->spa_bootfs) {
2598 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2601 * We have to play games with the name since the
2602 * pool was opened as TRYIMPORT_NAME.
2604 if (dsl_dsobj_to_dsname(spa_name(spa),
2605 spa->spa_bootfs, tmpname) == 0) {
2607 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2609				cp = strchr(tmpname, '/');
2610				if (cp == NULL) {
2611					(void) strlcpy(dsname, tmpname,
2612					    MAXPATHLEN);
2613				} else {
2614					(void) snprintf(dsname, MAXPATHLEN,
2615					    "%s/%s", poolname, ++cp);
2616				}
2617 VERIFY(nvlist_add_string(config,
2618 ZPOOL_CONFIG_BOOTFS, dsname) == 0);
2619 kmem_free(dsname, MAXPATHLEN);
2621 kmem_free(tmpname, MAXPATHLEN);
2625 * Add the list of hot spares and level 2 cache devices.
2627 spa_add_spares(spa, config);
2628 spa_add_l2cache(spa, config);
2629	}
2631	spa_unload(spa);
2632	spa_deactivate(spa);
2633	spa_remove(spa);
2634 mutex_exit(&spa_namespace_lock);
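/*
 * Editor's sketch (not part of the original file): how a caller might
 * probe a scanned label config with spa_tryimport() and read back the
 * fields filled in above.  The helper and its argument are
 * hypothetical; the block is compiled out.
 */
#if 0
static void
example_tryimport(nvlist_t *scanned)
{
	nvlist_t *config = spa_tryimport(scanned);
	char *name;
	uint64_t state;

	if (config == NULL)
		return;		/* 'scanned' was not even parsable */

	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0);
	cmn_err(CE_NOTE, "pool '%s' state %llu", name, (u_longlong_t)state);
	nvlist_free(config);
}
#endif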
2640 * Pool export/destroy
2642 * The act of destroying or exporting a pool is very simple. We make sure there
2643 * is no more pending I/O and any references to the pool are gone. Then, we
2644 * update the pool state and sync all the labels to disk, removing the
2645 * configuration from the cache afterwards. If the 'hardforce' flag is set, then
2646 * we don't sync the labels or remove the configuration cache.
2649 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
2650 boolean_t force, boolean_t hardforce)
2657	if (!(spa_mode_global & FWRITE))
2658		return (EROFS);
2660 mutex_enter(&spa_namespace_lock);
2661 if ((spa = spa_lookup(pool)) == NULL) {
2662		mutex_exit(&spa_namespace_lock);
2663		return (ENOENT);
2664	}
2667 * Put a hold on the pool, drop the namespace lock, stop async tasks,
2668 * reacquire the namespace lock, and see if we can export.
2670 spa_open_ref(spa, FTAG);
2671 mutex_exit(&spa_namespace_lock);
2672 spa_async_suspend(spa);
2673 mutex_enter(&spa_namespace_lock);
2674 spa_close(spa, FTAG);
2677 * The pool will be in core if it's openable,
2678 * in which case we can modify its state.
2680 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
2682 * Objsets may be open only because they're dirty, so we
2683 * have to force it to sync before checking spa_refcnt.
2685 txg_wait_synced(spa->spa_dsl_pool, 0);
2688 * A pool cannot be exported or destroyed if there are active
2689 * references. If we are resetting a pool, allow references by
2690 * fault injection handlers.
2692 if (!spa_refcount_zero(spa) ||
2693 (spa->spa_inject_ref != 0 &&
2694 new_state != POOL_STATE_UNINITIALIZED)) {
2695 spa_async_resume(spa);
2696			mutex_exit(&spa_namespace_lock);
2697			return (EBUSY);
2698		}
2701 * A pool cannot be exported if it has an active shared spare.
2702 * This is to prevent other pools stealing the active spare
2703	 * from an exported pool.  At the user's request, such a pool
2704	 * can be forcibly exported.
2706 if (!force && new_state == POOL_STATE_EXPORTED &&
2707 spa_has_active_shared_spare(spa)) {
2708 spa_async_resume(spa);
2709			mutex_exit(&spa_namespace_lock);
2710			return (EXDEV);
2711		}
2714 * We want this to be reflected on every label,
2715 * so mark them all dirty. spa_unload() will do the
2716 * final sync that pushes these changes out.
2718 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
2719 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
2720 spa->spa_state = new_state;
2721 spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
2722 vdev_config_dirty(spa->spa_root_vdev);
2723 spa_config_exit(spa, SCL_ALL, FTAG);
2727 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY);
2729 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
2730		spa_unload(spa);
2731		spa_deactivate(spa);
2732	}
2734 if (oldconfig && spa->spa_config)
2735 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
2737	if (new_state != POOL_STATE_UNINITIALIZED) {
2738		if (!hardforce)
2739			spa_config_sync(spa, B_TRUE, B_TRUE);
2740		spa_remove(spa);
2741	}
2742 mutex_exit(&spa_namespace_lock);
2748 * Destroy a storage pool.
2751 spa_destroy(char *pool)
2753	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
2754	    B_FALSE, B_FALSE));
2758 * Export a storage pool.
2761 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
2762 boolean_t hardforce)
2764	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
2765	    force, hardforce));
2769 * Similar to spa_export(), this unloads the spa_t without actually removing it
2770 * from the namespace in any way.
2773 spa_reset(char *pool)
2775	return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
2776	    B_FALSE, B_FALSE));
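/*
 * Editor's sketch (not part of the original file): the three wrappers
 * above differ only in the target state and the force flags they pass
 * to spa_export_common().  The pool name "tank" is a placeholder; the
 * block is compiled out.
 */
#if 0
static void
example_teardown(void)
{
	nvlist_t *oldconfig = NULL;

	(void) spa_destroy("tank");		/* -> POOL_STATE_DESTROYED */
	(void) spa_export("tank", &oldconfig, B_FALSE, B_FALSE);
						/* -> POOL_STATE_EXPORTED */
	(void) spa_reset("tank");		/* unload, keep in namespace */
}
#endif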
2780 * ==========================================================================
2781 * Device manipulation
2782 * ==========================================================================
2786 * Add a device to a storage pool.
2789 spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
2793 vdev_t *rvd = spa->spa_root_vdev;
2795 nvlist_t **spares, **l2cache;
2796 uint_t nspares, nl2cache;
2798 txg = spa_vdev_enter(spa);
2800 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
2801 VDEV_ALLOC_ADD)) != 0)
2802 return (spa_vdev_exit(spa, NULL, txg, error));
2804 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
2806	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
2807	    &nspares) != 0)
2808		nspares = 0;
2810	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
2811	    &nl2cache) != 0)
2812		nl2cache = 0;
2814 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
2815 return (spa_vdev_exit(spa, vd, txg, EINVAL));
2817 if (vd->vdev_children != 0 &&
2818 (error = vdev_create(vd, txg, B_FALSE)) != 0)
2819 return (spa_vdev_exit(spa, vd, txg, error));
2822 * We must validate the spares and l2cache devices after checking the
2823 * children. Otherwise, vdev_inuse() will blindly overwrite the spare.
2825 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
2826 return (spa_vdev_exit(spa, vd, txg, error));
2829 * Transfer each new top-level vdev from vd to rvd.
2831 for (int c = 0; c < vd->vdev_children; c++) {
2832 tvd = vd->vdev_child[c];
2833 vdev_remove_child(vd, tvd);
2834 tvd->vdev_id = rvd->vdev_children;
2835 vdev_add_child(rvd, tvd);
2836 vdev_config_dirty(tvd);
2839	if (nspares != 0) {
2840		spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
2841 ZPOOL_CONFIG_SPARES);
2842 spa_load_spares(spa);
2843		spa->spa_spares.sav_sync = B_TRUE;
2844	}
2846 if (nl2cache != 0) {
2847 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
2848 ZPOOL_CONFIG_L2CACHE);
2849 spa_load_l2cache(spa);
2850		spa->spa_l2cache.sav_sync = B_TRUE;
2851	}
2854 * We have to be careful when adding new vdevs to an existing pool.
2855 * If other threads start allocating from these vdevs before we
2856 * sync the config cache, and we lose power, then upon reboot we may
2857 * fail to open the pool because there are DVAs that the config cache
2858 * can't translate. Therefore, we first add the vdevs without
2859 * initializing metaslabs; sync the config cache (via spa_vdev_exit());
2860 * and then let spa_config_update() initialize the new metaslabs.
2862 * spa_load() checks for added-but-not-initialized vdevs, so that
2863 * if we lose power at any point in this sequence, the remaining
2864 * steps will be completed the next time we load the pool.
2866 (void) spa_vdev_exit(spa, vd, txg, 0);
2868 mutex_enter(&spa_namespace_lock);
2869 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
2870 mutex_exit(&spa_namespace_lock);
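/*
 * Editor's sketch (not part of the original file): the nvroot shape
 * spa_vdev_add() expects -- a "root" vdev whose ZPOOL_CONFIG_CHILDREN
 * are the new top-level vdevs, with optional ZPOOL_CONFIG_SPARES and
 * ZPOOL_CONFIG_L2CACHE arrays alongside.  The helper and its arguments
 * are hypothetical; the block is compiled out.
 */
#if 0
static nvlist_t *
example_add_nvroot(nvlist_t *newdisk, nvlist_t *sparedisk)
{
	nvlist_t *nvroot;

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &newdisk, 1) == 0);
	VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &sparedisk, 1) == 0);
	return (nvroot);
}
#endif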
2876 * Attach a device to a mirror. The arguments are the path to any device
2877 * in the mirror, and the nvroot for the new device. If the path specifies
2878 * a device that is not mirrored, we automatically insert the mirror vdev.
2880 * If 'replacing' is specified, the new device is intended to replace the
2881 * existing device; in this case the two devices are made into their own
2882 * mirror using the 'replacing' vdev, which is functionally identical to
2883 * the mirror vdev (it actually reuses all the same ops) but has a few
2884 * extra rules: you can't attach to it after it's been created, and upon
2885 * completion of resilvering, the first disk (the one being replaced)
2886 * is automatically detached.
2889 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
2891 uint64_t txg, open_txg;
2892 vdev_t *rvd = spa->spa_root_vdev;
2893 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
2896 char *oldvdpath, *newvdpath;
2900 txg = spa_vdev_enter(spa);
2902 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
2904	if (oldvd == NULL)
2905		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
2907 if (!oldvd->vdev_ops->vdev_op_leaf)
2908 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
2910 pvd = oldvd->vdev_parent;
2912 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
2913 VDEV_ALLOC_ADD)) != 0)
2914 return (spa_vdev_exit(spa, NULL, txg, EINVAL));
2916 if (newrootvd->vdev_children != 1)
2917 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2919 newvd = newrootvd->vdev_child[0];
2921 if (!newvd->vdev_ops->vdev_op_leaf)
2922 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
2924 if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
2925 return (spa_vdev_exit(spa, newrootvd, txg, error));
2928 * Spares can't replace logs
2930 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
2931 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2935	 * For attach, the only allowable parent is a mirror or the root
2936	 * vdev.
2938 if (pvd->vdev_ops != &vdev_mirror_ops &&
2939 pvd->vdev_ops != &vdev_root_ops)
2940 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2942 pvops = &vdev_mirror_ops;
2945	 * Active hot spares can only be replaced by inactive hot
2946	 * spares.
2948 if (pvd->vdev_ops == &vdev_spare_ops &&
2949 pvd->vdev_child[1] == oldvd &&
2950 !spa_has_spare(spa, newvd->vdev_guid))
2951 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2954 * If the source is a hot spare, and the parent isn't already a
2955 * spare, then we want to create a new hot spare. Otherwise, we
2956 * want to create a replacing vdev. The user is not allowed to
2957 * attach to a spared vdev child unless the 'isspare' state is
2958	 * the same (spare replaces spare, non-spare replaces
2959	 * non-spare).
2961 if (pvd->vdev_ops == &vdev_replacing_ops)
2962 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2963 else if (pvd->vdev_ops == &vdev_spare_ops &&
2964 newvd->vdev_isspare != oldvd->vdev_isspare)
2965 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
2966 else if (pvd->vdev_ops != &vdev_spare_ops &&
2967 newvd->vdev_isspare)
2968 pvops = &vdev_spare_ops;
2969	else
2970		pvops = &vdev_replacing_ops;
2974	 * Compare the new device size with the replaceable/attachable
2975	 * device size.
2977 if (newvd->vdev_psize < vdev_get_rsize(oldvd))
2978 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
2981 * The new device cannot have a higher alignment requirement
2982 * than the top-level vdev.
2984 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
2985 return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
2988 * If this is an in-place replacement, update oldvd's path and devid
2989 * to make it distinguishable from newvd, and unopenable from now on.
2991 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
2992 spa_strfree(oldvd->vdev_path);
2993		oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
2994		    KM_SLEEP);
2995 (void) sprintf(oldvd->vdev_path, "%s/%s",
2996 newvd->vdev_path, "old");
2997 if (oldvd->vdev_devid != NULL) {
2998 spa_strfree(oldvd->vdev_devid);
2999 oldvd->vdev_devid = NULL;
3004 * If the parent is not a mirror, or if we're replacing, insert the new
3005 * mirror/replacing/spare vdev above oldvd.
3007 if (pvd->vdev_ops != pvops)
3008 pvd = vdev_add_parent(oldvd, pvops);
3010 ASSERT(pvd->vdev_top->vdev_parent == rvd);
3011 ASSERT(pvd->vdev_ops == pvops);
3012 ASSERT(oldvd->vdev_parent == pvd);
3015 * Extract the new device from its root and add it to pvd.
3017 vdev_remove_child(newrootvd, newvd);
3018 newvd->vdev_id = pvd->vdev_children;
3019 vdev_add_child(pvd, newvd);
3022 * If newvd is smaller than oldvd, but larger than its rsize,
3023 * the addition of newvd may have decreased our parent's asize.
3025 pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
3027 tvd = newvd->vdev_top;
3028 ASSERT(pvd->vdev_top == tvd);
3029 ASSERT(tvd->vdev_parent == rvd);
3031 vdev_config_dirty(tvd);
3034 * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate
3035 * upward when spa_vdev_exit() calls vdev_dtl_reassess().
3037 open_txg = txg + TXG_CONCURRENT_STATES - 1;
3039 vdev_dtl_dirty(newvd, DTL_MISSING,
3040 TXG_INITIAL, open_txg - TXG_INITIAL + 1);
3042 if (newvd->vdev_isspare)
3043 spa_spare_activate(newvd);
3044 oldvdpath = spa_strdup(oldvd->vdev_path);
3045 newvdpath = spa_strdup(newvd->vdev_path);
3046 newvd_isspare = newvd->vdev_isspare;
3049 * Mark newvd's DTL dirty in this txg.
3051 vdev_dirty(tvd, VDD_DTL, newvd, txg);
3053 (void) spa_vdev_exit(spa, newrootvd, open_txg, 0);
3055 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
3056 if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
3057 spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx,
3058 CRED(), "%s vdev=%s %s vdev=%s",
3059 replacing && newvd_isspare ? "spare in" :
3060 replacing ? "replace" : "attach", newvdpath,
3061		    replacing ? "for" : "to", oldvdpath);
3062		dmu_tx_commit(tx);
3063	} else {
3064		dmu_tx_abort(tx);
3065	}
3067 spa_strfree(oldvdpath);
3068 spa_strfree(newvdpath);
3071 * Kick off a resilver to update newvd.
3073 VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
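/*
 * Editor's sketch (not part of the original file): the two attach
 * modes described above.  Both helpers are hypothetical; the block is
 * compiled out.
 */
#if 0
static int
example_grow_mirror(spa_t *spa, vdev_t *oldvd, nvlist_t *newroot)
{
	/* replacing == 0: the new device becomes a mirror sibling. */
	return (spa_vdev_attach(spa, oldvd->vdev_guid, newroot, 0));
}

static int
example_replace_device(spa_t *spa, vdev_t *oldvd, nvlist_t *newroot)
{
	/*
	 * replacing == 1: a 'replacing' vdev is interposed, and oldvd
	 * detaches automatically when the resilver completes.
	 */
	return (spa_vdev_attach(spa, oldvd->vdev_guid, newroot, 1));
}
#endif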
3079 * Detach a device from a mirror or replacing vdev.
3080 * If 'replace_done' is specified, only detach if the parent
3081 * is a replacing vdev.
3084 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
3088 vdev_t *rvd = spa->spa_root_vdev;
3089 vdev_t *vd, *pvd, *cvd, *tvd;
3090 boolean_t unspare = B_FALSE;
3091 uint64_t unspare_guid;
3094 txg = spa_vdev_enter(spa);
3096 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3098	if (vd == NULL)
3099		return (spa_vdev_exit(spa, NULL, txg, ENODEV));
3101 if (!vd->vdev_ops->vdev_op_leaf)
3102 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3104 pvd = vd->vdev_parent;
3107 * If the parent/child relationship is not as expected, don't do it.
3108 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
3109 * vdev that's replacing B with C. The user's intent in replacing
3110 * is to go from M(A,B) to M(A,C). If the user decides to cancel
3111 * the replace by detaching C, the expected behavior is to end up
3112 * M(A,B). But suppose that right after deciding to detach C,
3113 * the replacement of B completes. We would have M(A,C), and then
3114 * ask to detach C, which would leave us with just A -- not what
3115 * the user wanted. To prevent this, we make sure that the
3116 * parent/child relationship hasn't changed -- in this example,
3117 * that C's parent is still the replacing vdev R.
3119 if (pvd->vdev_guid != pguid && pguid != 0)
3120 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3123 * If replace_done is specified, only remove this device if it's
3124 * the first child of a replacing vdev. For the 'spare' vdev, either
3125 * disk can be removed.
3127	if (replace_done) {
3128		if (pvd->vdev_ops == &vdev_replacing_ops) {
3129 if (vd->vdev_id != 0)
3130 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3131 } else if (pvd->vdev_ops != &vdev_spare_ops) {
3132			return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3133		}
3134	}
3136 ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
3137 spa_version(spa) >= SPA_VERSION_SPARES);
3140 * Only mirror, replacing, and spare vdevs support detach.
3142 if (pvd->vdev_ops != &vdev_replacing_ops &&
3143 pvd->vdev_ops != &vdev_mirror_ops &&
3144 pvd->vdev_ops != &vdev_spare_ops)
3145 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3148 * If this device has the only valid copy of some data,
3149 * we cannot safely detach it.
3151 if (vdev_dtl_required(vd))
3152 return (spa_vdev_exit(spa, NULL, txg, EBUSY));
3154 ASSERT(pvd->vdev_children >= 2);
3157 * If we are detaching the second disk from a replacing vdev, then
3158 * check to see if we changed the original vdev's path to have "/old"
3159 * at the end in spa_vdev_attach(). If so, undo that change now.
3161 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 &&
3162 pvd->vdev_child[0]->vdev_path != NULL &&
3163 pvd->vdev_child[1]->vdev_path != NULL) {
3164 ASSERT(pvd->vdev_child[1] == vd);
3165 cvd = pvd->vdev_child[0];
3166 len = strlen(vd->vdev_path);
3167 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
3168 strcmp(cvd->vdev_path + len, "/old") == 0) {
3169 spa_strfree(cvd->vdev_path);
3170			cvd->vdev_path = spa_strdup(vd->vdev_path);
3171		}
3172	}
3175 * If we are detaching the original disk from a spare, then it implies
3176 * that the spare should become a real disk, and be removed from the
3177 * active spare list for the pool.
3179 if (pvd->vdev_ops == &vdev_spare_ops &&
3180	    vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
3181		unspare = B_TRUE;
3184 * Erase the disk labels so the disk can be used for other things.
3185 * This must be done after all other error cases are handled,
3186 * but before we disembowel vd (so we can still do I/O to it).
3187 * But if we can't do it, don't treat the error as fatal --
3188 * it may be that the unwritability of the disk is the reason
3189 * it's being detached!
3191 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
3194 * Remove vd from its parent and compact the parent's children.
3196 vdev_remove_child(pvd, vd);
3197 vdev_compact_children(pvd);
3200 * Remember one of the remaining children so we can get tvd below.
3202 cvd = pvd->vdev_child[0];
3205 * If we need to remove the remaining child from the list of hot spares,
3206 * do it now, marking the vdev as no longer a spare in the process.
3207 * We must do this before vdev_remove_parent(), because that can
3208 * change the GUID if it creates a new toplevel GUID. For a similar
3209 * reason, we must remove the spare now, in the same txg as the detach;
3210 * otherwise someone could attach a new sibling, change the GUID, and
3211 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
3213	if (unspare) {
3214		ASSERT(cvd->vdev_isspare);
3215 spa_spare_remove(cvd);
3216 unspare_guid = cvd->vdev_guid;
3217		(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3218	}
3221 * If the parent mirror/replacing vdev only has one child,
3222 * the parent is no longer needed. Remove it from the tree.
3224 if (pvd->vdev_children == 1)
3225 vdev_remove_parent(cvd);
3228 * We don't set tvd until now because the parent we just removed
3229 * may have been the previous top-level vdev.
3231 tvd = cvd->vdev_top;
3232 ASSERT(tvd->vdev_parent == rvd);
3235 * Reevaluate the parent vdev state.
3237 vdev_propagate_state(cvd);
3240 * If the device we just detached was smaller than the others, it may be
3241 * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
3242 * can't fail because the existing metaslabs are already in core, so
3243 * there's nothing to read from disk.
3245 VERIFY(vdev_metaslab_init(tvd, txg) == 0);
3247 vdev_config_dirty(tvd);
3250 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
3251 * vd->vdev_detached is set and free vd's DTL object in syncing context.
3252 * But first make sure we're not on any *other* txg's DTL list, to
3253 * prevent vd from being accessed after it's freed.
3255 for (int t = 0; t < TXG_SIZE; t++)
3256 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
3257 vd->vdev_detached = B_TRUE;
3258 vdev_dirty(tvd, VDD_DTL, vd, txg);
3260 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE);
3262 error = spa_vdev_exit(spa, vd, txg, 0);
3265 * If this was the removal of the original device in a hot spare vdev,
3266 * then we want to go through and remove the device from the hot spare
3267 * list of every other pool.
3272 mutex_enter(&spa_namespace_lock);
3273 while ((spa = spa_next(spa)) != NULL) {
3274			if (spa->spa_state != POOL_STATE_ACTIVE)
3275				continue;
3278 spa_open_ref(spa, FTAG);
3279 mutex_exit(&spa_namespace_lock);
3280 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
3281 mutex_enter(&spa_namespace_lock);
3282 spa_close(spa, FTAG);
3284 mutex_exit(&spa_namespace_lock);
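/*
 * Editor's sketch (not part of the original file): the race the pguid
 * check above closes.  A caller cancelling a replace names the
 * expected parent (the replacing vdev); if the replace completed
 * first, the parent guid no longer matches and the detach fails with
 * EBUSY instead of shrinking the mirror.  The helper is hypothetical;
 * the block is compiled out.
 */
#if 0
static int
example_cancel_replace(spa_t *spa, vdev_t *newchild)
{
	return (spa_vdev_detach(spa, newchild->vdev_guid,
	    newchild->vdev_parent->vdev_guid, B_FALSE));
}
#endif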
3291 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
3293 for (int i = 0; i < count; i++) {
3296		VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
3297		    &guid) == 0);
3299		if (guid == target_guid)
3300			return (nvpp[i]);
3307 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
3308 nvlist_t *dev_to_remove)
3310 nvlist_t **newdev = NULL;
3312	if (count > 1)
3313		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
3315 for (int i = 0, j = 0; i < count; i++) {
3316		if (dev[i] == dev_to_remove)
3317			continue;
3318 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
3321 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
3322 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
3324 for (int i = 0; i < count - 1; i++)
3325 nvlist_free(newdev[i]);
3327	if (count > 1)
3328		kmem_free(newdev, (count - 1) * sizeof (void *));
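/*
 * Editor's sketch (not part of the original file): how spa_vdev_remove()
 * (below) pairs spa_nvlist_lookup_by_guid() with spa_vdev_remove_aux()
 * to drop one entry from the spare list.  The helper is hypothetical;
 * the block is compiled out.
 */
#if 0
static void
example_drop_spare(spa_t *spa, uint64_t guid)
{
	nvlist_t **spares, *nv;
	uint_t nspares;

	VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
	if ((nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		spa_vdev_remove_aux(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
		spa_load_spares(spa);
		spa->spa_spares.sav_sync = B_TRUE;
	}
}
#endif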
3332 * Remove a device from the pool. Currently, this supports removing only hot
3333 * spares and level 2 ARC devices.
3336 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
3339 nvlist_t **spares, **l2cache, *nv;
3340 uint_t nspares, nl2cache;
3343 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
3346 txg = spa_vdev_enter(spa);
3348 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
3350 if (spa->spa_spares.sav_vdevs != NULL &&
3351 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
3352 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
3353 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
3355		 * Only remove the hot spare if it's not currently in use
3356		 * in this pool.
3358 if (vd == NULL || unspare) {
3359 spa_vdev_remove_aux(spa->spa_spares.sav_config,
3360 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
3361 spa_load_spares(spa);
3362			spa->spa_spares.sav_sync = B_TRUE;
3363		} else {
3364			error = EBUSY;
3365		}
3366 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
3367 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
3368 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
3369 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
3371 * Cache devices can always be removed.
3373 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
3374 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
3375 spa_load_l2cache(spa);
3376 spa->spa_l2cache.sav_sync = B_TRUE;
3377 } else if (vd != NULL) {
3378		/*
3379		 * Normal vdevs cannot be removed (yet).
3380		 */
3381		error = ENOTSUP;
3382	} else {
3383		/*
3384		 * There is no vdev of any kind with the specified guid.
3385		 */
3386		error = ENOENT;
3387	}
3390 return (spa_vdev_exit(spa, NULL, txg, error));
3396 * Find any device that's done replacing, or a vdev marked 'unspare' that's
3397	 * currently spared, so we can detach it.
3400 spa_vdev_resilver_done_hunt(vdev_t *vd)
3402 vdev_t *newvd, *oldvd;
3405 for (c = 0; c < vd->vdev_children; c++) {
3406		oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
3407		if (oldvd != NULL)
3408			return (oldvd);
3412 * Check for a completed replacement.
3414 if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) {
3415 oldvd = vd->vdev_child[0];
3416 newvd = vd->vdev_child[1];
3418 if (vdev_dtl_empty(newvd, DTL_MISSING) &&
3419		    !vdev_dtl_required(oldvd))
3420			return (oldvd);
3424 * Check for a completed resilver with the 'unspare' flag set.
3426 if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) {
3427 newvd = vd->vdev_child[0];
3428 oldvd = vd->vdev_child[1];
3430 if (newvd->vdev_unspare &&
3431 vdev_dtl_empty(newvd, DTL_MISSING) &&
3432 !vdev_dtl_required(oldvd)) {
3433			newvd->vdev_unspare = 0;
3434			return (oldvd);
3442 spa_vdev_resilver_done(spa_t *spa)
3444 vdev_t *vd, *pvd, *ppvd;
3445 uint64_t guid, sguid, pguid, ppguid;
3447 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3449 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
3450 pvd = vd->vdev_parent;
3451 ppvd = pvd->vdev_parent;
3452 guid = vd->vdev_guid;
3453 pguid = pvd->vdev_guid;
3454		ppguid = ppvd->vdev_guid;
3455		sguid = 0;
3457 * If we have just finished replacing a hot spared device, then
3458		 * we need to detach the parent's first child (the original hot
3459		 * spare) as well.
3461 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
3462 ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
3463 ASSERT(ppvd->vdev_children == 2);
3464			sguid = ppvd->vdev_child[1]->vdev_guid;
3465		}
3466 spa_config_exit(spa, SCL_ALL, FTAG);
3467		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
3468			return;
3469		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
3470			return;
3471 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
3474 spa_config_exit(spa, SCL_ALL, FTAG);
3478 * Update the stored path for this vdev. Dirty the vdev configuration, relying
3479 * on spa_vdev_enter/exit() to synchronize the labels and cache.
3482 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
3487 txg = spa_vdev_enter(spa);
3489 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
3491 * Determine if this is a reference to a hot spare device. If
3492 * it is, update the path manually as there is no associated
3493 * vdev_t that can be synced to disk.
3498 if (spa->spa_spares.sav_config != NULL) {
3499 VERIFY(nvlist_lookup_nvlist_array(
3500 spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
3501 &spares, &nspares) == 0);
3502 for (i = 0; i < nspares; i++) {
3504 VERIFY(nvlist_lookup_uint64(spares[i],
3505 ZPOOL_CONFIG_GUID, &theguid) == 0);
3506 if (theguid == guid) {
3507 VERIFY(nvlist_add_string(spares[i],
3508 ZPOOL_CONFIG_PATH, newpath) == 0);
3509 spa_load_spares(spa);
3510 spa->spa_spares.sav_sync = B_TRUE;
3511					return (spa_vdev_exit(spa, NULL, txg,
3512					    0));
3517 return (spa_vdev_exit(spa, NULL, txg, ENOENT));
3520 if (!vd->vdev_ops->vdev_op_leaf)
3521 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
3523 spa_strfree(vd->vdev_path);
3524 vd->vdev_path = spa_strdup(newpath);
3526 vdev_config_dirty(vd->vdev_top);
3528 return (spa_vdev_exit(spa, NULL, txg, 0));
3532 * ==========================================================================
3534 * ==========================================================================
3538 spa_scrub(spa_t *spa, pool_scrub_type_t type)
3540 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
3542	if ((uint_t)type >= POOL_SCRUB_TYPES)
3543		return (ENOTSUP);
3546 * If a resilver was requested, but there is no DTL on a
3547 * writeable leaf device, we have nothing to do.
3549 if (type == POOL_SCRUB_RESILVER &&
3550 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
3551		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
3552		return (0);
3553	}
3555 if (type == POOL_SCRUB_EVERYTHING &&
3556 spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE &&
3557	    spa->spa_dsl_pool->dp_scrub_isresilver)
3558		return (EBUSY);
3560 if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) {
3561 return (dsl_pool_scrub_clean(spa->spa_dsl_pool));
3562 } else if (type == POOL_SCRUB_NONE) {
3563		return (dsl_pool_scrub_cancel(spa->spa_dsl_pool));
3564	} else {
3565		return (EINVAL);
3566	}
3570 * ==========================================================================
3571 * SPA async task processing
3572 * ==========================================================================
3576 spa_async_remove(spa_t *spa, vdev_t *vd)
3578 if (vd->vdev_remove_wanted) {
3579 vd->vdev_remove_wanted = 0;
3580 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
3581 vdev_clear(spa, vd);
3582 vdev_state_dirty(vd->vdev_top);
3585 for (int c = 0; c < vd->vdev_children; c++)
3586 spa_async_remove(spa, vd->vdev_child[c]);
3590 spa_async_probe(spa_t *spa, vdev_t *vd)
3592 if (vd->vdev_probe_wanted) {
3593 vd->vdev_probe_wanted = 0;
3594 vdev_reopen(vd); /* vdev_open() does the actual probe */
3597 for (int c = 0; c < vd->vdev_children; c++)
3598 spa_async_probe(spa, vd->vdev_child[c]);
3602 spa_async_thread(spa_t *spa)
3606 ASSERT(spa->spa_sync_on);
3608 mutex_enter(&spa->spa_async_lock);
3609 tasks = spa->spa_async_tasks;
3610 spa->spa_async_tasks = 0;
3611 mutex_exit(&spa->spa_async_lock);
3614 * See if the config needs to be updated.
3616 if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
3617 mutex_enter(&spa_namespace_lock);
3618 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
3619 mutex_exit(&spa_namespace_lock);
3623 * See if any devices need to be marked REMOVED.
3625 if (tasks & SPA_ASYNC_REMOVE) {
3626 spa_vdev_state_enter(spa);
3627 spa_async_remove(spa, spa->spa_root_vdev);
3628 for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
3629 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
3630 for (int i = 0; i < spa->spa_spares.sav_count; i++)
3631 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
3632 (void) spa_vdev_state_exit(spa, NULL, 0);
3636 * See if any devices need to be probed.
3638 if (tasks & SPA_ASYNC_PROBE) {
3639 spa_vdev_state_enter(spa);
3640 spa_async_probe(spa, spa->spa_root_vdev);
3641 (void) spa_vdev_state_exit(spa, NULL, 0);
3645 * If any devices are done replacing, detach them.
3647 if (tasks & SPA_ASYNC_RESILVER_DONE)
3648 spa_vdev_resilver_done(spa);
3651 * Kick off a resilver.
3653 if (tasks & SPA_ASYNC_RESILVER)
3654 VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0);
3657 * Let the world know that we're done.
3659 mutex_enter(&spa->spa_async_lock);
3660 spa->spa_async_thread = NULL;
3661 cv_broadcast(&spa->spa_async_cv);
3662 mutex_exit(&spa->spa_async_lock);
3667 spa_async_suspend(spa_t *spa)
3669 mutex_enter(&spa->spa_async_lock);
3670 spa->spa_async_suspended++;
3671 while (spa->spa_async_thread != NULL)
3672 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
3673 mutex_exit(&spa->spa_async_lock);
3677 spa_async_resume(spa_t *spa)
3679 mutex_enter(&spa->spa_async_lock);
3680 ASSERT(spa->spa_async_suspended != 0);
3681 spa->spa_async_suspended--;
3682 mutex_exit(&spa->spa_async_lock);
3686 spa_async_dispatch(spa_t *spa)
3688 mutex_enter(&spa->spa_async_lock);
3689 if (spa->spa_async_tasks && !spa->spa_async_suspended &&
3690 spa->spa_async_thread == NULL &&
3691 rootdir != NULL && !vn_is_readonly(rootdir))
3692 spa->spa_async_thread = thread_create(NULL, 0,
3693 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
3694 mutex_exit(&spa->spa_async_lock);
3698 spa_async_request(spa_t *spa, int task)
3700 mutex_enter(&spa->spa_async_lock);
3701 spa->spa_async_tasks |= task;
3702 mutex_exit(&spa->spa_async_lock);
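/*
 * Editor's sketch (not part of the original file): the
 * request/dispatch/suspend protocol above, reduced to one hypothetical
 * example_async_t object.  Unlike the original, request and dispatch
 * are merged here for brevity.  The block is compiled out.
 */
#if 0
typedef struct example_async {
	kmutex_t	ea_lock;
	kcondvar_t	ea_cv;
	kthread_t	*ea_thread;
	int		ea_tasks;	/* bitmask of pending work */
	int		ea_suspended;
} example_async_t;

static void
example_async_thread(example_async_t *ea)
{
	int tasks;

	mutex_enter(&ea->ea_lock);
	tasks = ea->ea_tasks;		/* consume the whole bitmask */
	ea->ea_tasks = 0;
	mutex_exit(&ea->ea_lock);

	/* ... service each bit in 'tasks' without holding ea_lock ... */

	mutex_enter(&ea->ea_lock);
	ea->ea_thread = NULL;		/* lets suspenders proceed */
	cv_broadcast(&ea->ea_cv);
	mutex_exit(&ea->ea_lock);
	thread_exit();
}

static void
example_async_request(example_async_t *ea, int task)
{
	mutex_enter(&ea->ea_lock);
	ea->ea_tasks |= task;
	if (ea->ea_tasks != 0 && ea->ea_suspended == 0 &&
	    ea->ea_thread == NULL)
		ea->ea_thread = thread_create(NULL, 0,
		    example_async_thread, ea, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&ea->ea_lock);
}

static void
example_async_suspend(example_async_t *ea)
{
	mutex_enter(&ea->ea_lock);
	ea->ea_suspended++;
	while (ea->ea_thread != NULL)
		cv_wait(&ea->ea_cv, &ea->ea_lock);
	mutex_exit(&ea->ea_lock);
}
#endif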
3706 * ==========================================================================
3707 * SPA syncing routines
3708 * ==========================================================================
3712 spa_sync_deferred_frees(spa_t *spa, uint64_t txg)
3714 bplist_t *bpl = &spa->spa_sync_bplist;
3722 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
3724 while (bplist_iterate(bpl, &itor, &blk) == 0) {
3725 ASSERT(blk.blk_birth < txg);
3726 zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL,
3727 ZIO_FLAG_MUSTSUCCEED));
3730 error = zio_wait(zio);
3731 ASSERT3U(error, ==, 0);
3733 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3734 bplist_vacate(bpl, tx);
3737 * Pre-dirty the first block so we sync to convergence faster.
3738 * (Usually only the first block is needed.)
3740	dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx);
3741	dmu_tx_commit(tx);
3745 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
3747 char *packed = NULL;
3752 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
3755 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
3756 * information. This avoids the dbuf_will_dirty() path and
3757 * saves us a pre-read to get data we don't actually care about.
3759 bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
3760 packed = kmem_alloc(bufsize, KM_SLEEP);
3762	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
3763	    KM_SLEEP) == 0);
3764 bzero(packed + nvsize, bufsize - nvsize);
3766 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
3768 kmem_free(packed, bufsize);
3770 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
3771 dmu_buf_will_dirty(db, tx);
3772 *(uint64_t *)db->db_data = nvsize;
3773 dmu_buf_rele(db, FTAG);
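/*
 * Editor's sketch (not part of the original file): the pack-and-pad
 * step above in isolation.  The helper is hypothetical; the block is
 * compiled out.
 */
#if 0
static char *
example_pack_padded(nvlist_t *nv, size_t *bufsizep)
{
	size_t nvsize, bufsize;
	char *packed;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
	bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = kmem_alloc(bufsize, KM_SLEEP);
	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	bzero(packed + nvsize, bufsize - nvsize);	/* zero the slack */
	*bufsizep = bufsize;
	return (packed);
}
#endif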
3777 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
3778 const char *config, const char *entry)
3788 * Update the MOS nvlist describing the list of available devices.
3789 * spa_validate_aux() will have already made sure this nvlist is
3790 * valid and the vdevs are labeled appropriately.
3792 if (sav->sav_object == 0) {
3793 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
3794 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
3795 sizeof (uint64_t), tx);
3796 VERIFY(zap_update(spa->spa_meta_objset,
3797 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
3798 &sav->sav_object, tx) == 0);
3801 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
3802 if (sav->sav_count == 0) {
3803 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
3805 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
3806 for (i = 0; i < sav->sav_count; i++)
3807 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
3808 B_FALSE, B_FALSE, B_TRUE);
3809 VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
3810 sav->sav_count) == 0);
3811 for (i = 0; i < sav->sav_count; i++)
3812 nvlist_free(list[i]);
3813 kmem_free(list, sav->sav_count * sizeof (void *));
3816 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
3817 nvlist_free(nvroot);
3819 sav->sav_sync = B_FALSE;
3823 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
3827	if (list_is_empty(&spa->spa_config_dirty_list))
3828		return;
3830 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3832 config = spa_config_generate(spa, spa->spa_root_vdev,
3833 dmu_tx_get_txg(tx), B_FALSE);
3835 spa_config_exit(spa, SCL_STATE, FTAG);
3837 if (spa->spa_config_syncing)
3838 nvlist_free(spa->spa_config_syncing);
3839 spa->spa_config_syncing = config;
3841 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
3845 * Set zpool properties.
3848 spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3850	spa_t *spa = arg1;
3851	objset_t *mos = spa->spa_meta_objset;
3852 nvlist_t *nvp = arg2;
3857 const char *propname;
3858 zprop_type_t proptype;
3860 mutex_enter(&spa->spa_props_lock);
3862	elem = NULL;
3863	while ((elem = nvlist_next_nvpair(nvp, elem))) {
3864 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
3865 case ZPOOL_PROP_VERSION:
3867 * Only set version for non-zpool-creation cases
3868 * (set/import). spa_create() needs special care
3869 * for version setting.
3871 if (tx->tx_txg != TXG_INITIAL) {
3872				VERIFY(nvpair_value_uint64(elem,
3873				    &intval) == 0);
3874 ASSERT(intval <= SPA_VERSION);
3875 ASSERT(intval >= spa_version(spa));
3876 spa->spa_uberblock.ub_version = intval;
3877 vdev_config_dirty(spa->spa_root_vdev);
3881 case ZPOOL_PROP_ALTROOT:
3883 * 'altroot' is a non-persistent property. It should
3884 * have been set temporarily at creation or import time.
3886			ASSERT(spa->spa_root != NULL);
3887			break;
3889 case ZPOOL_PROP_CACHEFILE:
3891			 * 'cachefile' is also a non-persistent property.
3892			 */
3893			break;
3894		default:
3895			/*
3896 * Set pool property values in the poolprops mos object.
3898 if (spa->spa_pool_props_object == 0) {
3899 objset_t *mos = spa->spa_meta_objset;
3901 VERIFY((spa->spa_pool_props_object =
3902 zap_create(mos, DMU_OT_POOL_PROPS,
3903 DMU_OT_NONE, 0, tx)) > 0);
3905 VERIFY(zap_update(mos,
3906 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
3907			    8, 1, &spa->spa_pool_props_object, tx)
3908			    == 0);
3911 /* normalize the property name */
3912 propname = zpool_prop_to_name(prop);
3913 proptype = zpool_prop_get_type(prop);
3915 if (nvpair_type(elem) == DATA_TYPE_STRING) {
3916 ASSERT(proptype == PROP_TYPE_STRING);
3917 VERIFY(nvpair_value_string(elem, &strval) == 0);
3918 VERIFY(zap_update(mos,
3919 spa->spa_pool_props_object, propname,
3920 1, strlen(strval) + 1, strval, tx) == 0);
3922 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
3923 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
3925			if (proptype == PROP_TYPE_INDEX) {
3926				const char *unused;
3927 VERIFY(zpool_prop_index_to_string(
3928				    prop, intval, &unused) == 0);
3929			}
3930 VERIFY(zap_update(mos,
3931 spa->spa_pool_props_object, propname,
3932 8, 1, &intval, tx) == 0);
3934 ASSERT(0); /* not allowed */
3938 case ZPOOL_PROP_DELEGATION:
3939			spa->spa_delegation = intval;
3940			break;
3941 case ZPOOL_PROP_BOOTFS:
3942			spa->spa_bootfs = intval;
3943			break;
3944 case ZPOOL_PROP_FAILUREMODE:
3945 spa->spa_failmode = intval;
3952 /* log internal history if this is not a zpool create */
3953 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY &&
3954 tx->tx_txg != TXG_INITIAL) {
3955 spa_history_internal_log(LOG_POOL_PROPSET,
3956 spa, tx, cr, "%s %lld %s",
3957 nvpair_name(elem), intval, spa_name(spa));
3961 mutex_exit(&spa->spa_props_lock);
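/*
 * Editor's sketch (not part of the original file): the two ZAP value
 * shapes used above.  Strings are stored as (integer_size = 1,
 * num_integers = strlen + 1) byte arrays; numeric and index-typed
 * properties as (8, 1) uint64s.  The property names here are
 * placeholders; the block is compiled out.
 */
#if 0
static void
example_prop_shapes(objset_t *mos, uint64_t propobj, dmu_tx_t *tx)
{
	char *strval = "example-string-value";
	uint64_t intval = 42;

	VERIFY(zap_update(mos, propobj, "example:string",
	    1, strlen(strval) + 1, strval, tx) == 0);
	VERIFY(zap_update(mos, propobj, "example:number",
	    8, 1, &intval, tx) == 0);
}
#endif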
3965 * Sync the specified transaction group. New blocks may be dirtied as
3966 * part of the process, so we iterate until it converges.
3969 spa_sync(spa_t *spa, uint64_t txg)
3971 dsl_pool_t *dp = spa->spa_dsl_pool;
3972 objset_t *mos = spa->spa_meta_objset;
3973 bplist_t *bpl = &spa->spa_sync_bplist;
3974 vdev_t *rvd = spa->spa_root_vdev;
3981 * Lock out configuration changes.
3983 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3985 spa->spa_syncing_txg = txg;
3986 spa->spa_sync_pass = 0;
3989 * If there are any pending vdev state changes, convert them
3990 * into config changes that go out with this transaction group.
3992 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
3993 while (list_head(&spa->spa_state_dirty_list) != NULL) {
3995 * We need the write lock here because, for aux vdevs,
3996 * calling vdev_config_dirty() modifies sav_config.
3997 * This is ugly and will become unnecessary when we
3998 * eliminate the aux vdev wart by integrating all vdevs
3999 * into the root vdev tree.
4001 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4002 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
4003 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
4004 vdev_state_clean(vd);
4005 vdev_config_dirty(vd);
4007 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
4008 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
4010 spa_config_exit(spa, SCL_STATE, FTAG);
4012 VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj));
4014 tx = dmu_tx_create_assigned(dp, txg);
4017 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
4018 * set spa_deflate if we have no raid-z vdevs.
4020 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
4021 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
4024 for (i = 0; i < rvd->vdev_children; i++) {
4025 vd = rvd->vdev_child[i];
4026 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
4029 if (i == rvd->vdev_children) {
4030 spa->spa_deflate = TRUE;
4031 VERIFY(0 == zap_add(spa->spa_meta_objset,
4032 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
4033 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
4037 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
4038 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
4039 dsl_pool_create_origin(dp, tx);
4041 /* Keeping the origin open increases spa_minref */
4042 spa->spa_minref += 3;
4045 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
4046 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
4047 dsl_pool_upgrade_clones(dp, tx);
4051 * If anything has changed in this txg, push the deferred frees
4052 * from the previous txg. If not, leave them alone so that we
4053 * don't generate work on an otherwise idle system.
4055 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
4056 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
4057 !txg_list_empty(&dp->dp_sync_tasks, txg))
4058 spa_sync_deferred_frees(spa, txg);
4061 * Iterate to convergence.
4063	do {
4064		spa->spa_sync_pass++;
4066 spa_sync_config_object(spa, tx);
4067 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
4068 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
4069 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
4070 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
4071 spa_errlog_sync(spa, txg);
4072 dsl_pool_sync(dp, txg);
4074		dirty_vdevs = 0;
4075		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
4076			vdev_sync(vd, txg);
4077			dirty_vdevs++;
4078		}
4080 bplist_sync(bpl, tx);
4081 } while (dirty_vdevs);
4083	bplist_close(bpl);
4085	dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass);
4088 * Rewrite the vdev configuration (which includes the uberblock)
4089 * to commit the transaction group.
4091 * If there are no dirty vdevs, we sync the uberblock to a few
4092 * random top-level vdevs that are known to be visible in the
4093 * config cache (see spa_vdev_add() for a complete description).
4094 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
4098 * We hold SCL_STATE to prevent vdev open/close/etc.
4099 * while we're attempting to write the vdev labels.
4101 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4103 if (list_is_empty(&spa->spa_config_dirty_list)) {
4104			vdev_t *svd[SPA_DVAS_PER_BP];
4105			int svdcount = 0;
4106			int children = rvd->vdev_children;
4107			int c0 = spa_get_random(children);
4108			int c;
4110 for (c = 0; c < children; c++) {
4111 vd = rvd->vdev_child[(c0 + c) % children];
4112				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
4113					continue;
4114				svd[svdcount++] = vd;
4115				if (svdcount == SPA_DVAS_PER_BP)
4116					break;
4117			}
4118 error = vdev_config_sync(svd, svdcount, txg);
4119		} else {
4120			error = vdev_config_sync(rvd->vdev_child,
4121			    rvd->vdev_children, txg);
4122		}
4124 spa_config_exit(spa, SCL_STATE, FTAG);
4126		if (error == 0)
4127			break;
4128		zio_suspend(spa, NULL);
4129		zio_resume_wait(spa);
4130	}
4131	dmu_tx_commit(tx);
4134 * Clear the dirty config list.
4136 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
4137 vdev_config_clean(vd);
4140 * Now that the new config has synced transactionally,
4141 * let it become visible to the config cache.
4143 if (spa->spa_config_syncing != NULL) {
4144 spa_config_set(spa, spa->spa_config_syncing);
4145 spa->spa_config_txg = txg;
4146 spa->spa_config_syncing = NULL;
4149 spa->spa_ubsync = spa->spa_uberblock;
4152 * Clean up the ZIL records for the synced txg.
4154 dsl_pool_zil_clean(dp);
4157 * Update usable space statistics.
4159 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
4160 vdev_sync_done(vd, txg);
4163 * It had better be the case that we didn't dirty anything
4164 * since vdev_config_sync().
4166 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
4167 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
4168 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
4169 ASSERT(bpl->bpl_queue == NULL);
4171 spa_config_exit(spa, SCL_CONFIG, FTAG);
4174 * If any async tasks have been requested, kick them off.
4176 spa_async_dispatch(spa);
4180 * Sync all pools. We don't want to hold the namespace lock across these
4181	 * operations, so we take a reference on the spa_t and drop the lock during
4182	 * the sync.
4185 spa_sync_allpools(void)
4188 mutex_enter(&spa_namespace_lock);
4189 while ((spa = spa_next(spa)) != NULL) {
4190		if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
4191			continue;
4192 spa_open_ref(spa, FTAG);
4193 mutex_exit(&spa_namespace_lock);
4194 txg_wait_synced(spa_get_dsl(spa), 0);
4195 mutex_enter(&spa_namespace_lock);
4196 spa_close(spa, FTAG);
4198 mutex_exit(&spa_namespace_lock);
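/*
 * Editor's sketch (not part of the original file): the hold/drop-lock
 * iteration used above and in the eviction loop below.  Reference the
 * spa, drop the namespace lock around the blocking call, retake it,
 * then drop the hold before advancing.  The helper is hypothetical;
 * the block is compiled out.
 */
#if 0
static void
example_foreach_active_pool(void (*func)(spa_t *))
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE)
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		func(spa);		/* may block */
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
#endif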
4202 * ==========================================================================
4203 * Miscellaneous routines
4204 * ==========================================================================
4208 * Remove all pools in the system.
4216 * Remove all cached state. All pools should be closed now,
4217 * so every spa in the AVL tree should be unreferenced.
4219 mutex_enter(&spa_namespace_lock);
4220 while ((spa = spa_next(NULL)) != NULL) {
4222 * Stop async tasks. The async thread may need to detach
4223 * a device that's been replaced, which requires grabbing
4224 * spa_namespace_lock, so we must drop it here.
4226 spa_open_ref(spa, FTAG);
4227 mutex_exit(&spa_namespace_lock);
4228 spa_async_suspend(spa);
4229 mutex_enter(&spa_namespace_lock);
4230 spa_close(spa, FTAG);
4232 if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
4233			spa_unload(spa);
4234			spa_deactivate(spa);
4235		}
4236		spa_remove(spa);
4237	}
4238 mutex_exit(&spa_namespace_lock);
4242 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
4247	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
4248		return (vd);
4251 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
4252 vd = spa->spa_l2cache.sav_vdevs[i];
4253		if (vd->vdev_guid == guid)
4254			return (vd);
4262 spa_upgrade(spa_t *spa, uint64_t version)
4264 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
4267 * This should only be called for a non-faulted pool, and since a
4268	 * future version would result in an unopenable pool, this shouldn't be
4269	 * possible.
4271 ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION);
4272 ASSERT(version >= spa->spa_uberblock.ub_version);
4274 spa->spa_uberblock.ub_version = version;
4275 vdev_config_dirty(spa->spa_root_vdev);
4277 spa_config_exit(spa, SCL_ALL, FTAG);
4279 txg_wait_synced(spa_get_dsl(spa), 0);
4283 spa_has_spare(spa_t *spa, uint64_t guid)
4287 spa_aux_vdev_t *sav = &spa->spa_spares;
4289 for (i = 0; i < sav->sav_count; i++)
4290		if (sav->sav_vdevs[i]->vdev_guid == guid)
4291			return (B_TRUE);
4293 for (i = 0; i < sav->sav_npending; i++) {
4294 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
4295		    &spareguid) == 0 && spareguid == guid)
4296			return (B_TRUE);
4303 * Check if a pool has an active shared spare device.
4304	 * Note: the reference count of an active spare is 2: once as a spare and once as a replacement.
4307 spa_has_active_shared_spare(spa_t *spa)
4311 spa_aux_vdev_t *sav = &spa->spa_spares;
4313 for (i = 0; i < sav->sav_count; i++) {
4314 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
4315		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
4316		    refcnt > 2)
4317			return (B_TRUE);
4318	}
4320	return (B_FALSE);
4324 * Post a sysevent corresponding to the given event. The 'name' must be one of
4325 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
4326 * filled in from the spa and (optionally) the vdev. This doesn't do anything
4327 * in the userland libzpool, as we don't want consumers to misinterpret ztest
4328 * or zdb as real changes.
4331 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
4335 sysevent_attr_list_t *attr = NULL;
4336 sysevent_value_t value;
4339	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
4340	    SE_SLEEP);
4342 value.value_type = SE_DATA_TYPE_STRING;
4343 value.value.sv_string = spa_name(spa);
4344	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
4345		goto done;
4347 value.value_type = SE_DATA_TYPE_UINT64;
4348 value.value.sv_uint64 = spa_guid(spa);
4349	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
4350		goto done;
4352	if (vd) {
4353		value.value_type = SE_DATA_TYPE_UINT64;
4354 value.value.sv_uint64 = vd->vdev_guid;
4355		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
4356		    SE_SLEEP) != 0)
4357			goto done;
4359 if (vd->vdev_path) {
4360 value.value_type = SE_DATA_TYPE_STRING;
4361 value.value.sv_string = vd->vdev_path;
4362 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
4363			    &value, SE_SLEEP) != 0)
4364				goto done;
4365		}
4366	}
4368	if (sysevent_attach_attributes(ev, attr) != 0)
4369		goto done;
4371	attr = NULL;
4372 (void) log_sysevent(ev, SE_SLEEP, &eid);
4374	done:
4375		if (attr)
4376			sysevent_free_attr(attr);
4377		sysevent_free(ev);