*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/spa_boot.h>
+#include <sys/zfs_ioctl.h>
+
+#ifdef _KERNEL
+#include <sys/zone.h>
+#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
-int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
- /* ISSUE INTR */
- { 1, 1 }, /* ZIO_TYPE_NULL */
- { 1, 8 }, /* ZIO_TYPE_READ */
- { 8, 1 }, /* ZIO_TYPE_WRITE */
- { 1, 1 }, /* ZIO_TYPE_FREE */
- { 1, 1 }, /* ZIO_TYPE_CLAIM */
- { 1, 1 }, /* ZIO_TYPE_IOCTL */
+enum zti_modes {
+ zti_mode_fixed, /* value is # of threads (min 1) */
+ zti_mode_online_percent, /* value is % of online CPUs */
+ zti_mode_tune, /* fill from zio_taskq_tune_* */
+ zti_nmodes
+};
+
+/* Shorthand initializers for one zti_nthreads[] (mode, value) entry. */
+#define ZTI_THREAD_FIX(n) { zti_mode_fixed, (n) }
+#define ZTI_THREAD_PCT(n) { zti_mode_online_percent, (n) }
+#define ZTI_THREAD_TUNE { zti_mode_tune, 0 }
+
+#define ZTI_THREAD_ONE ZTI_THREAD_FIX(1)
+
+/*
+ * Per-ZIO-type taskq configuration: a taskq name prefix plus a
+ * thread-count spec (mode, value) for each taskq type (issue/intr).
+ */
+typedef struct zio_taskq_info {
+ const char *zti_name;
+ struct {
+ enum zti_modes zti_mode;
+ uint_t zti_value;
+ } zti_nthreads[ZIO_TASKQ_TYPES];
+} zio_taskq_info_t;
+
+/* Taskq name suffixes, indexed by ZIO_TASKQ_TYPES. */
+static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
+ "issue", "intr"
+};
+
+const zio_taskq_info_t zio_taskqs[ZIO_TYPES] = {
+ /* ISSUE INTR */
+ { "spa_zio_null", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
+ { "spa_zio_read", { ZTI_THREAD_FIX(8), ZTI_THREAD_TUNE } },
+ { "spa_zio_write", { ZTI_THREAD_TUNE, ZTI_THREAD_FIX(8) } },
+ { "spa_zio_free", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
+ { "spa_zio_claim", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
+ { "spa_zio_ioctl", { ZTI_THREAD_ONE, ZTI_THREAD_ONE } },
};
+/* Tunables consulted when a table entry uses zti_mode_tune. */
+enum zti_modes zio_taskq_tune_mode = zti_mode_online_percent;
+uint_t zio_taskq_tune_value = 80; /* #threads = 80% of # online CPUs */
+
static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
- uint64_t size = spa_get_space(spa);
- uint64_t used = spa_get_alloc(spa);
+ uint64_t size;
+ uint64_t used;
uint64_t cap, version;
zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
- /*
- * readonly properties
- */
- spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src);
-
- cap = (size == 0) ? 0 : (used * 100 / size);
- spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
+ if (spa->spa_root_vdev != NULL) {
+ size = spa_get_space(spa);
+ used = spa_get_alloc(spa);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL,
+ size - used, src);
+
+ cap = (size == 0) ? 0 : (used * 100 / size);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
+
+ spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
+ spa->spa_root_vdev->vdev_state, src);
+
+ version = spa_version(spa);
+ if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
+ src = ZPROP_SRC_DEFAULT;
+ else
+ src = ZPROP_SRC_LOCAL;
+ spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
+ }
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
- spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
- spa->spa_root_vdev->vdev_state, src);
-
- /*
- * settable properties that are not stored in the pool property object.
- */
- version = spa_version(spa);
- if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
- src = ZPROP_SRC_DEFAULT;
- else
- src = ZPROP_SRC_LOCAL;
- spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
+ case ZPOOL_PROP_AUTOEXPAND:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = EINVAL;
break;
case ZPOOL_PROP_BOOTFS:
+ /*
+ * If the pool version is less than SPA_VERSION_BOOTFS,
+ * or the pool is still being created (version == 0),
+ * the bootfs property cannot be set.
+ */
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = ENOTSUP;
break;
return (error);
}
+/*
+ * Apply the 'cachefile' property from nvp, if present: record the
+ * requested cache-file path as a new dirent at the head of
+ * spa_config_list and, when need_sync is set, request an async
+ * config-cache update.  An empty string selects the default
+ * spa_config_path; the special value "none" disables caching
+ * (NULL scd_path).
+ */
+void
+spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
+{
+ char *cachefile;
+ spa_config_dirent_t *dp;
+
+ /* Nothing to do unless 'cachefile' appears in the property list. */
+ if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
+ &cachefile) != 0)
+ return;
+
+ dp = kmem_alloc(sizeof (spa_config_dirent_t),
+ KM_SLEEP);
+
+ if (cachefile[0] == '\0')
+ dp->scd_path = spa_strdup(spa_config_path);
+ else if (strcmp(cachefile, "none") == 0)
+ dp->scd_path = NULL; /* "none": no cache file for this pool */
+ else
+ dp->scd_path = spa_strdup(cachefile);
+
+ list_insert_head(&spa->spa_config_list, dp);
+ if (need_sync)
+ spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
+}
+
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
+ nvpair_t *elem;
+ boolean_t need_sync = B_FALSE;
+ zpool_prop_t prop;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
- return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
- spa, nvp, 3));
+ /*
+ * Scan the list: cachefile and altroot are not stored in the pool's
+ * property object, so a list containing only those needs no sync
+ * task.  Any other (valid) property forces the sync-task path.
+ */
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
+ if ((prop = zpool_name_to_prop(
+ nvpair_name(elem))) == ZPROP_INVAL)
+ return (EINVAL);
+
+ if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
+ continue;
+
+ need_sync = B_TRUE;
+ break;
+ }
+
+ /* Push property changes into the MOS only when actually required. */
+ if (need_sync)
+ return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props,
+ spa, nvp, 3));
+ else
+ return (0);
}
/*
* Activate an uninitialized pool.
*/
static void
-spa_activate(spa_t *spa)
+spa_activate(spa_t *spa, int mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
+ spa->spa_mode = mode;
- spa->spa_normal_class = metaslab_class_create();
- spa->spa_log_class = metaslab_class_create();
+ spa->spa_normal_class = metaslab_class_create(zfs_metaslab_ops);
+ spa->spa_log_class = metaslab_class_create(zfs_metaslab_ops);
for (int t = 0; t < ZIO_TYPES; t++) {
+ const zio_taskq_info_t *ztip = &zio_taskqs[t];
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
- spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
- zio_taskq_threads[t][q], maxclsyspri, 50,
- INT_MAX, TASKQ_PREPOPULATE);
+ enum zti_modes mode = ztip->zti_nthreads[q].zti_mode;
+ uint_t value = ztip->zti_nthreads[q].zti_value;
+ char name[32];
+
+ (void) snprintf(name, sizeof (name),
+ "%s_%s", ztip->zti_name, zio_taskq_types[q]);
+
+ if (mode == zti_mode_tune) {
+ mode = zio_taskq_tune_mode;
+ value = zio_taskq_tune_value;
+ if (mode == zti_mode_tune)
+ mode = zti_mode_online_percent;
+ }
+
+ switch (mode) {
+ case zti_mode_fixed:
+ ASSERT3U(value, >=, 1);
+ value = MAX(value, 1);
+
+ spa->spa_zio_taskq[t][q] = taskq_create(name,
+ value, maxclsyspri, 50, INT_MAX,
+ TASKQ_PREPOPULATE);
+ break;
+
+ case zti_mode_online_percent:
+ spa->spa_zio_taskq[t][q] = taskq_create(name,
+ value, maxclsyspri, 50, INT_MAX,
+ TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
+ break;
+
+ case zti_mode_tune:
+ default:
+ panic("unrecognized mode for "
+ "zio_taskqs[%u]->zti_nthreads[%u] (%u:%u) "
+ "in spa_activate()",
+ t, q, mode, value);
+ break;
+ }
}
}
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
-
+ ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
txg_list_destroy(&spa->spa_vdev_txg_list);
uint_t id, int atype)
{
nvlist_t **child;
- uint_t c, children;
+ uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (EINVAL);
}
- for (c = 0; c < children; c++) {
+ for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
/*
* Wait for any outstanding async I/O to complete.
*/
- mutex_enter(&spa->spa_async_root_lock);
- while (spa->spa_async_root_count != 0)
- cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock);
- mutex_exit(&spa->spa_async_root_lock);
-
- /*
- * Drop and purge level 2 cache
- */
- spa_l2cache_drop(spa);
+ if (spa->spa_async_zio_root != NULL) {
+ (void) zio_wait(spa->spa_async_zio_root);
+ spa->spa_async_zio_root = NULL;
+ }
/*
* Close the dsl pool.
spa->spa_dsl_pool = NULL;
}
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+
+ /*
+ * Drop and purge level 2 cache
+ */
+ spa_l2cache_drop(spa);
+
/*
* Close all vdevs.
*/
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
+
+ spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
}
vd->vdev_top = vd;
+ vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
nvlist_t **l2cache;
uint_t nl2cache;
int i, j, oldnvdevs;
- uint64_t guid, size;
+ uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
(void) vdev_validate_aux(vd);
- if (!vdev_is_dead(vd)) {
- size = vdev_get_rsize(vd);
- l2arc_add_vdev(spa, vd,
- VDEV_LABEL_START_SIZE,
- size - VDEV_LABEL_START_SIZE);
- }
+ if (!vdev_is_dead(vd))
+ l2arc_add_vdev(spa, vd);
}
}
vd = oldvdevs[i];
if (vd != NULL) {
- if ((spa_mode & FWRITE) &&
- spa_l2cache_exists(vd->vdev_guid, &pool) &&
- pool != 0ULL &&
- l2arc_vdev_present(vd)) {
+ if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
+ pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
- }
(void) vdev_close(vd);
spa_l2cache_remove(vd);
}
dmu_buf_rele(db, FTAG);
packed = kmem_alloc(nvsize, KM_SLEEP);
- error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed);
+ error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
+ DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
kmem_free(packed, nvsize);
static void
spa_check_removed(vdev_t *vd)
{
- int c;
-
- for (c = 0; c < vd->vdev_children; c++)
+ for (int c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) {
}
/*
+ * Load the slog device state from the config object since it's possible
+ * that the label does not contain the most up-to-date information.
+ */
+void
+spa_load_log_state(spa_t *spa)
+{
+ nvlist_t *nv, *nvroot, **child;
+ uint64_t is_log;
+ uint_t children;
+ vdev_t *rvd = spa->spa_root_vdev;
+
+ /* Read the vdev tree out of the pool's config object. */
+ VERIFY(load_nvlist(spa, spa->spa_config_object, &nv) == 0);
+ VERIFY(nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+ VERIFY(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0);
+
+ /*
+ * Apply the saved state to each top-level vdev flagged as a log
+ * device.  NOTE(review): assumes rvd->vdev_child[] parallels the
+ * config's child array ordering — confirm against callers.
+ */
+ for (int c = 0; c < children; c++) {
+ vdev_t *tvd = rvd->vdev_child[c];
+
+ if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+ &is_log) == 0 && is_log)
+ vdev_load_log_state(tvd, child[c]);
+ }
+ nvlist_free(nv);
+}
+
+/*
* Check for missing log devices
*/
int
return (1);
}
break;
-
- case SPA_LOG_CLEAR:
- (void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL,
- DS_FIND_CHILDREN);
- break;
}
- spa->spa_log_state = SPA_LOG_GOOD;
return (0);
}
uint64_t pool_guid;
uint64_t version;
uint64_t autoreplace = 0;
+ int orig_mode = spa->spa_mode;
char *ereport = FM_EREPORT_ZFS_POOL;
+ /*
+ * If this is an untrusted config, access the pool in read-only mode.
+ * This prevents things like resilvering recently removed devices.
+ */
+ if (!mosconfig)
+ spa->spa_mode = FREAD;
+
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa->spa_load_state = state;
spa->spa_load_guid = pool_guid;
/*
+ * Create "The Godfather" zio to hold all async IOs
+ */
+ spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
+
+ /*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
goto out;
/*
- * Validate the labels for all leaf vdevs. We need to grab the config
- * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER.
+ * We need to validate the vdev labels against the configuration that
+ * we have in hand, which is dependent on the setting of mosconfig. If
+ * mosconfig is true then we're validating the vdev labels based on
+ * that config. Otherwise, we're validating against the cached config
+ * (zpool.cache) that was read when we loaded the zfs module, and then
+ * later we will recursively call spa_load() and validate against
+ * the vdev config.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
-
if (error != 0)
goto out;
VERIFY(nvlist_lookup_string(newconfig,
ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
+#ifdef _KERNEL
+ myhostid = zone_get_hostid(NULL);
+#else /* _KERNEL */
+ /*
+ * We're emulating the system's hostid in userland, so
+ * we can't use zone_get_hostid().
+ */
(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
+#endif /* _KERNEL */
if (hostid != 0 && myhostid != 0 &&
- (unsigned long)hostid != myhostid) {
+ hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%lx). "
spa_config_set(spa, newconfig);
spa_unload(spa);
spa_deactivate(spa);
- spa_activate(spa);
+ spa_activate(spa, orig_mode);
return (spa_load(spa, newconfig, state, B_TRUE));
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
+ spa_load_log_state(spa);
+
if (spa_check_logs(spa)) {
vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LOG);
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
sizeof (uint64_t), 1, &spa->spa_failmode);
+ (void) zap_lookup(spa->spa_meta_objset,
+ spa->spa_pool_props_object,
+ zpool_prop_to_name(ZPOOL_PROP_AUTOEXPAND),
+ sizeof (uint64_t), 1, &spa->spa_autoexpand);
}
/*
goto out;
}
- if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) {
+ if (spa_writeable(spa)) {
dmu_tx_t *tx;
int need_update = B_FALSE;
- int c;
+
+ ASSERT(state != SPA_LOAD_TRYIMPORT);
/*
* Claim log blocks that haven't been committed yet.
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
+ spa->spa_log_state = SPA_LOG_GOOD;
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
state == SPA_LOAD_IMPORT)
need_update = B_TRUE;
- for (c = 0; c < rvd->vdev_children; c++)
+ for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
+
+ /*
+ * Check all DTLs to see if anything needs resilvering.
+ */
+ if (vdev_resilver_needed(rvd, NULL, NULL))
+ spa_async_request(spa, SPA_ASYNC_RESILVER);
}
error = 0;
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
- spa_activate(spa);
+ spa_activate(spa, spa_mode_global);
error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE);
uint_t vsc;
uint64_t pool;
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
+
if (spa->spa_spares.sav_count == 0)
return;
vdev_stat_t *vs;
uint_t vsc;
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
+
if (spa->spa_l2cache.sav_count == 0)
return;
- spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
-
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
vdev_get_stats(vd, vs);
}
}
-
- spa_config_exit(spa, SCL_CONFIG, FTAG);
}
int
*config = NULL;
error = spa_open_common(name, &spa, FTAG, config);
- if (spa && *config != NULL) {
- VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT,
- spa_get_errlog_size(spa)) == 0);
+ if (spa != NULL) {
+ /*
+ * This still leaves a window of inconsistency where the spares
+ * or l2cache devices could change and the config would be
+ * self-inconsistent.
+ */
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
- if (spa_suspended(spa))
+ if (*config != NULL) {
VERIFY(nvlist_add_uint64(*config,
- ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0);
+ ZPOOL_CONFIG_ERRCOUNT,
+ spa_get_errlog_size(spa)) == 0);
+
+ if (spa_suspended(spa))
+ VERIFY(nvlist_add_uint64(*config,
+ ZPOOL_CONFIG_SUSPENDED,
+ spa->spa_failmode) == 0);
- spa_add_spares(spa, *config);
- spa_add_l2cache(spa, *config);
+ spa_add_spares(spa, *config);
+ spa_add_l2cache(spa, *config);
+ }
}
/*
}
}
- if (spa != NULL)
+ if (spa != NULL) {
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
+ }
return (error);
}
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
- if ((spa_mode & FWRITE) &&
- spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL &&
- l2arc_vdev_present(vd)) {
+ if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
+ pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
- }
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
vdev_clear_stats(vd);
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
- int c, error = 0;
+ int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(pool, altroot);
- spa_activate(spa);
+ spa_activate(spa, spa_mode_global);
spa->spa_uberblock.ub_txg = txg - 1;
if (props && (error = spa_prop_validate(spa, props))) {
- spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa->spa_ubsync = spa->spa_uberblock;
/*
+ * Create "The Godfather" zio to hold all async IOs
+ */
+ spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER);
+
+ /*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg,
VDEV_ALLOC_ADD)) == 0) {
- for (c = 0; c < rvd->vdev_children; c++)
- vdev_init(rvd->vdev_child[c], txg);
- vdev_config_dirty(rvd);
+ for (int c = 0; c < rvd->vdev_children; c++) {
+ vdev_metaslab_set_size(rvd->vdev_child[c]);
+ vdev_expand(rvd->vdev_child[c], txg);
+ }
}
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
- if (props)
+ spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
+ if (props != NULL) {
+ spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(spa, props, CRED(), tx);
+ }
dmu_tx_commit(tx);
if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL)
(void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE);
- mutex_exit(&spa_namespace_lock);
-
spa->spa_minref = refcount_count(&spa->spa_refcount);
- return (0);
-}
-
-/*
- * Import the given pool into the system. We set up the necessary spa_t and
- * then call spa_load() to do the dirty work.
- */
-static int
-spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props,
- boolean_t isroot, boolean_t allowfaulted)
-{
- spa_t *spa;
- char *altroot = NULL;
- int error, loaderr;
- nvlist_t *nvroot;
- nvlist_t **spares, **l2cache;
- uint_t nspares, nl2cache;
-
- /*
- * If a pool with this name exists, return failure.
- */
- mutex_enter(&spa_namespace_lock);
- if ((spa = spa_lookup(pool)) != NULL) {
- if (isroot) {
- /*
- * Remove the existing root pool from the
- * namespace so that we can replace it with
- * the correct config we just read in.
- */
- ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
- spa_remove(spa);
- } else {
- mutex_exit(&spa_namespace_lock);
- return (EEXIST);
- }
- }
-
- /*
- * Create and initialize the spa structure.
- */
- (void) nvlist_lookup_string(props,
- zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
- spa = spa_add(pool, altroot);
- spa_activate(spa);
-
- if (allowfaulted)
- spa->spa_import_faulted = B_TRUE;
- spa->spa_is_root = isroot;
-
- /*
- * Pass off the heavy lifting to spa_load().
- * Pass TRUE for mosconfig (unless this is a root pool) because
- * the user-supplied config is actually the one to trust when
- * doing an import.
- */
- loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot);
-
- spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
- /*
- * Toss any existing sparelist, as it doesn't have any validity anymore,
- * and conflicts with spa_has_spare().
- */
- if (!isroot && spa->spa_spares.sav_config) {
- nvlist_free(spa->spa_spares.sav_config);
- spa->spa_spares.sav_config = NULL;
- spa_load_spares(spa);
- }
- if (!isroot && spa->spa_l2cache.sav_config) {
- nvlist_free(spa->spa_l2cache.sav_config);
- spa->spa_l2cache.sav_config = NULL;
- spa_load_l2cache(spa);
- }
-
- VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
- if (error == 0)
- error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE);
- if (error == 0)
- error = spa_validate_aux(spa, nvroot, -1ULL,
- VDEV_ALLOC_L2CACHE);
- spa_config_exit(spa, SCL_ALL, FTAG);
-
- if (error != 0 || (props && (error = spa_prop_set(spa, props)))) {
- if (loaderr != 0 && loaderr != EINVAL && allowfaulted) {
- /*
- * If we failed to load the pool, but 'allowfaulted' is
- * set, then manually set the config as if the config
- * passed in was specified in the cache file.
- */
- error = 0;
- spa->spa_import_faulted = B_FALSE;
- if (spa->spa_config == NULL)
- spa->spa_config = spa_config_generate(spa,
- NULL, -1ULL, B_TRUE);
- spa_unload(spa);
- spa_deactivate(spa);
- spa_config_sync(spa, B_FALSE, B_TRUE);
- } else {
- spa_unload(spa);
- spa_deactivate(spa);
- spa_remove(spa);
- }
- mutex_exit(&spa_namespace_lock);
- return (error);
- }
-
- /*
- * Override any spares and level 2 cache devices as specified by
- * the user, as these may have correct device names/devids, etc.
- */
- if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
- &spares, &nspares) == 0) {
- if (spa->spa_spares.sav_config)
- VERIFY(nvlist_remove(spa->spa_spares.sav_config,
- ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
- else
- VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
- NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
- ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
- spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
- spa_load_spares(spa);
- spa_config_exit(spa, SCL_ALL, FTAG);
- spa->spa_spares.sav_sync = B_TRUE;
- }
- if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
- &l2cache, &nl2cache) == 0) {
- if (spa->spa_l2cache.sav_config)
- VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
- ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
- else
- VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
- NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
- ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
- spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
- spa_load_l2cache(spa);
- spa_config_exit(spa, SCL_ALL, FTAG);
- spa->spa_l2cache.sav_sync = B_TRUE;
- }
-
- if (spa_mode & FWRITE) {
- /*
- * Update the config cache to include the newly-imported pool.
- */
- spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot);
- }
-
- spa->spa_import_faulted = B_FALSE;
mutex_exit(&spa_namespace_lock);
return (0);
#ifdef _KERNEL
/*
- * Build a "root" vdev for a top level vdev read in from a rootpool
- * device label.
+ * Get the root pool information from the root disk, then import the root pool
+ * during the system boot up time.
*/
-static void
-spa_build_rootpool_config(nvlist_t *config)
+extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
+
+static nvlist_t *
+spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid)
{
+ nvlist_t *config;
nvlist_t *nvtop, *nvroot;
uint64_t pgid;
+ if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0)
+ return (NULL);
+
/*
* Add this top-level vdev to the child array.
*/
- VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop)
- == 0);
- VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid)
- == 0);
+ VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvtop) == 0);
+ VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &pgid) == 0);
+ VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0);
/*
* Put this pool's top-level vdevs into a root vdev.
*/
VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT)
- == 0);
+ VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
+ VDEV_TYPE_ROOT) == 0);
VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
*/
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
nvlist_free(nvroot);
+ return (config);
}
/*
- * Get the root pool information from the root disk, then import the root pool
- * during the system boot up time.
+ * Walk the vdev tree and see if we can find a device with "better"
+ * configuration. A configuration is "better" if the label on that
+ * device has a more recent txg.
*/
-extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **);
-
-int
-spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf,
- uint64_t *besttxg)
-{
- nvlist_t *config;
- uint64_t txg;
- int error;
-
- if (error = vdev_disk_read_rootlabel(devpath, devid, &config))
- return (error);
-
- VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
-
- if (bestconf != NULL)
- *bestconf = config;
- else
- nvlist_free(config);
- *besttxg = txg;
- return (0);
-}
-
-boolean_t
-spa_rootdev_validate(nvlist_t *nv)
-{
- uint64_t ival;
-
- if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
- nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
- nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
- return (B_FALSE);
-
- return (B_TRUE);
-}
-
-
-/*
- * Given the boot device's physical path or devid, check if the device
- * is in a valid state. If so, return the configuration from the vdev
- * label.
- */
-int
-spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf)
+static void
+spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg)
{
- nvlist_t *conf = NULL;
- uint64_t txg = 0;
- nvlist_t *nvtop, **child;
- char *type;
- char *bootpath = NULL;
- uint_t children, c;
- char *tmp;
- int error;
-
- if (devpath && ((tmp = strchr(devpath, ' ')) != NULL))
- *tmp = '\0';
- if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) {
- cmn_err(CE_NOTE, "error reading device label");
- return (error);
- }
- if (txg == 0) {
- cmn_err(CE_NOTE, "this device is detached");
- nvlist_free(conf);
- return (EINVAL);
- }
-
- VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE,
- &nvtop) == 0);
- VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0);
-
- if (strcmp(type, VDEV_TYPE_DISK) == 0) {
- if (spa_rootdev_validate(nvtop)) {
- goto out;
- } else {
- nvlist_free(conf);
- return (EINVAL);
- }
- }
+ for (int c = 0; c < vd->vdev_children; c++)
+ spa_alt_rootvdev(vd->vdev_child[c], avd, txg);
- ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0);
+ if (vd->vdev_ops->vdev_op_leaf) {
+ nvlist_t *label;
+ uint64_t label_txg;
- VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN,
- &child, &children) == 0);
+ if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid,
+ &label) != 0)
+ return;
- /*
- * Go thru vdevs in the mirror to see if the given device
- * has the most recent txg. Only the device with the most
- * recent txg has valid information and should be booted.
- */
- for (c = 0; c < children; c++) {
- char *cdevid, *cpath;
- uint64_t tmptxg;
+ VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
+ &label_txg) == 0);
- if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH,
- &cpath) != 0)
- return (EINVAL);
- if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID,
- &cdevid) != 0)
- return (EINVAL);
- if ((spa_check_rootconf(cpath, cdevid, NULL,
- &tmptxg) == 0) && (tmptxg > txg)) {
- txg = tmptxg;
- VERIFY(nvlist_lookup_string(child[c],
- ZPOOL_CONFIG_PATH, &bootpath) == 0);
+ /*
+ * Do we have a better boot device?
+ */
+ if (label_txg > *txg) {
+ *txg = label_txg;
+ *avd = vd;
}
+ nvlist_free(label);
}
-
- /* Does the best device match the one we've booted from? */
- if (bootpath) {
- cmn_err(CE_NOTE, "try booting from '%s'", bootpath);
- return (EINVAL);
- }
-out:
- *bestconf = conf;
- return (0);
}
/*
int
spa_import_rootpool(char *devpath, char *devid)
{
- nvlist_t *conf = NULL;
+ spa_t *spa;
+ vdev_t *rvd, *bvd, *avd = NULL;
+ nvlist_t *config, *nvtop;
+ uint64_t guid, txg;
char *pname;
int error;
/*
- * Get the vdev pathname and configuation from the most
- * recently updated vdev (highest txg).
+ * Read the label from the boot device and generate a configuration.
*/
- if (error = spa_get_rootconf(devpath, devid, &conf))
- goto msg_out;
+ if ((config = spa_generate_rootconf(devpath, devid, &guid)) == NULL) {
+ cmn_err(CE_NOTE, "Can not read the pool label from '%s'",
+ devpath);
+ return (EIO);
+ }
+
+ VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &pname) == 0);
+ VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
+
+ mutex_enter(&spa_namespace_lock);
+ if ((spa = spa_lookup(pname)) != NULL) {
+ /*
+ * Remove the existing root pool from the namespace so that we
+ * can replace it with the correct config we just read in.
+ */
+ spa_remove(spa);
+ }
+
+ spa = spa_add(pname, NULL);
+ spa->spa_is_root = B_TRUE;
/*
- * Add type "root" vdev to the config.
+ * Build up a vdev tree based on the boot device's label config.
*/
- spa_build_rootpool_config(conf);
+ VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+ &nvtop) == 0);
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+ error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
+ VDEV_ALLOC_ROOTPOOL);
+ spa_config_exit(spa, SCL_ALL, FTAG);
+ if (error) {
+ mutex_exit(&spa_namespace_lock);
+ nvlist_free(config);
+ cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
+ pname);
+ return (error);
+ }
- VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0);
+ /*
+ * Get the boot vdev.
+ */
+ if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
+ cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
+ (u_longlong_t)guid);
+ error = ENOENT;
+ goto out;
+ }
/*
- * We specify 'allowfaulted' for this to be treated like spa_open()
- * instead of spa_import(). This prevents us from marking vdevs as
- * persistently unavailable, and generates FMA ereports as if it were a
- * pool open, not import.
+ * Determine if there is a better boot device.
*/
- error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE);
- ASSERT(error != EEXIST);
+ avd = bvd;
+ spa_alt_rootvdev(rvd, &avd, &txg);
+ if (avd != bvd) {
+ cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
+ "try booting from '%s'", avd->vdev_path);
+ error = EINVAL;
+ goto out;
+ }
- nvlist_free(conf);
- return (error);
+ /*
+ * If the boot device is part of a spare vdev then ensure that
+ * we're booting off the active spare.
+ */
+ if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
+ !bvd->vdev_isspare) {
+ cmn_err(CE_NOTE, "The boot device is currently spared. Please "
+ "try booting from '%s'",
+ bvd->vdev_parent->vdev_child[1]->vdev_path);
+ error = EINVAL;
+ goto out;
+ }
-msg_out:
- cmn_err(CE_NOTE, "\n"
- " *************************************************** \n"
- " * This device is not bootable! * \n"
- " * It is either offlined or detached or faulted. * \n"
- " * Please try to boot from a different device. * \n"
- " *************************************************** ");
+ VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+ error = 0;
+out:
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+ vdev_free(rvd);
+ spa_config_exit(spa, SCL_ALL, FTAG);
+ mutex_exit(&spa_namespace_lock);
+ nvlist_free(config);
return (error);
}
+
#endif
/*
- * Import a non-root pool into the system.
+ * Take a pool and insert it into the namespace as if it had been loaded at
+ * boot.
*/
int
-spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
+spa_import_verbatim(const char *pool, nvlist_t *config, nvlist_t *props)
{
-	return (spa_import_common(pool, config, props, B_FALSE, B_FALSE));
+	spa_t *spa;
+	char *altroot = NULL;
+
+	mutex_enter(&spa_namespace_lock);
+	if (spa_lookup(pool) != NULL) {	/* pool name already in the namespace */
+		mutex_exit(&spa_namespace_lock);
+		return (EEXIST);
+	}
+
+	(void) nvlist_lookup_string(props,
+	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);	/* optional */
+	spa = spa_add(pool, altroot);
+
+	VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);	/* config taken verbatim, no spa_load() */
+
+	if (props != NULL)
+		spa_configfile_set(spa, props, B_FALSE);
+
+	spa_config_sync(spa, B_FALSE, B_TRUE);	/* record pool in the cache file */
+
+	mutex_exit(&spa_namespace_lock);
+
+	return (0);
}
+/*
+ * Import a non-root pool into the system.
+ */
int
-spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props)
+spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
{
-	return (spa_import_common(pool, config, props, B_FALSE, B_TRUE));
+	spa_t *spa;
+	char *altroot = NULL;
+	int error;
+	nvlist_t *nvroot;
+	nvlist_t **spares, **l2cache;
+	uint_t nspares, nl2cache;
+
+	/*
+	 * If a pool with this name exists, return failure.
+	 */
+	mutex_enter(&spa_namespace_lock);
+	if ((spa = spa_lookup(pool)) != NULL) {	/* name already in use */
+		mutex_exit(&spa_namespace_lock);
+		return (EEXIST);
+	}
+
+	/*
+	 * Create and initialize the spa structure.
+	 */
+	(void) nvlist_lookup_string(props,
+	    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
+	spa = spa_add(pool, altroot);
+	spa_activate(spa, spa_mode_global);
+
+	/*
+	 * Don't start async tasks until we know everything is healthy.
+	 */
+	spa_async_suspend(spa);
+
+	/*
+	 * Pass off the heavy lifting to spa_load().  Pass TRUE for mosconfig
+	 * because the user-supplied config is actually the one to trust when
+	 * doing an import.
+	 */
+	error = spa_load(spa, config, SPA_LOAD_IMPORT, B_TRUE);
+
+	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+	/*
+	 * Toss any existing sparelist, as it doesn't have any validity
+	 * anymore, and conflicts with spa_has_spare().
+	 */
+	if (spa->spa_spares.sav_config) {
+		nvlist_free(spa->spa_spares.sav_config);
+		spa->spa_spares.sav_config = NULL;
+		spa_load_spares(spa);
+	}
+	if (spa->spa_l2cache.sav_config) {
+		nvlist_free(spa->spa_l2cache.sav_config);
+		spa->spa_l2cache.sav_config = NULL;
+		spa_load_l2cache(spa);
+	}
+
+	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+	if (error == 0)
+		error = spa_validate_aux(spa, nvroot, -1ULL,
+		    VDEV_ALLOC_SPARE);	/* validate user-supplied spares */
+	if (error == 0)
+		error = spa_validate_aux(spa, nvroot, -1ULL,
+		    VDEV_ALLOC_L2CACHE);	/* validate user-supplied l2cache */
+	spa_config_exit(spa, SCL_ALL, FTAG);
+
+	if (props != NULL)
+		spa_configfile_set(spa, props, B_FALSE);
+
+	if (error != 0 || (props && spa_writeable(spa) &&
+	    (error = spa_prop_set(spa, props)))) {	/* load or prop set failed */
+		spa_unload(spa);
+		spa_deactivate(spa);
+		spa_remove(spa);
+		mutex_exit(&spa_namespace_lock);
+		return (error);
+	}
+
+	spa_async_resume(spa);	/* pool is healthy: async tasks may run now */
+
+	/*
+	 * Override any spares and level 2 cache devices as specified by
+	 * the user, as these may have correct device names/devids, etc.
+	 */
+	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+	    &spares, &nspares) == 0) {
+		if (spa->spa_spares.sav_config)
+			VERIFY(nvlist_remove(spa->spa_spares.sav_config,
+			    ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
+		else
+			VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
+			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
+		VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
+		    ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
+		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+		spa_load_spares(spa);
+		spa_config_exit(spa, SCL_ALL, FTAG);
+		spa->spa_spares.sav_sync = B_TRUE;
+	}
+	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+	    &l2cache, &nl2cache) == 0) {
+		if (spa->spa_l2cache.sav_config)
+			VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
+			    ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
+		else
+			VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
+			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
+		VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
+		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
+		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+		spa_load_l2cache(spa);
+		spa_config_exit(spa, SCL_ALL, FTAG);
+		spa->spa_l2cache.sav_sync = B_TRUE;
+	}
+
+	if (spa_writeable(spa)) {
+		/*
+		 * Update the config cache to include the newly-imported pool.
+		 */
+		spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, B_FALSE);
+	}
+
+	/*
+	 * It's possible that the pool was expanded while it was exported.
+	 * We kick off an async task to handle this for us.
+	 */
+	spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
+
+	mutex_exit(&spa_namespace_lock);
+
+	return (0);
}
char *poolname;
spa_t *spa;
uint64_t state;
+ int error;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, NULL);
- spa_activate(spa);
+ spa_activate(spa, FREAD);
/*
* Pass off the heavy lifting to spa_load().
* Pass TRUE for mosconfig because the user-supplied config
* is actually the one to trust when doing an import.
*/
- (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
+ error = spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE);
/*
* If 'tryconfig' was at least parsable, return the current config.
* copy it out so that external consumers can tell which
* pools are bootable.
*/
- if (spa->spa_bootfs) {
+ if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
/*
* Add the list of hot spares and level 2 cache devices.
*/
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
- * configuration from the cache afterwards.
+ * configuration from the cache afterwards. If the 'hardforce' flag is set, then
+ * we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
- boolean_t force)
+ boolean_t force, boolean_t hardforce)
{
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
- if (!(spa_mode & FWRITE))
+ if (!(spa_mode_global & FWRITE))
return (EROFS);
mutex_enter(&spa_namespace_lock);
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
- if (new_state != POOL_STATE_UNINITIALIZED) {
+ if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
spa->spa_final_txg = spa_last_synced_txg(spa) + 1;
VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
if (new_state != POOL_STATE_UNINITIALIZED) {
- spa_config_sync(spa, B_TRUE, B_TRUE);
+ if (!hardforce)
+ spa_config_sync(spa, B_TRUE, B_TRUE);
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
int
spa_destroy(char *pool)
{
-	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE));
+	return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
+	    B_FALSE, B_FALSE));	/* force, hardforce */
}
/*
* Export a storage pool.
*/
int
-spa_export(char *pool, nvlist_t **oldconfig, boolean_t force)
+spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
+    boolean_t hardforce)	/* hardforce: skip label sync / cache removal */
{
-	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force));
+	return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
+	    force, hardforce));
}
/*
spa_reset(char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
- B_FALSE));
+ B_FALSE, B_FALSE));
}
/*
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg;
- int c, error;
+ int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
/*
* Transfer each new top-level vdev from vd to rvd.
*/
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
}
/*
- * Compare the new device size with the replaceable/attachable
- * device size.
+ * Make sure the new device is big enough.
*/
- if (newvd->vdev_psize < vdev_get_rsize(oldvd))
+ if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
newvd->vdev_id = pvd->vdev_children;
vdev_add_child(pvd, newvd);
- /*
- * If newvd is smaller than oldvd, but larger than its rsize,
- * the addition of newvd may have decreased our parent's asize.
- */
- pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize);
-
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
*/
open_txg = txg + TXG_CONCURRENT_STATES - 1;
- mutex_enter(&newvd->vdev_dtl_lock);
- space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL,
- open_txg - TXG_INITIAL + 1);
- mutex_exit(&newvd->vdev_dtl_lock);
+ vdev_dtl_dirty(newvd, DTL_MISSING,
+ TXG_INITIAL, open_txg - TXG_INITIAL + 1);
- if (newvd->vdev_isspare)
+ if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
+ spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
+ }
+
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
* is a replacing vdev.
*/
int
-spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
+spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
- int c, t, error;
+ int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
pvd = vd->vdev_parent;
/*
+ * If the parent/child relationship is not as expected, don't do it.
+ * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
+ * vdev that's replacing B with C. The user's intent in replacing
+ * is to go from M(A,B) to M(A,C). If the user decides to cancel
+ * the replace by detaching C, the expected behavior is to end up
+ * M(A,B). But suppose that right after deciding to detach C,
+ * the replacement of B completes. We would have M(A,C), and then
+ * ask to detach C, which would leave us with just A -- not what
+ * the user wanted. To prevent this, we make sure that the
+ * parent/child relationship hasn't changed -- in this example,
+ * that C's parent is still the replacing vdev R.
+ */
+ if (pvd->vdev_guid != pguid && pguid != 0)
+ return (spa_vdev_exit(spa, NULL, txg, EBUSY));
+
+ /*
* If replace_done is specified, only remove this device if it's
* the first child of a replacing vdev. For the 'spare' vdev, either
* disk can be removed.
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
- * If there's only one replica, you can't detach it.
+ * If this device has the only valid copy of some data,
+ * we cannot safely detach it.
*/
- if (pvd->vdev_children <= 1)
+ if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
- /*
- * If all siblings have non-empty DTLs, this device may have the only
- * valid copy of the data, which means we cannot safely detach it.
- *
- * XXX -- as in the vdev_offline() case, we really want a more
- * precise DTL check.
- */
- for (c = 0; c < pvd->vdev_children; c++) {
- uint64_t dirty;
-
- cvd = pvd->vdev_child[c];
- if (cvd == vd)
- continue;
- if (vdev_is_dead(cvd))
- continue;
- mutex_enter(&cvd->vdev_dtl_lock);
- dirty = cvd->vdev_dtl_map.sm_space |
- cvd->vdev_dtl_scrub.sm_space;
- mutex_exit(&cvd->vdev_dtl_lock);
- if (!dirty)
- break;
- }
-
- if (c == pvd->vdev_children)
- return (spa_vdev_exit(spa, NULL, txg, EBUSY));
+ ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* active spare list for the pool.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
- vd->vdev_id == 0)
+ vd->vdev_id == 0 && pvd->vdev_child[1]->vdev_isspare)
unspare = B_TRUE;
/*
/*
* If we need to remove the remaining child from the list of hot spares,
- * do it now, marking the vdev as no longer a spare in the process. We
- * must do this before vdev_remove_parent(), because that can change the
- * GUID if it creates a new toplevel GUID.
+ * do it now, marking the vdev as no longer a spare in the process.
+ * We must do this before vdev_remove_parent(), because that can
+ * change the GUID if it creates a new toplevel GUID. For a similar
+ * reason, we must remove the spare now, in the same txg as the detach;
+ * otherwise someone could attach a new sibling, change the GUID, and
+ * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
+ (void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
}
/*
vdev_propagate_state(cvd);
/*
- * If the device we just detached was smaller than the others, it may be
- * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init()
- * can't fail because the existing metaslabs are already in core, so
- * there's nothing to read from disk.
+ * If the 'autoexpand' property is set on the pool then automatically
+ * try to expand the size of the pool. For example if the device we
+ * just detached was smaller than the others, it may be possible to
+ * add metaslabs (i.e. grow the pool). We need to reopen the vdev
+ * first so that we can obtain the updated sizes of the leaf vdevs.
*/
- VERIFY(vdev_metaslab_init(tvd, txg) == 0);
+ if (spa->spa_autoexpand) {
+ vdev_reopen(tvd);
+ vdev_expand(tvd, txg);
+ }
vdev_config_dirty(tvd);
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
- for (t = 0; t < TXG_SIZE; t++)
+ for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
* list of every other pool.
*/
if (unspare) {
+ spa_t *myspa = spa;
spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa->spa_state != POOL_STATE_ACTIVE)
continue;
+ if (spa == myspa)
+ continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
vdev_t *vd;
nvlist_t **spares, **l2cache, *nv;
uint_t nspares, nl2cache;
- uint64_t txg;
+ uint64_t txg = 0;
int error = 0;
+ boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
- txg = spa_vdev_enter(spa);
+ if (!locked)
+ txg = spa_vdev_enter(spa);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
error = ENOENT;
}
- return (spa_vdev_exit(spa, NULL, txg, error));
+ if (!locked)
+ return (spa_vdev_exit(spa, NULL, txg, error));
+
+ return (error);
}
/*
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
- int c;
- for (c = 0; c < vd->vdev_children; c++) {
+ for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
oldvd = vd->vdev_child[0];
newvd = vd->vdev_child[1];
- mutex_enter(&newvd->vdev_dtl_lock);
- if (newvd->vdev_dtl_map.sm_space == 0 &&
- newvd->vdev_dtl_scrub.sm_space == 0) {
- mutex_exit(&newvd->vdev_dtl_lock);
+ if (vdev_dtl_empty(newvd, DTL_MISSING) &&
+ !vdev_dtl_required(oldvd))
return (oldvd);
- }
- mutex_exit(&newvd->vdev_dtl_lock);
}
/*
newvd = vd->vdev_child[0];
oldvd = vd->vdev_child[1];
- mutex_enter(&newvd->vdev_dtl_lock);
if (newvd->vdev_unspare &&
- newvd->vdev_dtl_map.sm_space == 0 &&
- newvd->vdev_dtl_scrub.sm_space == 0) {
+ vdev_dtl_empty(newvd, DTL_MISSING) &&
+ !vdev_dtl_required(oldvd)) {
newvd->vdev_unspare = 0;
- mutex_exit(&newvd->vdev_dtl_lock);
return (oldvd);
}
- mutex_exit(&newvd->vdev_dtl_lock);
}
return (NULL);
static void
spa_vdev_resilver_done(spa_t *spa)
{
-	vdev_t *vd;
-	vdev_t *pvd;
-	uint64_t guid;
-	uint64_t pguid = 0;
+	vdev_t *vd, *pvd, *ppvd;
+	uint64_t guid, sguid, pguid, ppguid;
-	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
+		pvd = vd->vdev_parent;
+		ppvd = pvd->vdev_parent;	/* grandparent, checked for spare ops below */
		guid = vd->vdev_guid;
+		pguid = pvd->vdev_guid;
+		ppguid = ppvd->vdev_guid;
+		sguid = 0;	/* guid of the original hot spare, if any */
		/*
		 * If we have just finished replacing a hot spared device, then
		 * we need to detach the parent's first child (the original hot
		 * spare) as well.
		 */
-		pvd = vd->vdev_parent;
-		if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops &&
-		    pvd->vdev_id == 0) {
+		if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0) {
			ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
-			ASSERT(pvd->vdev_parent->vdev_children == 2);
-			pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid;
+			ASSERT(ppvd->vdev_children == 2);
+			sguid = ppvd->vdev_child[1]->vdev_guid;	/* the spare */
		}
-		spa_config_exit(spa, SCL_CONFIG, FTAG);
-		if (spa_vdev_detach(spa, guid, B_TRUE) != 0)
+		spa_config_exit(spa, SCL_ALL, FTAG);
+		if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)	/* pguid guards against parent change */
			return;
-		if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0)
+		if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
			return;
-		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	}
-	spa_config_exit(spa, SCL_CONFIG, FTAG);
+	spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
- * Update the stored path for this vdev. Dirty the vdev configuration, relying
- * on spa_vdev_enter/exit() to synchronize the labels and cache.
+ * Update the stored path or FRU for this vdev. Dirty the vdev configuration,
+ * relying on spa_vdev_enter/exit() to synchronize the labels and cache.
*/
int
-spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
+spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
+    boolean_t ispath)	/* B_TRUE: set vdev path; B_FALSE: set vdev FRU */
{
	vdev_t *vd;
	uint64_t txg;
	txg = spa_vdev_enter(spa);
-	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) {
-		/*
-		 * Determine if this is a reference to a hot spare device.  If
-		 * it is, update the path manually as there is no associated
-		 * vdev_t that can be synced to disk.
-		 */
-		nvlist_t **spares;
-		uint_t i, nspares;
-
-		if (spa->spa_spares.sav_config != NULL) {
-			VERIFY(nvlist_lookup_nvlist_array(
-			    spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
-			    &spares, &nspares) == 0);
-			for (i = 0; i < nspares; i++) {
-				uint64_t theguid;
-				VERIFY(nvlist_lookup_uint64(spares[i],
-				    ZPOOL_CONFIG_GUID, &theguid) == 0);
-				if (theguid == guid) {
-					VERIFY(nvlist_add_string(spares[i],
-					    ZPOOL_CONFIG_PATH, newpath) == 0);
-					spa_load_spares(spa);
-					spa->spa_spares.sav_sync = B_TRUE;
-					return (spa_vdev_exit(spa, NULL, txg,
-					    0));
-				}
-			}
-		}
-
+	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)	/* aux vdevs included */
		return (spa_vdev_exit(spa, NULL, txg, ENOENT));
-	}
	if (!vd->vdev_ops->vdev_op_leaf)
		return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
-	spa_strfree(vd->vdev_path);
-	vd->vdev_path = spa_strdup(newpath);
+	if (ispath) {	/* replace the stored device path */
+		spa_strfree(vd->vdev_path);
+		vd->vdev_path = spa_strdup(value);
+	} else {	/* replace the stored FRU; may be unset initially */
+		if (vd->vdev_fru != NULL)
+			spa_strfree(vd->vdev_fru);
+		vd->vdev_fru = spa_strdup(value);
+	}
	vdev_config_dirty(vd->vdev_top);
	return (spa_vdev_exit(spa, NULL, txg, 0));
}
+int
+spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
+{
+	return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));	/* ispath */
+}
+
+int
+spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
+{
+	return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));	/* !ispath: FRU */
+}
+
/*
* ==========================================================================
* SPA Scrubbing
}
static void
+spa_async_autoexpand(spa_t *spa, vdev_t *vd)
+{
+	sysevent_id_t eid;
+	nvlist_t *attr;
+	char *physpath;
+
+	if (!spa->spa_autoexpand)
+		return;	/* 'autoexpand' property is off for this pool */
+
+	for (int c = 0; c < vd->vdev_children; c++) {
+		vdev_t *cvd = vd->vdev_child[c];
+		spa_async_autoexpand(spa, cvd);	/* recurse over the whole subtree */
+	}
+
+	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
+		return;	/* only leaf vdevs with a known physical path */
+
+	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
+	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);
+
+	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
+
+	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
+	    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);	/* post ESC_DEV_DLE for this device */
+
+	nvlist_free(attr);
+	kmem_free(physpath, MAXPATHLEN);
+}
+
+static void
spa_async_thread(spa_t *spa)
{
int tasks;
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
+ uint64_t oldsz, space_update;
+
mutex_enter(&spa_namespace_lock);
+ oldsz = spa_get_space(spa);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
+ space_update = spa_get_space(spa) - oldsz;
mutex_exit(&spa_namespace_lock);
+
+ /*
+ * If the pool grew as a result of the config update,
+ * then log an internal history event.
+ */
+ if (space_update) {
+ dmu_tx_t *tx;
+
+ tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
+ spa_history_internal_log(LOG_POOL_VDEV_ONLINE,
+ spa, tx, CRED(),
+ "pool '%s' size: %llu(+%llu)",
+ spa_name(spa), spa_get_space(spa),
+ space_update);
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ }
}
/*
(void) spa_vdev_state_exit(spa, NULL, 0);
}
+ if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ spa_async_autoexpand(spa, spa->spa_root_vdev);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ }
+
/*
* See if any devices need to be probed.
*/
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
- spa_config_dirent_t *dp;
mutex_enter(&spa->spa_props_lock);
case ZPOOL_PROP_CACHEFILE:
/*
- * 'cachefile' is a non-persistent property, but note
- * an async request that the config cache needs to be
- * udpated.
+	 * 'cachefile' is also a non-persistent property.
*/
- VERIFY(nvpair_value_string(elem, &strval) == 0);
-
- dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP);
-
- if (strval[0] == '\0')
- dp->scd_path = spa_strdup(spa_config_path);
- else if (strcmp(strval, "none") == 0)
- dp->scd_path = NULL;
- else
- dp->scd_path = spa_strdup(strval);
-
- list_insert_head(&spa->spa_config_list, dp);
- spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
break;
default:
/*
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
+ case ZPOOL_PROP_AUTOEXPAND:
+ spa->spa_autoexpand = intval;
+ spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
+ break;
default:
break;
}
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
- while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
- vdev_state_clean(vd);
- vdev_config_dirty(vd);
+ while (list_head(&spa->spa_state_dirty_list) != NULL) {
+ /*
+ * We need the write lock here because, for aux vdevs,
+ * calling vdev_config_dirty() modifies sav_config.
+ * This is ugly and will become unnecessary when we
+ * eliminate the aux vdev wart by integrating all vdevs
+ * into the root vdev tree.
+ */
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
+ while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
+ vdev_state_clean(vd);
+ vdev_config_dirty(vd);
+ }
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = spa_get_random(children);
- int c;
- for (c = 0; c < children; c++) {
+ for (int c = 0; c < children; c++) {
vd = rvd->vdev_child[(c0 + c) % children];
if (vd->vdev_ms_array == 0 || vd->vdev_islog)
continue;
if (svdcount == SPA_DVAS_PER_BP)
break;
}
- error = vdev_config_sync(svd, svdcount, txg);
+ error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
+ if (error != 0)
+ error = vdev_config_sync(svd, svdcount, txg,
+ B_TRUE);
} else {
error = vdev_config_sync(rvd->vdev_child,
- rvd->vdev_children, txg);
+ rvd->vdev_children, txg, B_FALSE);
+ if (error != 0)
+ error = vdev_config_sync(rvd->vdev_child,
+ rvd->vdev_children, txg, B_TRUE);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
vdev_t *
-spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache)
+spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
- if (l2cache) {
+ if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
+
+ for (i = 0; i < spa->spa_spares.sav_count; i++) {
+ vd = spa->spa_spares.sav_vdevs[i];
+ if (vd->vdev_guid == guid)
+ return (vd);
+ }
}
return (NULL);