*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "@(#)vdev.c 1.33 07/11/27 SMI"
-
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
+#include <sys/arc.h>
/*
* Virtual device management.
NULL
};
-/* maximum scrub/resilver I/O queue */
-int zfs_scrub_limit = 70;
+/* maximum scrub/resilver I/O queue per leaf vdev */
+int zfs_scrub_limit = 10;
/*
* Given a vdev type, return the appropriate ops vector.
{
vdev_t *rvd = spa->spa_root_vdev;
- ASSERT(spa_config_held(spa, RW_READER) ||
- curthread == spa->spa_scrub_thread);
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
- if (vdev < rvd->vdev_children)
+ if (vdev < rvd->vdev_children) {
+ ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
+ }
return (NULL);
}
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
- ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));
+ ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
int oldc = pvd->vdev_children;
int newc, c;
- ASSERT(spa_config_held(pvd->vdev_spa, RW_WRITER));
+ ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
space_map_create(&vd->vdev_dtl_map, 0, -1ULL, 0, &vd->vdev_dtl_lock);
space_map_create(&vd->vdev_dtl_scrub, 0, -1ULL, 0, &vd->vdev_dtl_lock);
txg_list_create(&vd->vdev_ms_list,
uint64_t guid = 0, islog, nparity;
vdev_t *vd;
- ASSERT(spa_config_held(spa, RW_WRITER));
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EINVAL);
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
- &vd->vdev_not_present);
+ if (!spa->spa_import_faulted)
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
+ &vd->vdev_not_present);
/*
* Get the alignment requirement.
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
- if (vd->vdev_ops->vdev_op_leaf && alloctype == VDEV_ALLOC_LOAD) {
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
- &vd->vdev_dtl.smo_object);
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) {
+ if (alloctype == VDEV_ALLOC_LOAD) {
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
+ &vd->vdev_dtl.smo_object);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
+ &vd->vdev_unspare);
+ }
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
- (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
- &vd->vdev_unspare);
+
/*
* When importing a pool, we want to ignore the persistent fault
* state, as the diagnosis made on another system may not be
*/
vdev_close(vd);
-
- ASSERT(!list_link_active(&vd->vdev_dirty_node));
+ ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
/*
* Free all children.
mutex_exit(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
+ mutex_destroy(&vd->vdev_probe_lock);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
- if (list_link_active(&svd->vdev_dirty_node)) {
+ if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
+ if (list_link_active(&svd->vdev_state_dirty_node)) {
+ vdev_state_clean(svd);
+ vdev_state_dirty(tvd);
+ }
+
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
- ASSERT(spa_config_held(spa, RW_WRITER));
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
- ASSERT(spa_config_held(cvd->vdev_spa, RW_WRITER));
+ ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
- cvd->vdev_id = mvd->vdev_id;
- vdev_add_child(pvd, cvd);
/*
- * If we created a new toplevel vdev, then we need to change the child's
- * vdev GUID to match the old toplevel vdev. Otherwise, we could have
- * detached an offline device, and when we go to import the pool we'll
- * think we have two toplevel vdevs, instead of a different version of
- * the same toplevel vdev.
+ * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
+ * Otherwise, we could have detached an offline device, and when we
+ * go to import the pool we'll think we have two top-level vdevs,
+ * instead of a different version of the same top-level vdev.
*/
- if (cvd->vdev_top == cvd) {
- pvd->vdev_guid_sum -= cvd->vdev_guid;
- cvd->vdev_guid_sum -= cvd->vdev_guid;
- cvd->vdev_guid = mvd->vdev_guid;
- cvd->vdev_guid_sum += mvd->vdev_guid;
- pvd->vdev_guid_sum += cvd->vdev_guid;
- }
+ if (mvd->vdev_top == mvd)
+ cvd->vdev_guid = cvd->vdev_guid_sum = mvd->vdev_guid;
+ cvd->vdev_id = mvd->vdev_id;
+ vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
if (vd->vdev_ms_shift == 0) /* not being allocated from yet */
return (0);
- dprintf("%s oldc %llu newc %llu\n", vdev_description(vd), oldc, newc);
-
ASSERT(oldc <= newc);
if (vd->vdev_islog)
}
}
-int
-vdev_probe(vdev_t *vd)
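+/*
+ * Per-probe state shared by the probe's read, write, and root (null) zios;
+ * vdev_probe_done() accumulates the results here.
+ */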
+typedef struct vdev_probe_stats {
+ boolean_t vps_readable;
+ boolean_t vps_writeable;
+ int vps_flags;
+ zio_t *vps_root;
+ vdev_t *vps_vd;
+} vdev_probe_stats_t;
+
+static void
+vdev_probe_done(zio_t *zio)
{
- if (vd == NULL)
- return (EINVAL);
+ vdev_probe_stats_t *vps = zio->io_private;
+ vdev_t *vd = vps->vps_vd;
+
+ if (zio->io_type == ZIO_TYPE_READ) {
+ ASSERT(zio->io_vd == vd);
+ if (zio->io_error == 0)
+ vps->vps_readable = 1;
+ if (zio->io_error == 0 && (spa_mode & FWRITE)) {
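+ /*
+ * The device is readable; reuse the buffer we just read
+ * to probe whether it is also writeable.
+ */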
+ zio_nowait(zio_write_phys(vps->vps_root, vd,
+ zio->io_offset, zio->io_size, zio->io_data,
+ ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
+ ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
+ } else {
+ zio_buf_free(zio->io_data, zio->io_size);
+ }
+ } else if (zio->io_type == ZIO_TYPE_WRITE) {
+ ASSERT(zio->io_vd == vd);
+ if (zio->io_error == 0)
+ vps->vps_writeable = 1;
+ zio_buf_free(zio->io_data, zio->io_size);
+ } else if (zio->io_type == ZIO_TYPE_NULL) {
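+ /*
+ * This is the root probe zio; all child read and write
+ * probes have completed, so evaluate the results.
+ */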
+ ASSERT(zio->io_vd == NULL);
+ ASSERT(zio == vps->vps_root);
+
+ vd->vdev_cant_read |= !vps->vps_readable;
+ vd->vdev_cant_write |= !vps->vps_writeable;
+
+ if (vdev_readable(vd) &&
+ (vdev_writeable(vd) || !(spa_mode & FWRITE))) {
+ zio->io_error = 0;
+ } else {
+ ASSERT(zio->io_error != 0);
+ zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
+ zio->io_spa, vd, NULL, 0, 0);
+ zio->io_error = ENXIO;
+ }
+ kmem_free(vps, sizeof (*vps));
+ }
+}
- /*
- * Right now we only support status checks on the leaf vdevs.
- */
- if (vd->vdev_ops->vdev_op_leaf)
- return (vd->vdev_ops->vdev_op_probe(vd));
+/*
+ * Determine whether this device is accessible by reading and writing
+ * to several known locations: the pad regions of each vdev label
+ * but the first (which we leave alone in case it contains a VTOC).
+ */
+zio_t *
+vdev_probe(vdev_t *vd, zio_t *pio)
+{
+ spa_t *spa = vd->vdev_spa;
+ vdev_probe_stats_t *vps;
+ zio_t *zio;
- return (0);
+ vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+
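+ /*
+ * Probe I/O should reflect the raw health of the device, so it
+ * bypasses caching, aggregation, and retries.
+ */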
+ vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
+ ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_DONT_RETRY;
+
+ if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
+ /*
+ * vdev_cant_read and vdev_cant_write can only transition
+ * from TRUE to FALSE when we have the SCL_ZIO lock as writer;
+ * otherwise they can only transition from FALSE to TRUE.
+ * This ensures that any zio looking at these values can
+ * assume that failures persist for the life of the I/O.
+ * That's important because when a device has intermittent
+ * connectivity problems, we want to ensure that they're
+ * ascribed to the device (ENXIO) and not the zio (EIO).
+ *
+ * Since we hold SCL_ZIO as writer here, clear both values
+ * so the probe can reevaluate from first principles.
+ */
+ vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
+ vd->vdev_cant_read = B_FALSE;
+ vd->vdev_cant_write = B_FALSE;
+ }
+
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+ zio = zio_null(pio, spa, vdev_probe_done, vps, vps->vps_flags);
+
+ vps->vps_root = zio;
+ vps->vps_vd = vd;
+
+ for (int l = 1; l < VDEV_LABELS; l++) {
+ zio_nowait(zio_read_phys(zio, vd,
+ vdev_label_offset(vd->vdev_psize, l,
+ offsetof(vdev_label_t, vl_pad)),
+ VDEV_SKIP_SIZE, zio_buf_alloc(VDEV_SKIP_SIZE),
+ ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
+ ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
+ }
+
+ return (zio);
}
/*
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
- if (vd->vdev_fault_mode == VDEV_FAULT_COUNT)
- vd->vdev_fault_arg >>= 1;
- else
- vd->vdev_fault_mode = VDEV_FAULT_NONE;
-
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
if (!vd->vdev_removed && vd->vdev_faulted) {
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
- error = vdev_probe(vd);
- if (error) {
+ if (vd->vdev_ops->vdev_op_leaf &&
+ (error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_OPEN_FAILED);
+ VDEV_AUX_IO_FAILURE);
return (error);
}
}
/*
- * This allows the ZFS DE to close cases appropriately. If a device
- * goes away and later returns, we want to close the associated case.
- * But it's not enough to simply post this only when a device goes from
- * CANT_OPEN -> HEALTHY. If we reboot the system and the device is
- * back, we also need to close the case (otherwise we will try to replay
- * it). So we have to post this notifier every time. Since this only
- * occurs during pool open or error recovery, this should not be an
- * issue.
+ * If a leaf vdev has a DTL, and seems healthy, then kick off a
+ * resilver. But don't do this if we are doing a reopen for a
+ * scrub, since this would just restart the scrub we are already
+ * doing.
*/
- zfs_post_ok(vd->vdev_spa, vd);
+ if (vd->vdev_children == 0 && !vd->vdev_spa->spa_scrub_reopen) {
+ mutex_enter(&vd->vdev_dtl_lock);
+ if (vd->vdev_dtl_map.sm_space != 0 && vdev_writeable(vd))
+ spa_async_request(vd->vdev_spa, SPA_ASYNC_RESILVER);
+ mutex_exit(&vd->vdev_dtl_lock);
+ }
return (0);
}
spa_t *spa = vd->vdev_spa;
int c;
nvlist_t *label;
- uint64_t guid;
+ uint64_t guid, top_guid;
uint64_t state;
for (c = 0; c < vd->vdev_children; c++)
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
- if (vd->vdev_ops->vdev_op_leaf && !vdev_is_dead(vd)) {
+ if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
if ((label = vdev_label_read_config(vd)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
return (0);
}
+ /*
+ * If this vdev just became a top-level vdev because its
+ * sibling was detached, it will have adopted the parent's
+ * vdev guid -- but the label may or may not be on disk yet.
+ * Fortunately, either version of the label will have the
+ * same top guid, so if we're a top-level vdev, we can
+ * safely compare to that instead.
+ */
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
- &guid) != 0 || guid != vd->vdev_guid) {
+ &guid) != 0 ||
+ nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
+ &top_guid) != 0 ||
+ (vd->vdev_guid != guid &&
+ (vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
if (spa->spa_load_state == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
return (EBADF);
- }
- /*
- * If we were able to open and validate a vdev that was previously
- * marked permanently unavailable, clear that state now.
- */
- if (vd->vdev_not_present)
- vd->vdev_not_present = 0;
+ /*
+ * If we were able to open and validate a vdev that was
+ * previously marked permanently unavailable, clear that state
+ * now.
+ */
+ if (vd->vdev_not_present)
+ vd->vdev_not_present = 0;
+ }
return (0);
}
{
spa_t *spa = vd->vdev_spa;
- ASSERT(spa_config_held(spa, RW_WRITER));
+ ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
vdev_close(vd);
(void) vdev_open(vd);
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
- (void) vdev_validate(vd);
+ if (vd->vdev_aux) {
+ (void) vdev_validate_aux(vd);
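+ /*
+ * If this aux device is healthy and the L2ARC does not
+ * know about it yet, register it with the ARC now.
+ */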
+ if (vdev_readable(vd) && vdev_writeable(vd) &&
+ !l2arc_vdev_present(vd)) {
+ uint64_t size = vdev_get_rsize(vd);
+ l2arc_add_vdev(spa, vd,
+ VDEV_LABEL_START_SIZE,
+ size - VDEV_LABEL_START_SIZE);
+ }
+ } else {
+ (void) vdev_validate(vd);
+ }
/*
* Reassess parent vdev's health.
spa_t *spa = vd->vdev_spa;
int c;
- ASSERT(spa_config_held(spa, RW_WRITER));
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
- /*
- * We're successfully scrubbed everything up to scrub_txg.
- * Therefore, excise all old DTLs up to that point, then
- * fold in the DTLs for everything we couldn't scrub.
- */
- if (scrub_txg != 0) {
+ if (scrub_txg != 0 &&
+ (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) {
+ /* XXX should check scrub_done? */
+ /*
+ * We completed a scrub up to scrub_txg. If we
+ * did it without rebooting, then the scrub dtl
+ * will be valid, so excise the old region and
+ * fold in the scrub dtl. Otherwise, leave the
+ * dtl as-is if there was an error.
+ */
space_map_excise(&vd->vdev_dtl_map, 0, scrub_txg);
space_map_union(&vd->vdev_dtl_map, &vd->vdev_dtl_scrub);
}
if (scrub_done)
space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL);
mutex_exit(&vd->vdev_dtl_lock);
+
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
dmu_buf_t *db;
dmu_tx_t *tx;
- dprintf("%s in txg %llu pass %d\n",
- vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));
-
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached) {
smo->smo_object = 0;
}
dmu_tx_commit(tx);
- dprintf("detach %s committed in txg %llu\n",
- vdev_description(vd), txg);
return;
}
dmu_tx_commit(tx);
}
+/*
+ * Determine if resilver is needed, and if so the txg range.
+ */
+boolean_t
+vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
+{
+ boolean_t needed = B_FALSE;
+ uint64_t thismin = UINT64_MAX;
+ uint64_t thismax = 0;
+
+ if (vd->vdev_children == 0) {
+ mutex_enter(&vd->vdev_dtl_lock);
+ if (vd->vdev_dtl_map.sm_space != 0 && vdev_writeable(vd)) {
+ space_seg_t *ss;
+
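+ /*
+ * The DTL is keyed by txg, so its first and last
+ * segments bound the range that needs resilvering.
+ */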
+ ss = avl_first(&vd->vdev_dtl_map.sm_root);
+ thismin = ss->ss_start - 1;
+ ss = avl_last(&vd->vdev_dtl_map.sm_root);
+ thismax = ss->ss_end;
+ needed = B_TRUE;
+ }
+ mutex_exit(&vd->vdev_dtl_lock);
+ } else {
+ int c;
+ for (c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ uint64_t cmin, cmax;
+
+ if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
+ thismin = MIN(thismin, cmin);
+ thismax = MAX(thismax, cmax);
+ needed = B_TRUE;
+ }
+ }
+ }
+
+ if (needed && minp) {
+ *minp = thismin;
+ *maxp = thismax;
+ }
+ return (needed);
+}
+
void
vdev_load(vdev_t *vd)
{
uint64_t guid, version;
uint64_t state;
+ if (!vdev_readable(vd))
+ return (0);
+
if ((label = vdev_label_read_config(vd)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
{
metaslab_t *msp;
- dprintf("%s txg %llu\n", vdev_description(vd), txg);
-
while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
metaslab_sync_done(msp, txg);
}
metaslab_t *msp;
dmu_tx_t *tx;
- dprintf("%s txg %llu pass %d\n",
- vdev_description(vd), (u_longlong_t)txg, spa_sync_pass(spa));
-
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
ASSERT(vd == vd->vdev_top);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
-const char *
-vdev_description(vdev_t *vd)
-{
- if (vd == NULL || vd->vdev_ops == NULL)
- return ("<unknown>");
-
- if (vd->vdev_path != NULL)
- return (vd->vdev_path);
-
- if (vd->vdev_parent == NULL)
- return (spa_name(vd->vdev_spa));
-
- return (vd->vdev_ops->vdev_op_type);
-}
-
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
int
vdev_fault(spa_t *spa, uint64_t guid)
{
- vdev_t *rvd, *vd;
- uint64_t txg;
-
- /*
- * Disregard a vdev fault request if the pool has
- * experienced a complete failure.
- *
- * XXX - We do this here so that we don't hold the
- * spa_namespace_lock in the event that we can't get
- * the RW_WRITER spa_config_lock.
- */
- if (spa_state(spa) == POOL_STATE_IO_FAILURE)
- return (EIO);
+ vdev_t *vd;
- txg = spa_vdev_enter(spa);
+ spa_vdev_state_enter(spa);
- rvd = spa->spa_root_vdev;
+ if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
+ return (spa_vdev_state_exit(spa, NULL, ENODEV));
- if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
- return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
- return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
+ return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
- vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED,
- VDEV_AUX_ERR_EXCEEDED);
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED);
/*
- * If marking the vdev as faulted cause the toplevel vdev to become
+ * If marking the vdev as faulted causes the top-level vdev to become
* unavailable, then back off and simply mark the vdev as degraded
* instead.
*/
- if (vdev_is_dead(vd->vdev_top)) {
+ if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
}
}
- vdev_config_dirty(vd->vdev_top);
-
- (void) spa_vdev_exit(spa, NULL, txg, 0);
-
- return (0);
+ return (spa_vdev_state_exit(spa, vd, 0));
}
/*
int
vdev_degrade(spa_t *spa, uint64_t guid)
{
- vdev_t *rvd, *vd;
- uint64_t txg;
-
- /*
- * Disregard a vdev fault request if the pool has
- * experienced a complete failure.
- *
- * XXX - We do this here so that we don't hold the
- * spa_namespace_lock in the event that we can't get
- * the RW_WRITER spa_config_lock.
- */
- if (spa_state(spa) == POOL_STATE_IO_FAILURE)
- return (EIO);
+ vdev_t *vd;
- txg = spa_vdev_enter(spa);
+ spa_vdev_state_enter(spa);
- rvd = spa->spa_root_vdev;
+ if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
+ return (spa_vdev_state_exit(spa, NULL, ENODEV));
- if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
- return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
- return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
+ return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
/*
* If the vdev is already faulted, then don't do anything.
*/
- if (vd->vdev_faulted || vd->vdev_degraded) {
- (void) spa_vdev_exit(spa, NULL, txg, 0);
- return (0);
- }
+ if (vd->vdev_faulted || vd->vdev_degraded)
+ return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
- vdev_config_dirty(vd->vdev_top);
-
- (void) spa_vdev_exit(spa, NULL, txg, 0);
- return (0);
+ return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* so no FMA events are generated if the device fails to open.
*/
int
-vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
- vdev_state_t *newstate)
+vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
- vdev_t *rvd, *vd;
- uint64_t txg;
-
- /*
- * Disregard a vdev fault request if the pool has
- * experienced a complete failure.
- *
- * XXX - We do this here so that we don't hold the
- * spa_namespace_lock in the event that we can't get
- * the RW_WRITER spa_config_lock.
- */
- if (spa_state(spa) == POOL_STATE_IO_FAILURE)
- return (EIO);
-
- txg = spa_vdev_enter(spa);
+ vdev_t *vd;
- rvd = spa->spa_root_vdev;
+ spa_vdev_state_enter(spa);
- if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
- return (spa_vdev_exit(spa, NULL, txg, ENODEV));
+ if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
+ return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
- return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
+ return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
- vd->vdev_checkremove = (flags & ZFS_ONLINE_CHECKREMOVE) ?
- B_TRUE : B_FALSE;
- vd->vdev_forcefault = (flags & ZFS_ONLINE_FORCEFAULT) ?
- B_TRUE : B_FALSE;
+ vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
+ vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
vdev_reopen(vd->vdev_top);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
- vdev_config_dirty(vd->vdev_top);
+ (void) spa_vdev_state_exit(spa, vd, 0);
- (void) spa_vdev_exit(spa, NULL, txg, 0);
-
- /*
- * Must hold spa_namespace_lock in order to post resilver sysevent
- * w/pool name.
- */
- mutex_enter(&spa_namespace_lock);
- VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER, B_TRUE) == 0);
- mutex_exit(&spa_namespace_lock);
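+ /* Kick off a resilver to bring the newly-onlined device up to date. */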
+ VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0);
return (0);
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
- vdev_t *rvd, *vd;
- uint64_t txg;
-
- /*
- * Disregard a vdev fault request if the pool has
- * experienced a complete failure.
- *
- * XXX - We do this here so that we don't hold the
- * spa_namespace_lock in the event that we can't get
- * the RW_WRITER spa_config_lock.
- */
- if (spa_state(spa) == POOL_STATE_IO_FAILURE)
- return (EIO);
-
- txg = spa_vdev_enter(spa);
+ vdev_t *vd;
- rvd = spa->spa_root_vdev;
+ spa_vdev_state_enter(spa);
- if ((vd = vdev_lookup_by_guid(rvd, guid)) == NULL)
- return (spa_vdev_exit(spa, NULL, txg, ENODEV));
+ if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
+ return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
- return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
+ return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
/*
* If the device isn't already offline, try to offline it.
* as long as the remaining devices don't have any DTL holes.
*/
if (vd->vdev_top->vdev_dtl_map.sm_space != 0)
- return (spa_vdev_exit(spa, NULL, txg, EBUSY));
+ return (spa_vdev_state_exit(spa, NULL, EBUSY));
/*
* Offline this device and reopen its top-level vdev.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(vd->vdev_top);
- if (vdev_is_dead(vd->vdev_top)) {
+ if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) {
vd->vdev_offline = B_FALSE;
vdev_reopen(vd->vdev_top);
- return (spa_vdev_exit(spa, NULL, txg, EBUSY));
+ return (spa_vdev_state_exit(spa, NULL, EBUSY));
}
}
- vd->vdev_tmpoffline = (flags & ZFS_OFFLINE_TEMPORARY) ?
- B_TRUE : B_FALSE;
+ vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
- vdev_config_dirty(vd->vdev_top);
-
- return (spa_vdev_exit(spa, NULL, txg, 0));
+ return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
- * If reopen is specified then attempt to reopen the vdev if the vdev is
- * faulted or degraded.
*/
void
-vdev_clear(spa_t *spa, vdev_t *vd, boolean_t reopen_wanted)
+vdev_clear(spa_t *spa, vdev_t *vd)
{
- int c;
+ vdev_t *rvd = spa->spa_root_vdev;
+
+ ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
- vd = spa->spa_root_vdev;
+ vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
- vd->vdev_is_failing = B_FALSE;
- for (c = 0; c < vd->vdev_children; c++)
- vdev_clear(spa, vd->vdev_child[c], reopen_wanted);
+ for (int c = 0; c < vd->vdev_children; c++)
+ vdev_clear(spa, vd->vdev_child[c]);
/*
- * If we're in the FAULTED state, then clear the persistent state and
- * attempt to reopen the device. We also mark the vdev config dirty, so
- * that the new faulted state is written out to disk.
+ * If we're in the FAULTED state or have experienced failed I/O, then
+ * clear the persistent state and attempt to reopen the device. We
+ * also mark the vdev config dirty, so that the new faulted state is
+ * written out to disk.
*/
- if (reopen_wanted && (vd->vdev_faulted || vd->vdev_degraded)) {
+ if (vd->vdev_faulted || vd->vdev_degraded ||
+ !vdev_readable(vd) || !vdev_writeable(vd)) {
+
vd->vdev_faulted = vd->vdev_degraded = 0;
+ vd->vdev_cant_read = B_FALSE;
+ vd->vdev_cant_write = B_FALSE;
+
vdev_reopen(vd);
- vdev_config_dirty(vd->vdev_top);
- if (vd->vdev_faulted)
+ if (vd != rvd)
+ vdev_state_dirty(vd->vdev_top);
+
+ if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
spa_async_request(spa, SPA_ASYNC_RESILVER);
spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
}
}
-int
+boolean_t
+vdev_is_dead(vdev_t *vd)
+{
+ return (vd->vdev_state < VDEV_STATE_DEGRADED);
+}
+
+boolean_t
vdev_readable(vdev_t *vd)
{
- /* XXPOLICY */
- return (!vdev_is_dead(vd));
+ return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
-int
+boolean_t
vdev_writeable(vdev_t *vd)
{
- return (!vdev_is_dead(vd) && !vd->vdev_is_failing);
+ return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
}
-int
-vdev_is_dead(vdev_t *vd)
+boolean_t
+vdev_allocatable(vdev_t *vd)
{
- return (vd->vdev_state < VDEV_STATE_DEGRADED);
+ /*
+ * We currently allow allocations from vdevs which may be in the
+ * process of reopening (i.e. VDEV_STATE_CLOSED). If the device
+ * fails to reopen then we'll catch it later when we're holding
+ * the proper locks.
+ */
+ return (!(vdev_is_dead(vd) && vd->vdev_state != VDEV_STATE_CLOSED) &&
+ !vd->vdev_cant_write);
}
-int
-vdev_error_inject(vdev_t *vd, zio_t *zio)
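+/*
+ * Determine whether the given zio can be issued to this vdev: the vdev
+ * must not be dead or pending removal, and must be able to service the
+ * I/O type in question.
+ */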
+boolean_t
+vdev_accessible(vdev_t *vd, zio_t *zio)
{
- int error = 0;
+ ASSERT(zio->io_vd == vd);
- if (vd->vdev_fault_mode == VDEV_FAULT_NONE)
- return (0);
+ if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
+ return (B_FALSE);
- if (((1ULL << zio->io_type) & vd->vdev_fault_mask) == 0)
- return (0);
+ if (zio->io_type == ZIO_TYPE_READ)
+ return (!vd->vdev_cant_read);
- switch (vd->vdev_fault_mode) {
- case VDEV_FAULT_RANDOM:
- if (spa_get_random(vd->vdev_fault_arg) == 0)
- error = EIO;
- break;
-
- case VDEV_FAULT_COUNT:
- if ((int64_t)--vd->vdev_fault_arg <= 0)
- vd->vdev_fault_mode = VDEV_FAULT_NONE;
- error = EIO;
- break;
- }
+ if (zio->io_type == ZIO_TYPE_WRITE)
+ return (!vd->vdev_cant_write);
- return (error);
+ return (B_TRUE);
}
/*
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
- int c, t;
mutex_enter(&vd->vdev_stat_lock);
bcopy(&vd->vdev_stat, vs, sizeof (*vs));
+ vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors;
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_rsize(vd);
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (vd == rvd) {
- for (c = 0; c < rvd->vdev_children; c++) {
+ for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *cvd = rvd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
mutex_enter(&vd->vdev_stat_lock);
- for (t = 0; t < ZIO_TYPES; t++) {
+ for (int t = 0; t < ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
- vs->vs_read_errors += cvs->vs_read_errors;
- vs->vs_write_errors += cvs->vs_write_errors;
- vs->vs_checksum_errors += cvs->vs_checksum_errors;
vs->vs_scrub_examined += cvs->vs_scrub_examined;
- vs->vs_scrub_errors += cvs->vs_scrub_errors;
mutex_exit(&vd->vdev_stat_lock);
}
}
}
void
-vdev_stat_update(zio_t *zio)
+vdev_stat_update(zio_t *zio, uint64_t psize)
{
- vdev_t *vd = zio->io_vd;
+ vdev_t *rvd = zio->io_spa->spa_root_vdev;
+ vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
+ /*
+ * If this i/o is a gang leader, it didn't do any actual work.
+ */
+ if (zio->io_gang_tree)
+ return;
+
if (zio->io_error == 0) {
+ /*
+ * If this is a root i/o, don't count it -- we've already
+ * counted the top-level vdevs, and vdev_get_stats() will
+ * aggregate them when asked. This reduces contention on
+ * the root vdev_stat_lock and implicitly handles blocks
+ * that compress away to holes, for which there is no i/o.
+ * (Holes never create vdev children, so all the counters
+ * remain zero, which is what we want.)
+ *
+ * Note: this only applies to successful i/o (io_error == 0)
+ * because unlike i/o counts, errors are not additive.
+ * When reading a ditto block, for example, failure of
+ * one top-level vdev does not imply a root-level error.
+ */
+ if (vd == rvd)
+ return;
+
+ ASSERT(vd == zio->io_vd);
if (!(flags & ZIO_FLAG_IO_BYPASS)) {
mutex_enter(&vd->vdev_stat_lock);
vs->vs_ops[type]++;
- vs->vs_bytes[type] += zio->io_size;
+ vs->vs_bytes[type] += psize;
mutex_exit(&vd->vdev_stat_lock);
}
- if ((flags & ZIO_FLAG_IO_REPAIR) &&
- zio->io_delegate_list == NULL) {
+ if (flags & ZIO_FLAG_IO_REPAIR) {
+ ASSERT(zio->io_delegate_list == NULL);
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_SCRUB_THREAD)
- vs->vs_scrub_repaired += zio->io_size;
+ vs->vs_scrub_repaired += psize;
else
- vs->vs_self_healed += zio->io_size;
+ vs->vs_self_healed += psize;
mutex_exit(&vd->vdev_stat_lock);
}
return;
if (flags & ZIO_FLAG_SPECULATIVE)
return;
- if (vdev_readable(vd)) {
- mutex_enter(&vd->vdev_stat_lock);
- if (type == ZIO_TYPE_READ) {
- if (zio->io_error == ECKSUM)
- vs->vs_checksum_errors++;
- else
- vs->vs_read_errors++;
- }
- if (type == ZIO_TYPE_WRITE)
- vs->vs_write_errors++;
- mutex_exit(&vd->vdev_stat_lock);
+ mutex_enter(&vd->vdev_stat_lock);
+ if (type == ZIO_TYPE_READ) {
+ if (zio->io_error == ECKSUM)
+ vs->vs_checksum_errors++;
+ else
+ vs->vs_read_errors++;
}
+ if (type == ZIO_TYPE_WRITE)
+ vs->vs_write_errors++;
+ mutex_exit(&vd->vdev_stat_lock);
- if (type == ZIO_TYPE_WRITE) {
- if (txg == 0 || vd->vdev_children != 0)
- return;
+ if (type == ZIO_TYPE_WRITE && txg != 0 && vd->vdev_children == 0) {
if (flags & ZIO_FLAG_SCRUB_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
vs->vs_scrub_complete = 0;
vs->vs_scrub_examined = 0;
vs->vs_scrub_repaired = 0;
- vs->vs_scrub_errors = 0;
vs->vs_scrub_start = gethrestime_sec();
vs->vs_scrub_end = 0;
}
int c;
/*
- * The dirty list is protected by the config lock. The caller must
- * either hold the config lock as writer, or must be the sync thread
- * (which holds the lock as reader). There's only one sync thread,
+ * If this is an aux vdev (as with l2cache devices), then we update the
+ * vdev config manually and set the sync flag.
+ */
+ if (vd->vdev_aux != NULL) {
+ spa_aux_vdev_t *sav = vd->vdev_aux;
+ nvlist_t **aux;
+ uint_t naux;
+
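+ /* Find this vdev's slot in the aux vdev array. */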
+ for (c = 0; c < sav->sav_count; c++) {
+ if (sav->sav_vdevs[c] == vd)
+ break;
+ }
+
+ if (c == sav->sav_count) {
+ /*
+ * We're being removed. There's nothing more to do.
+ */
+ ASSERT(sav->sav_sync == B_TRUE);
+ return;
+ }
+
+ sav->sav_sync = B_TRUE;
+
+ VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
+ ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0);
+
+ ASSERT(c < naux);
+
+ /*
+ * Setting the nvlist in the middle of the array is a little
+ * sketchy, but it will work.
+ */
+ nvlist_free(aux[c]);
+ aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE);
+
+ return;
+ }
+
+ /*
+ * The dirty list is protected by the SCL_CONFIG lock. The caller
+ * must either hold SCL_CONFIG as writer, or must be the sync thread
+ * (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
- ASSERT(spa_config_held(spa, RW_WRITER) ||
- dsl_pool_sync_context(spa_get_dsl(spa)));
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
+ (dsl_pool_sync_context(spa_get_dsl(spa)) &&
+ spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
} else {
ASSERT(vd == vd->vdev_top);
- if (!list_link_active(&vd->vdev_dirty_node))
- list_insert_head(&spa->spa_dirty_list, vd);
+ if (!list_link_active(&vd->vdev_config_dirty_node))
+ list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
{
spa_t *spa = vd->vdev_spa;
- ASSERT(spa_config_held(spa, RW_WRITER) ||
- dsl_pool_sync_context(spa_get_dsl(spa)));
+ ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
+ (dsl_pool_sync_context(spa_get_dsl(spa)) &&
+ spa_config_held(spa, SCL_CONFIG, RW_READER)));
- ASSERT(list_link_active(&vd->vdev_dirty_node));
- list_remove(&spa->spa_dirty_list, vd);
+ ASSERT(list_link_active(&vd->vdev_config_dirty_node));
+ list_remove(&spa->spa_config_dirty_list, vd);
}
+/*
+ * Mark a top-level vdev's state as dirty, so that the next pass of
+ * spa_sync() can convert this into vdev_config_dirty(). We distinguish
+ * the state changes from larger config changes because they require
+ * much less locking, and are often needed for administrative actions.
+ */
+void
+vdev_state_dirty(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ ASSERT(vd == vd->vdev_top);
+
+ /*
+ * The state list is protected by the SCL_STATE lock. The caller
+ * must either hold SCL_STATE as writer, or must be the sync thread
+ * (which holds SCL_STATE as reader). There's only one sync thread,
+ * so this is sufficient to ensure mutual exclusion.
+ */
+ ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
+ (dsl_pool_sync_context(spa_get_dsl(spa)) &&
+ spa_config_held(spa, SCL_STATE, RW_READER)));
+
+ if (!list_link_active(&vd->vdev_state_dirty_node))
+ list_insert_head(&spa->spa_state_dirty_list, vd);
+}
+
+void
+vdev_state_clean(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
+ (dsl_pool_sync_context(spa_get_dsl(spa)) &&
+ spa_config_held(spa, SCL_STATE, RW_READER)));
+
+ ASSERT(list_link_active(&vd->vdev_state_dirty_node));
+ list_remove(&spa->spa_state_dirty_list, vd);
+}
+
+/*
+ * Propagate vdev state up from children to parent.
+ */
void
vdev_propagate_state(vdev_t *vd)
{
if (vd->vdev_children > 0) {
for (c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
- if (vdev_is_dead(child) && !vdev_readable(child))
- faulted++;
- else if (child->vdev_state <= VDEV_STATE_DEGRADED)
+
+ if (!vdev_readable(child) ||
+ (!vdev_writeable(child) && (spa_mode & FWRITE))) {
+ /*
+ * Root special: if a top-level log device fails,
+ * count it as degraded rather than faulted, so
+ * the root vdev is degraded instead of faulted.
+ */
+ if (child->vdev_islog && vd == rvd)
+ degraded++;
+ else
+ faulted++;
+ } else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
+ }
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
- * Root special: if there is a toplevel vdev that cannot be
+ * Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
VDEV_AUX_CORRUPT_DATA);
}
- if (vd->vdev_parent && !vd->vdev_islog)
+ if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
+ spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
vd->vdev_stat.vs_aux = aux;
* want here. This is limited to leaf devices, because otherwise
* closing the device will affect other children.
*/
- if (!vdev_readable(vd) && vd->vdev_ops->vdev_op_leaf)
+ if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
* Indicate to the ZFS DE that this device has been removed, and
* any recent errors should be ignored.
*/
- zfs_post_remove(vd->vdev_spa, vd);
+ zfs_post_remove(spa, vd);
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* begin with. Failure to open such a device is not considered
* an error.
*/
- if (vd->vdev_spa->spa_load_state == SPA_LOAD_IMPORT &&
+ if (spa->spa_load_state == SPA_LOAD_IMPORT &&
+ !spa->spa_import_faulted &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
- vd != vd->vdev_spa->spa_root_vdev) {
+ vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
+ case VDEV_AUX_IO_FAILURE:
+ class = FM_EREPORT_ZFS_IO_FAILURE;
+ break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
- zfs_ereport_post(class, vd->vdev_spa,
- vd, NULL, save_state, 0);
+ zfs_ereport_post(class, spa, vd, NULL, save_state, 0);
}
/* Erase any notion of persistent removed state */
if (!isopen)
vdev_propagate_state(vd);
}
+
+/*
+ * Check the vdev configuration to ensure that it's capable of supporting
+ * a root pool. Currently, we do not support RAID-Z or partial configuration.
+ * In addition, only a single top-level vdev is allowed and none of the leaves
+ * can be wholedisks.
+ */
+boolean_t
+vdev_is_bootable(vdev_t *vd)
+{
+ int c;
+
+ if (!vd->vdev_ops->vdev_op_leaf) {
+ char *vdev_type = vd->vdev_ops->vdev_op_type;
+
+ if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 &&
+ vd->vdev_children > 1) {
+ return (B_FALSE);
+ } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 ||
+ strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) {
+ return (B_FALSE);
+ }
+ } else if (vd->vdev_wholedisk == 1) {
+ return (B_FALSE);
+ }
+
+ for (c = 0; c < vd->vdev_children; c++) {
+ if (!vdev_is_bootable(vd->vdev_child[c]))
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}