3246 ZFS I/O deadman thread
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index a61f29b..15ff30f 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -40,6 +42,7 @@
 #include <sys/arc.h>
 #include <sys/zil.h>
 #include <sys/dsl_scan.h>
+#include <sys/zvol.h>
 
 /*
  * Virtual device management.
@@ -85,8 +88,9 @@ vdev_default_asize(vdev_t *vd, uint64_t psize)
 {
        uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
        uint64_t csize;
+       int c;
 
-       for (int c = 0; c < vd->vdev_children; c++) {
+       for (c = 0; c < vd->vdev_children; c++) {
                csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
                asize = MAX(asize, csize);
        }
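
The asize computation above rounds a physical size up to the top-level
vdev's allocation granularity (1 << ashift). A minimal userland sketch of
the same arithmetic, using the P2ROUNDUP definition from sys/sysmacros.h
(the values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* P2ROUNDUP from sys/sysmacros.h: round x up to a power-of-2 align */
    #define P2ROUNDUP(x, align) (-(-(x) & -(align)))

    int
    main(void)
    {
        uint64_t psize = 5000;      /* hypothetical physical size */
        uint64_t ashift = 12;       /* 4 KiB allocation granularity */
        uint64_t asize = P2ROUNDUP(psize, 1ULL << ashift);

        /* prints 8192: 5000 rounded up to the next multiple of 4096 */
        printf("asize = %llu\n", (unsigned long long)asize);
        return (0);
    }
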
@@ -106,7 +110,7 @@ vdev_get_min_asize(vdev_t *vd)
        vdev_t *pvd = vd->vdev_parent;
 
        /*
-        * The our parent is NULL (inactive spare or cache) or is the root,
+        * If our parent is NULL (inactive spare or cache) or is the root,
         * just return our own asize.
         */
        if (pvd == NULL)
@@ -132,9 +136,10 @@ vdev_get_min_asize(vdev_t *vd)
 void
 vdev_set_min_asize(vdev_t *vd)
 {
+       int c;
        vd->vdev_min_asize = vdev_get_min_asize(vd);
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_set_min_asize(vd->vdev_child[c]);
 }
 
@@ -157,11 +162,12 @@ vdev_t *
 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
 {
        vdev_t *mvd;
+       int c;
 
        if (vd->vdev_guid == guid)
                return (vd);
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
                    NULL)
                        return (mvd);
@@ -190,7 +196,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-       newchild = kmem_zalloc(newsize, KM_SLEEP);
+       newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
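
The KM_SLEEP to KM_PUSHPAGE conversions here and throughout the rest of
the patch are a Linux-port concern: these allocations can run in the I/O
and txg-sync paths, where a KM_SLEEP allocation may recurse into
filesystem writeback and deadlock. Under the SPL, KM_PUSHPAGE may still
block but will not initiate further writeback. A sketch of the intent
(SPL flag semantics assumed, not stock Linux allocation flags):

    /* unsafe from the sync path: may re-enter ZFS via writeback */
    buf = kmem_zalloc(size, KM_SLEEP);

    /* safe from the sync path: never triggers writeback */
    buf = kmem_zalloc(size, KM_PUSHPAGE);
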
@@ -207,9 +213,6 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum += cvd->vdev_guid_sum;
-
-       if (cvd->vdev_ops->vdev_op_leaf)
-               cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
 }
 
 void
@@ -244,9 +247,6 @@ vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
         */
        for (; pvd != NULL; pvd = pvd->vdev_parent)
                pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
-
-       if (cvd->vdev_ops->vdev_op_leaf)
-               cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
 }
 
 /*
@@ -258,16 +258,17 @@ vdev_compact_children(vdev_t *pvd)
        vdev_t **newchild, *cvd;
        int oldc = pvd->vdev_children;
        int newc;
+       int c;
 
        ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 
-       for (int c = newc = 0; c < oldc; c++)
+       for (c = newc = 0; c < oldc; c++)
                if (pvd->vdev_child[c])
                        newc++;
 
-       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
-       for (int c = newc = 0; c < oldc; c++) {
+       for (c = newc = 0; c < oldc; c++) {
                if ((cvd = pvd->vdev_child[c]) != NULL) {
                        newchild[newc] = cvd;
                        cvd->vdev_id = newc++;
@@ -286,12 +287,14 @@ vdev_t *
 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 {
        vdev_t *vd;
+       int t;
 
-       vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+       vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
+               spa->spa_load_guid = spa_generate_guid(NULL);
        }
 
        if (guid == 0 && ops != &vdev_hole_ops) {
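
Generating spa_load_guid when the root vdev is first allocated gives the
pool a transient, runtime-only identity. This supports pool re-guid: the
on-disk spa_guid can now change while the pool is imported, so in-core
consumers (the ARC, for instance) key off the load guid instead. A hedged
sketch of the distinction, using the accessors assumed to pair with this
change:

    /* persistent identity: user-visible, may change via "zpool reguid" */
    uint64_t disk_guid = spa_guid(spa);

    /* runtime identity: regenerated on every load, stable while imported */
    uint64_t run_guid = spa_load_guid(spa);
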
@@ -318,10 +321,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
        vd->vdev_state = VDEV_STATE_CLOSED;
        vd->vdev_ishole = (ops == &vdev_hole_ops);
 
+       list_link_init(&vd->vdev_config_dirty_node);
+       list_link_init(&vd->vdev_state_dirty_node);
        mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
-       for (int t = 0; t < DTL_TYPES; t++) {
+       for (t = 0; t < DTL_TYPES; t++) {
                space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
                    &vd->vdev_dtl_lock);
        }
@@ -491,7 +496,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
                    &vd->vdev_removing);
        }
 
-       if (parent && !parent->vdev_parent) {
+       if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
                ASSERT(alloctype == VDEV_ALLOC_LOAD ||
                    alloctype == VDEV_ALLOC_ADD ||
                    alloctype == VDEV_ALLOC_SPLIT ||
@@ -524,6 +529,9 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
                (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
                    &vd->vdev_offline);
 
+               (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING,
+                   &vd->vdev_resilvering);
+
                /*
                 * When importing a pool, we want to ignore the persistent fault
                 * state, as the diagnosis made on another system may not be
@@ -564,6 +572,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 void
 vdev_free(vdev_t *vd)
 {
+       int c, t;
        spa_t *spa = vd->vdev_spa;
 
        /*
@@ -578,7 +587,7 @@ vdev_free(vdev_t *vd)
        /*
         * Free all children.
         */
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_free(vd->vdev_child[c]);
 
        ASSERT(vd->vdev_child == NULL);
@@ -627,7 +636,7 @@ vdev_free(vdev_t *vd)
        txg_list_destroy(&vd->vdev_dtl_list);
 
        mutex_enter(&vd->vdev_dtl_lock);
-       for (int t = 0; t < DTL_TYPES; t++) {
+       for (t = 0; t < DTL_TYPES; t++) {
                space_map_unload(&vd->vdev_dtl[t]);
                space_map_destroy(&vd->vdev_dtl[t]);
        }
@@ -664,6 +673,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
        svd->vdev_ms_shift = 0;
        svd->vdev_ms_count = 0;
 
+       if (tvd->vdev_mg)
+               ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
        tvd->vdev_mg = svd->vdev_mg;
        tvd->vdev_ms = svd->vdev_ms;
 
@@ -710,12 +721,14 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 static void
 vdev_top_update(vdev_t *tvd, vdev_t *vd)
 {
+       int c;
+
        if (vd == NULL)
                return;
 
        vd->vdev_top = tvd;
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_top_update(tvd, vd->vdev_child[c]);
 }
 
@@ -735,6 +748,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 
        mvd->vdev_asize = cvd->vdev_asize;
        mvd->vdev_min_asize = cvd->vdev_min_asize;
+       mvd->vdev_max_asize = cvd->vdev_max_asize;
        mvd->vdev_ashift = cvd->vdev_ashift;
        mvd->vdev_state = cvd->vdev_state;
        mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -826,7 +840,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
        ASSERT(oldc <= newc);
 
-       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
        if (oldc != 0) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -891,6 +905,8 @@ vdev_metaslab_fini(vdev_t *vd)
                kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                vd->vdev_ms = NULL;
        }
+
+       ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
 }
 
 typedef struct vdev_probe_stats {
@@ -963,6 +979,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
        spa_t *spa = vd->vdev_spa;
        vdev_probe_stats_t *vps = NULL;
        zio_t *pio;
+       int l;
 
        ASSERT(vd->vdev_ops->vdev_op_leaf);
 
@@ -980,7 +997,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
        mutex_enter(&vd->vdev_probe_lock);
 
        if ((pio = vd->vdev_probe_zio) == NULL) {
-               vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+               vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
                vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
                    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
@@ -1032,7 +1049,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
                return (NULL);
        }
 
-       for (int l = 1; l < VDEV_LABELS; l++) {
+       for (l = 1; l < VDEV_LABELS; l++) {
                zio_nowait(zio_read_phys(pio, vd,
                    vdev_label_offset(vd->vdev_psize, l,
                    offsetof(vdev_label_t, vl_pad2)),
@@ -1058,15 +1075,20 @@ vdev_open_child(void *arg)
        vd->vdev_open_thread = NULL;
 }
 
-boolean_t
+static boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
-       if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
-           strlen(ZVOL_DIR)) == 0)
+       int c;
+
+#ifdef _KERNEL
+       if (zvol_is_zvol(vd->vdev_path))
                return (B_TRUE);
-       for (int c = 0; c < vd->vdev_children; c++)
+#endif
+
+       for (c = 0; c < vd->vdev_children; c++)
                if (vdev_uses_zvols(vd->vdev_child[c]))
                        return (B_TRUE);
+
        return (B_FALSE);
 }
 
@@ -1075,6 +1097,7 @@ vdev_open_children(vdev_t *vd)
 {
        taskq_t *tq;
        int children = vd->vdev_children;
+       int c;
 
        /*
         * in order to handle pools on top of zvols, do the opens
@@ -1082,7 +1105,7 @@ vdev_open_children(vdev_t *vd)
         * spa_namespace_lock
         */
        if (vdev_uses_zvols(vd)) {
-               for (int c = 0; c < children; c++)
+               for (c = 0; c < children; c++)
                        vd->vdev_child[c]->vdev_open_error =
                            vdev_open(vd->vdev_child[c]);
                return;
@@ -1090,9 +1113,9 @@ vdev_open_children(vdev_t *vd)
        tq = taskq_create("vdev_open", children, minclsyspri,
            children, children, TASKQ_PREPOPULATE);
 
-       for (int c = 0; c < children; c++)
+       for (c = 0; c < children; c++)
                VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
-                   TQ_SLEEP) != NULL);
+                   TQ_SLEEP) != 0);
 
        taskq_destroy(tq);
 }
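
As the comment in the hunk above notes, child opens normally fan out to a
taskq so a wide top-level vdev opens in parallel, but they fall back to a
serial loop when a zvol is involved: opening a zvol re-enters ZFS, and the
opening thread already holds spa_namespace_lock. A rough userland model of
the fan-out, with POSIX threads standing in for the taskq (all names are
illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define CHILDREN 4

    static void *
    open_child(void *arg)
    {
        int id = *(int *)arg;

        /* stand-in for vdev_open(vd->vdev_child[id]) */
        printf("opening child %d\n", id);
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t tids[CHILDREN];
        int ids[CHILDREN];
        int c;

        for (c = 0; c < CHILDREN; c++) {
            ids[c] = c;
            pthread_create(&tids[c], NULL, open_child, &ids[c]);
        }
        for (c = 0; c < CHILDREN; c++)
            pthread_join(tids[c], NULL);
        return (0);
    }
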
@@ -1106,8 +1129,10 @@ vdev_open(vdev_t *vd)
        spa_t *spa = vd->vdev_spa;
        int error;
        uint64_t osize = 0;
-       uint64_t asize, psize;
+       uint64_t max_osize = 0;
+       uint64_t asize, max_asize, psize;
        uint64_t ashift = 0;
+       int c;
 
        ASSERT(vd->vdev_open_thread == curthread ||
            spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1137,7 +1162,7 @@ vdev_open(vdev_t *vd)
                return (ENXIO);
        }
 
-       error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+       error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
 
        /*
         * Reset the vdev_reopening flag so that we actually close
@@ -1186,7 +1211,7 @@ vdev_open(vdev_t *vd)
        if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
                return (0);
 
-       for (int c = 0; c < vd->vdev_children; c++) {
+       for (c = 0; c < vd->vdev_children; c++) {
                if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
                            VDEV_AUX_NONE);
@@ -1195,6 +1220,7 @@ vdev_open(vdev_t *vd)
        }
 
        osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+       max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
 
        if (vd->vdev_children == 0) {
                if (osize < SPA_MINDEVSIZE) {
@@ -1204,6 +1230,8 @@ vdev_open(vdev_t *vd)
                }
                psize = osize;
                asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+               max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+                   VDEV_LABEL_END_SIZE);
        } else {
                if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
                    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
@@ -1213,6 +1241,7 @@ vdev_open(vdev_t *vd)
                }
                psize = 0;
                asize = osize;
+               max_asize = max_osize;
        }
 
        vd->vdev_psize = psize;
@@ -1229,19 +1258,25 @@ vdev_open(vdev_t *vd)
        if (vd->vdev_asize == 0) {
                /*
                 * This is the first-ever open, so use the computed values.
-                * For testing purposes, a higher ashift can be requested.
+                * For compatibility, a different ashift can be requested.
                 */
                vd->vdev_asize = asize;
-               vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
+               vd->vdev_max_asize = max_asize;
+               if (vd->vdev_ashift == 0)
+                       vd->vdev_ashift = ashift;
        } else {
                /*
-                * Make sure the alignment requirement hasn't increased.
+                * Detect if the alignment requirement has increased.
+                * We don't want to make the pool unavailable, just
+                * post an event instead.
                 */
-               if (ashift > vd->vdev_top->vdev_ashift) {
-                       vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
-                           VDEV_AUX_BAD_LABEL);
-                       return (EINVAL);
+               if (ashift > vd->vdev_top->vdev_ashift &&
+                   vd->vdev_ops->vdev_op_leaf) {
+                       zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
+                           spa, vd, NULL, 0, 0);
                }
+
+               vd->vdev_max_asize = max_asize;
        }
 
        /*
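
Two behavioral changes land in the hunk above. On a first-ever open, a
requested ashift is now honored as-is instead of only being allowed to
grow, so a pool can be created with, say, ashift=12 on a drive that
reports 512-byte sectors. And when a leaf's alignment requirement later
increases past the top-level vdev's, the open no longer fails with EINVAL;
an FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT event is posted and the open proceeds,
trading possible write amplification for availability. A toy model of the
new policy (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t top_ashift = 9;    /* pool laid out for 512 B sectors */
        uint64_t leaf_ashift = 12;  /* replacement disk reports 4 KiB */

        if (leaf_ashift > top_ashift)
            printf("post bad-ashift event; open continues\n");
        else
            printf("alignment ok\n");
        return (0);
    }
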
@@ -1283,21 +1318,27 @@ vdev_open(vdev_t *vd)
  * contents.  This needs to be done before vdev_load() so that we don't
  * inadvertently do repair I/Os to the wrong device.
  *
+ * If 'strict' is false, ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
  * This function will only return failure if one of the vdevs indicates that it
  * has since been destroyed or exported.  This is only possible if
  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
  * will be updated but the function will return 0.
  */
 int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
 {
        spa_t *spa = vd->vdev_spa;
        nvlist_t *label;
        uint64_t guid = 0, top_guid;
        uint64_t state;
+       int c;
 
-       for (int c = 0; c < vd->vdev_children; c++)
-               if (vdev_validate(vd->vdev_child[c]) != 0)
+       for (c = 0; c < vd->vdev_children; c++)
+               if (vdev_validate(vd->vdev_child[c], strict) != 0)
                        return (EBADF);
 
        /*
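
Besides the strict flag, note that leaf labels are now read with an upper
txg bound (vdev_label_read_config(vd, txg)), so a stale label left behind
by a crash cannot masquerade as current. A hedged sketch of how a pool
loader might drive the two-pass validation; 'mosconfig' is an assumed flag
recording whether the config in hand came from the MOS:

    /* first pass may use the cached config: skip the pool-guid check */
    if (vdev_validate(rvd, mosconfig ? B_TRUE : B_FALSE) != 0)
        return (EBADF);
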
@@ -1308,8 +1349,10 @@ vdev_validate(vdev_t *vd)
        if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
                uint64_t aux_guid = 0;
                nvlist_t *nvl;
+               uint64_t txg = spa_last_synced_txg(spa) != 0 ?
+                   spa_last_synced_txg(spa) : -1ULL;
 
-               if ((label = vdev_label_read_config(vd)) == NULL) {
+               if ((label = vdev_label_read_config(vd, txg)) == NULL) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_BAD_LABEL);
                        return (0);
@@ -1327,8 +1370,9 @@ vdev_validate(vdev_t *vd)
                        return (0);
                }
 
-               if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
-                   &guid) != 0 || guid != spa_guid(spa)) {
+               if (strict && (nvlist_lookup_uint64(label,
+                   ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+                   guid != spa_guid(spa))) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        nvlist_free(label);
@@ -1375,10 +1419,10 @@ vdev_validate(vdev_t *vd)
                nvlist_free(label);
 
                /*
-                * If spa->spa_load_verbatim is true, no need to check the
+                * If this is a verbatim import, no need to check the
                 * state of the pool.
                 */
-               if (!spa->spa_load_verbatim &&
+               if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
                    spa_load_state(spa) == SPA_LOAD_OPEN &&
                    state != POOL_STATE_ACTIVE)
                        return (EBADF);
@@ -1401,8 +1445,8 @@ vdev_validate(vdev_t *vd)
 void
 vdev_close(vdev_t *vd)
 {
-       spa_t *spa = vd->vdev_spa;
        vdev_t *pvd = vd->vdev_parent;
+       ASSERTV(spa_t *spa = vd->vdev_spa);
 
        ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 
@@ -1435,12 +1479,13 @@ void
 vdev_hold(vdev_t *vd)
 {
        spa_t *spa = vd->vdev_spa;
+       int c;
 
        ASSERT(spa_is_root(spa));
        if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                return;
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_hold(vd->vdev_child[c]);
 
        if (vd->vdev_ops->vdev_op_leaf)
@@ -1450,10 +1495,10 @@ vdev_hold(vdev_t *vd)
 void
 vdev_rele(vdev_t *vd)
 {
-       spa_t *spa = vd->vdev_spa;
+       int c;
 
-       ASSERT(spa_is_root(spa));
-       for (int c = 0; c < vd->vdev_children; c++)
+       ASSERT(spa_is_root(vd->vdev_spa));
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_rele(vd->vdev_child[c]);
 
        if (vd->vdev_ops->vdev_op_leaf)
@@ -1490,7 +1535,7 @@ vdev_reopen(vdev_t *vd)
                    !l2arc_vdev_present(vd))
                        l2arc_add_vdev(spa, vd);
        } else {
-               (void) vdev_validate(vd);
+               (void) vdev_validate(vd, B_TRUE);
        }
 
        /*
@@ -1544,6 +1589,7 @@ vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
        ASSERT(vd == vd->vdev_top);
        ASSERT(!vd->vdev_ishole);
        ASSERT(ISP2(flags));
+       ASSERT(spa_writeable(vd->vdev_spa));
 
        if (flags & VDD_METASLAB)
                (void) txg_list_add(&vd->vdev_ms_list, arg, txg);
@@ -1599,6 +1645,7 @@ vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
 
        ASSERT(t < DTL_TYPES);
        ASSERT(vd != vd->vdev_spa->spa_root_vdev);
+       ASSERT(spa_writeable(vd->vdev_spa));
 
        mutex_enter(sm->sm_lock);
        if (!space_map_contains(sm, txg, size))
@@ -1644,11 +1691,11 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 {
        spa_t *spa = vd->vdev_spa;
        avl_tree_t reftree;
-       int minref;
+       int c, t, minref;
 
        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_dtl_reassess(vd->vdev_child[c], txg,
                    scrub_txg, scrub_done);
 
@@ -1708,7 +1755,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
        }
 
        mutex_enter(&vd->vdev_dtl_lock);
-       for (int t = 0; t < DTL_TYPES; t++) {
+       for (t = 0; t < DTL_TYPES; t++) {
                /* account for child's outage in parent's missing map */
                int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
                if (t == DTL_SCRUB)
@@ -1720,7 +1767,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
                else
                        minref = vd->vdev_children;     /* any kind of mirror */
                space_map_ref_create(&reftree);
-               for (int c = 0; c < vd->vdev_children; c++) {
+               for (c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];
                        mutex_enter(&cvd->vdev_dtl_lock);
                        space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
@@ -1781,8 +1828,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
        if (vd->vdev_detached) {
                if (smo->smo_object != 0) {
-                       int err = dmu_object_free(mos, smo->smo_object, tx);
-                       ASSERT3U(err, ==, 0);
+                       VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
                        smo->smo_object = 0;
                }
                dmu_tx_commit(tx);
@@ -1855,6 +1901,9 @@ vdev_dtl_required(vdev_t *vd)
        vd->vdev_cant_read = cant_read;
        vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
 
+       if (!required && zio_injection_enabled)
+               required = !!zio_handle_device_injection(vd, NULL, ECHILD);
+
        return (required);
 }
 
@@ -1867,6 +1916,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
        boolean_t needed = B_FALSE;
        uint64_t thismin = UINT64_MAX;
        uint64_t thismax = 0;
+       int c;
 
        if (vd->vdev_children == 0) {
                mutex_enter(&vd->vdev_dtl_lock);
@@ -1882,7 +1932,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
                }
                mutex_exit(&vd->vdev_dtl_lock);
        } else {
-               for (int c = 0; c < vd->vdev_children; c++) {
+               for (c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];
                        uint64_t cmin, cmax;
 
@@ -1904,10 +1954,12 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 void
 vdev_load(vdev_t *vd)
 {
+       int c;
+
        /*
         * Recursively load all children.
         */
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_load(vd->vdev_child[c]);
 
        /*
@@ -1944,14 +1996,14 @@ vdev_validate_aux(vdev_t *vd)
        if (!vdev_readable(vd))
                return (0);
 
-       if ((label = vdev_label_read_config(vd)) == NULL) {
+       if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                return (-1);
        }
 
        if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
-           version > SPA_VERSION ||
+           !SPA_VERSION_IS_SUPPORTED(version) ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
            guid != vd->vdev_guid ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
@@ -1975,6 +2027,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        dmu_tx_t *tx;
+       int m;
 
        tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
 
@@ -1985,7 +2038,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
        }
 
        if (vd->vdev_ms != NULL) {
-               for (int m = 0; m < vd->vdev_ms_count; m++) {
+               for (m = 0; m < vd->vdev_ms_count; m++) {
                        metaslab_t *msp = vd->vdev_ms[m];
 
                        if (msp == NULL || msp->ms_smo.smo_object == 0)
@@ -2013,7 +2066,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
 
        ASSERT(!vd->vdev_ishole);
 
-       while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+       while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
                metaslab_sync_done(msp, txg);
 
        if (reassess)
@@ -2070,7 +2123,7 @@ vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
 int
 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 {
-       vdev_t *vd;
+       vdev_t *vd, *tvd;
 
        spa_vdev_state_enter(spa, SCL_NONE);
 
@@ -2080,6 +2133,8 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
 
+       tvd = vd->vdev_top;
+
        /*
         * We don't directly use the aux state here, but if we do a
         * vdev_reopen(), we need this value to be present to remember why we
@@ -2099,7 +2154,7 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
         * If this device has the only valid copy of the data, then
         * back off and simply mark the vdev as degraded instead.
         */
-       if (!vd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
+       if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
                vd->vdev_degraded = 1ULL;
                vd->vdev_faulted = 0ULL;
 
@@ -2107,7 +2162,7 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
                 * If we reopen the device and it's not dead, only then do we
                 * mark it degraded.
                 */
-               vdev_reopen(vd);
+               vdev_reopen(tvd);
 
                if (vdev_readable(vd))
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
@@ -2320,6 +2375,7 @@ void
 vdev_clear(spa_t *spa, vdev_t *vd)
 {
        vdev_t *rvd = spa->spa_root_vdev;
+       int c;
 
        ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 
@@ -2330,7 +2386,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
        vd->vdev_stat.vs_write_errors = 0;
        vd->vdev_stat.vs_checksum_errors = 0;
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_clear(spa, vd->vdev_child[c]);
 
        /*
@@ -2349,21 +2405,21 @@ vdev_clear(spa_t *spa, vdev_t *vd)
                 */
                vd->vdev_forcefault = B_TRUE;
 
-               vd->vdev_faulted = vd->vdev_degraded = 0;
+               vd->vdev_faulted = vd->vdev_degraded = 0ULL;
                vd->vdev_cant_read = B_FALSE;
                vd->vdev_cant_write = B_FALSE;
 
-               vdev_reopen(vd);
+               vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
 
                vd->vdev_forcefault = B_FALSE;
 
-               if (vd != rvd)
+               if (vd != rvd && vdev_writeable(vd->vdev_top))
                        vdev_state_dirty(vd->vdev_top);
 
                if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
                        spa_async_request(spa, SPA_ASYNC_RESILVER);
 
-               spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
+               spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
        }
 
        /*
@@ -2444,6 +2500,7 @@ void
 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 {
        vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+       int c, t;
 
        mutex_enter(&vd->vdev_stat_lock);
        bcopy(&vd->vdev_stat, vs, sizeof (*vs));
@@ -2452,6 +2509,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
        vs->vs_rsize = vdev_get_min_asize(vd);
        if (vd->vdev_ops->vdev_op_leaf)
                vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+       vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
        mutex_exit(&vd->vdev_stat_lock);
 
        /*
@@ -2459,12 +2517,12 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
         * over all top-level vdevs (i.e. the direct children of the root).
         */
        if (vd == rvd) {
-               for (int c = 0; c < rvd->vdev_children; c++) {
+               for (c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *cvd = rvd->vdev_child[c];
                        vdev_stat_t *cvs = &cvd->vdev_stat;
 
                        mutex_enter(&vd->vdev_stat_lock);
-                       for (int t = 0; t < ZIO_TYPES; t++) {
+                       for (t = 0; t < ZIO_TYPES; t++) {
                                vs->vs_ops[t] += cvs->vs_ops[t];
                                vs->vs_bytes[t] += cvs->vs_bytes[t];
                        }
@@ -2488,8 +2546,9 @@ void
 vdev_scan_stat_init(vdev_t *vd)
 {
        vdev_stat_t *vs = &vd->vdev_stat;
+       int c;
 
-       for (int c = 0; c < vd->vdev_children; c++)
+       for (c = 0; c < vd->vdev_children; c++)
                vdev_scan_stat_init(vd->vdev_child[c]);
 
        mutex_enter(&vd->vdev_stat_lock);
@@ -2541,7 +2600,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
                mutex_enter(&vd->vdev_stat_lock);
 
                if (flags & ZIO_FLAG_IO_REPAIR) {
-                       if (flags & ZIO_FLAG_SCRUB_THREAD) {
+                       if (flags & ZIO_FLAG_SCAN_THREAD) {
                                dsl_scan_phys_t *scn_phys =
                                    &spa->spa_dsl_pool->dp_scan->scn_phys;
                                uint64_t *processed = &scn_phys->scn_processed;
@@ -2597,7 +2656,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
 
        if (type == ZIO_TYPE_WRITE && txg != 0 &&
            (!(flags & ZIO_FLAG_IO_REPAIR) ||
-           (flags & ZIO_FLAG_SCRUB_THREAD) ||
+           (flags & ZIO_FLAG_SCAN_THREAD) ||
            spa->spa_claiming)) {
                /*
                 * This is either a normal write (not a repair), or it's
@@ -2616,7 +2675,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
                 */
                if (vd->vdev_ops->vdev_op_leaf) {
                        uint64_t commit_txg = txg;
-                       if (flags & ZIO_FLAG_SCRUB_THREAD) {
+                       if (flags & ZIO_FLAG_SCAN_THREAD) {
                                ASSERT(flags & ZIO_FLAG_IO_REPAIR);
                                ASSERT(spa_sync_pass(spa) == 1);
                                vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
@@ -2699,6 +2758,8 @@ vdev_config_dirty(vdev_t *vd)
        vdev_t *rvd = spa->spa_root_vdev;
        int c;
 
+       ASSERT(spa_writeable(spa));
+
        /*
         * If this is an aux vdev (as with l2cache and spare devices), then we
         * update the vdev config manually and set the sync flag.
@@ -2787,6 +2848,7 @@ vdev_state_dirty(vdev_t *vd)
 {
        spa_t *spa = vd->vdev_spa;
 
+       ASSERT(spa_writeable(spa));
        ASSERT(vd == vd->vdev_top);
 
        /*
@@ -2827,9 +2889,10 @@ vdev_propagate_state(vdev_t *vd)
        int degraded = 0, faulted = 0;
        int corrupted = 0;
        vdev_t *child;
+       int c;
 
        if (vd->vdev_children > 0) {
-               for (int c = 0; c < vd->vdev_children; c++) {
+               for (c = 0; c < vd->vdev_children; c++) {
                        child = vd->vdev_child[c];
 
                        /*
@@ -2944,12 +3007,13 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
                vd->vdev_removed = B_TRUE;
        } else if (state == VDEV_STATE_CANT_OPEN) {
                /*
-                * If we fail to open a vdev during an import, we mark it as
-                * "not available", which signifies that it was never there to
-                * begin with.  Failure to open such a device is not considered
-                * an error.
+                * If we fail to open a vdev during an import or recovery, we
+                * mark it as "not available", which signifies that it was
+                * never there to begin with.  Failure to open such a device
+                * is not considered an error.
                 */
-               if (spa_load_state(spa) == SPA_LOAD_IMPORT &&
+               if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
+                   spa_load_state(spa) == SPA_LOAD_RECOVER) &&
                    vd->vdev_ops->vdev_op_leaf)
                        vd->vdev_not_present = 1;
 
@@ -3011,13 +3075,19 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+       /*
+        * Currently, we do not support RAID-Z or partial configuration.
+        * In addition, only a single top-level vdev is allowed and none of the
+        * leaves can be wholedisks.
+        */
+       int c;
+
        if (!vd->vdev_ops->vdev_op_leaf) {
                char *vdev_type = vd->vdev_ops->vdev_op_type;
 
@@ -3032,42 +3102,66 @@ vdev_is_bootable(vdev_t *vd)
                return (B_FALSE);
        }
 
-       for (int c = 0; c < vd->vdev_children; c++) {
+       for (c = 0; c < vd->vdev_children; c++) {
                if (!vdev_is_bootable(vd->vdev_child[c]))
                        return (B_FALSE);
        }
+#endif /* __sun__ || __sun */
        return (B_TRUE);
 }
 
 /*
  * Load the state from the original vdev tree (ovd) which
  * we've retrieved from the MOS config object. If the original
- * vdev was offline then we transfer that state to the device
- * in the current vdev tree (nvd).
+ * vdev was offline or faulted, then we transfer that state to the
+ * device in the current vdev tree (nvd).
  */
 void
 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
 {
-       spa_t *spa = nvd->vdev_spa;
+       int c;
 
-       ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
+       ASSERT(nvd->vdev_top->vdev_islog);
+       ASSERT(spa_config_held(nvd->vdev_spa,
+           SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
        ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
 
-       for (int c = 0; c < nvd->vdev_children; c++)
+       for (c = 0; c < nvd->vdev_children; c++)
                vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
 
-       if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) {
+       if (nvd->vdev_ops->vdev_op_leaf) {
                /*
-                * It would be nice to call vdev_offline()
-                * directly but the pool isn't fully loaded and
-                * the txg threads have not been started yet.
+                * Restore the persistent vdev state
                 */
                nvd->vdev_offline = ovd->vdev_offline;
-               vdev_reopen(nvd->vdev_top);
+               nvd->vdev_faulted = ovd->vdev_faulted;
+               nvd->vdev_degraded = ovd->vdev_degraded;
+               nvd->vdev_removed = ovd->vdev_removed;
        }
 }
 
 /*
+ * Determine if a log device has valid content.  If the vdev was
+ * removed or faulted in the MOS config, then we know that
+ * the content on the log device has already been written to the pool.
+ */
+boolean_t
+vdev_log_state_valid(vdev_t *vd)
+{
+       int c;
+
+       if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
+           !vd->vdev_removed)
+               return (B_TRUE);
+
+       for (c = 0; c < vd->vdev_children; c++)
+               if (vdev_log_state_valid(vd->vdev_child[c]))
+                       return (B_TRUE);
+
+       return (B_FALSE);
+}
+
+/*
  * Expand a vdev if possible.
  */
 void
@@ -3100,3 +3194,54 @@ vdev_split(vdev_t *vd)
        }
        vdev_propagate_state(cvd);
 }
+
+void
+vdev_deadman(vdev_t *vd)
+{
+       int c;
+
+       for (c = 0; c < vd->vdev_children; c++) {
+               vdev_t *cvd = vd->vdev_child[c];
+
+               vdev_deadman(cvd);
+       }
+
+       if (vd->vdev_ops->vdev_op_leaf) {
+               vdev_queue_t *vq = &vd->vdev_queue;
+
+               mutex_enter(&vq->vq_lock);
+               if (avl_numnodes(&vq->vq_pending_tree) > 0) {
+                       spa_t *spa = vd->vdev_spa;
+                       zio_t *fio;
+                       uint64_t delta;
+
+                       /*
+                        * Look at the head of all the pending queues;
+                        * if any I/O has been outstanding for longer than
+                        * the spa_deadman_synctime, we log a zevent.
+                        */
+                       fio = avl_first(&vq->vq_pending_tree);
+                       delta = ddi_get_lbolt64() - fio->io_timestamp;
+                       if (delta > NSEC_TO_TICK(spa_deadman_synctime(spa))) {
+                               zfs_dbgmsg("SLOW IO: zio timestamp %llu, "
+                                   "delta %llu, last io %llu",
+                                   fio->io_timestamp, delta,
+                                   vq->vq_io_complete_ts);
+                               zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
+                                   spa, vd, fio, 0, 0);
+                       }
+               }
+               mutex_exit(&vq->vq_lock);
+       }
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_fault);
+EXPORT_SYMBOL(vdev_degrade);
+EXPORT_SYMBOL(vdev_online);
+EXPORT_SYMBOL(vdev_offline);
+EXPORT_SYMBOL(vdev_clear);
+
+module_param(zfs_scrub_limit, int, 0644);
+MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");
+#endif
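
A note on units in vdev_deadman(): io_timestamp and ddi_get_lbolt64() are
in clock ticks, while spa_deadman_synctime() returns nanoseconds, hence
the NSEC_TO_TICK conversion before the comparison. A self-contained model
of the check, assuming a 100 Hz tick rate and an illustrative threshold:

    #include <stdint.h>
    #include <stdio.h>

    #define NANOSEC 1000000000LL
    #define HZ      100                         /* assumed tick rate */
    #define NSEC_TO_TICK(ns)    ((ns) / (NANOSEC / HZ))

    int
    main(void)
    {
        int64_t deadman_ns = 1000LL * NANOSEC;  /* e.g. 1000 s threshold */
        int64_t now_ticks = 250000;             /* current lbolt */
        int64_t io_ticks = 140000;              /* zio's io_timestamp */
        int64_t delta = now_ticks - io_ticks;

        /* 110000 ticks > 100000 ticks: this I/O is flagged as slow */
        if (delta > NSEC_TO_TICK(deadman_ns))
            printf("SLOW IO: delta %lld ticks\n", (long long)delta);
        else
            printf("within deadman threshold\n");
        return (0);
    }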