zfs_scrub_limit tunable is not used anywhere
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 4bed646..c44e4f6 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -40,6 +42,7 @@
 #include <sys/arc.h>
 #include <sys/zil.h>
 #include <sys/dsl_scan.h>
+#include <sys/zvol.h>
 
 /*
  * Virtual device management.
@@ -58,9 +61,6 @@ static vdev_ops_t *vdev_ops_table[] = {
        NULL
 };
 
-/* maximum scrub/resilver I/O queue per leaf vdev */
-int zfs_scrub_limit = 10;
-
 /*
  * Given a vdev type, return the appropriate ops vector.
  */
@@ -107,7 +107,7 @@ vdev_get_min_asize(vdev_t *vd)
        vdev_t *pvd = vd->vdev_parent;
 
        /*
-        * The our parent is NULL (inactive spare or cache) or is the root,
+        * If our parent is NULL (inactive spare or cache) or is the root,
         * just return our own asize.
         */
        if (pvd == NULL)
@@ -193,7 +193,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-       newchild = kmem_zalloc(newsize, KM_SLEEP);
+       newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
        if (pvd->vdev_child != NULL) {
                bcopy(pvd->vdev_child, newchild, oldsize);
                kmem_free(pvd->vdev_child, oldsize);
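
The KM_SLEEP to KM_PUSHPAGE conversions here and in the hunks below all follow one rule on Linux: an allocation reachable from the txg sync (writeback) path must not enter direct memory reclaim, because reclaim can recurse into ZFS writeback and deadlock. KM_PUSHPAGE directs the SPL to satisfy the request without initiating filesystem writeback; the metaslab-array hunk further down also passes KM_NODEBUG to silence the SPL's large-allocation warning. A minimal sketch of the rule, assuming the SPL kmem interface (the helper name is hypothetical):

    #include <sys/kmem.h>	/* SPL: kmem_zalloc(), KM_PUSHPAGE */

    /*
     * Hypothetical helper for contexts that may run during txg sync:
     * KM_SLEEP could deadlock by re-entering ZFS reclaim, KM_PUSHPAGE
     * cannot.
     */
    static void *
    sync_ctx_zalloc(size_t size)
    {
            return (kmem_zalloc(size, KM_PUSHPAGE));
    }
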
@@ -263,7 +263,7 @@ vdev_compact_children(vdev_t *pvd)
                if (pvd->vdev_child[c])
                        newc++;
 
-       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+       newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
        for (c = newc = 0; c < oldc; c++) {
                if ((cvd = pvd->vdev_child[c]) != NULL) {
@@ -286,11 +286,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
        vdev_t *vd;
        int t;
 
-       vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+       vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
+               spa->spa_load_guid = spa_generate_guid(NULL);
        }
 
        if (guid == 0 && ops != &vdev_hole_ops) {
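
The spa_load_guid added here is an in-core identity generated fresh by spa_generate_guid(NULL) each time the root vdev is allocated, deliberately distinct from the on-disk pool guid: the on-disk guid can change under 'zpool reguid', while runtime consumers need a key that stays stable for the life of the load. A hedged sketch of the intended usage (the spa_load_guid() accessor lives in spa_misc.c):

    /*
     * Key runtime state (e.g., cached ARC buffers) on the load guid,
     * which 'zpool reguid' cannot change, instead of spa_guid().
     */
    uint64_t key = spa_load_guid(spa);
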
@@ -492,7 +493,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
                    &vd->vdev_removing);
        }
 
-       if (parent && !parent->vdev_parent) {
+       if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
                ASSERT(alloctype == VDEV_ALLOC_LOAD ||
                    alloctype == VDEV_ALLOC_ADD ||
                    alloctype == VDEV_ALLOC_SPLIT ||
@@ -669,6 +670,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
        svd->vdev_ms_shift = 0;
        svd->vdev_ms_count = 0;
 
+       if (tvd->vdev_mg)
+               ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
        tvd->vdev_mg = svd->vdev_mg;
        tvd->vdev_ms = svd->vdev_ms;
 
@@ -742,6 +745,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 
        mvd->vdev_asize = cvd->vdev_asize;
        mvd->vdev_min_asize = cvd->vdev_min_asize;
+       mvd->vdev_max_asize = cvd->vdev_max_asize;
        mvd->vdev_ashift = cvd->vdev_ashift;
        mvd->vdev_state = cvd->vdev_state;
        mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -833,7 +837,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
        ASSERT(oldc <= newc);
 
-       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+       mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
        if (oldc != 0) {
                bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -898,6 +902,8 @@ vdev_metaslab_fini(vdev_t *vd)
                kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
                vd->vdev_ms = NULL;
        }
+
+       ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
 }
 
 typedef struct vdev_probe_stats {
@@ -988,7 +994,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
        mutex_enter(&vd->vdev_probe_lock);
 
        if ((pio = vd->vdev_probe_zio) == NULL) {
-               vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+               vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
                vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
                    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
@@ -1066,27 +1072,20 @@ vdev_open_child(void *arg)
        vd->vdev_open_thread = NULL;
 }
 
-boolean_t
+static boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
-/*
- * Stacking zpools on top of zvols is unsupported until we implement a method
- * for determining if an arbitrary block device is a zvol without using the
- * path.  Solaris would check the 'zvol' path component but this does not
- * exist in the Linux port, so we really should do something like stat the
- * file and check the major number.  This is complicated by the fact that
- * we need to do this portably in user or kernel space.
- */
-#if 0
        int c;
 
-       if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
-           strlen(ZVOL_DIR)) == 0)
+#ifdef _KERNEL
+       if (zvol_is_zvol(vd->vdev_path))
                return (B_TRUE);
+#endif
+
        for (c = 0; c < vd->vdev_children; c++)
                if (vdev_uses_zvols(vd->vdev_child[c]))
                        return (B_TRUE);
-#endif
+
        return (B_FALSE);
 }
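
Replacing the stubbed-out prefix match with zvol_is_zvol() lets the Linux port decide from a path alone whether a block device is a zvol, which the old ZVOL_DIR check could not do. For context, the caller that motivates this function is vdev_open_children() in this same file, which behaves roughly as sketched below (a hedged sketch, not the verbatim function): if any descendant is backed by a zvol, the children are opened serially in the current thread rather than through a taskq, so a single thread holds spa_namespace_lock across the nested opens.

    /* Sketch of the vdev_open_children() zvol path (not this hunk): */
    if (vdev_uses_zvols(vd)) {
            int c;
            for (c = 0; c < vd->vdev_children; c++)
                    vd->vdev_child[c]->vdev_open_error =
                        vdev_open(vd->vdev_child[c]);
            return;
    }
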
 
@@ -1127,7 +1126,8 @@ vdev_open(vdev_t *vd)
        spa_t *spa = vd->vdev_spa;
        int error;
        uint64_t osize = 0;
-       uint64_t asize, psize;
+       uint64_t max_osize = 0;
+       uint64_t asize, max_asize, psize;
        uint64_t ashift = 0;
        int c;
 
@@ -1159,7 +1159,7 @@ vdev_open(vdev_t *vd)
                return (ENXIO);
        }
 
-       error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+       error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
 
        /*
         * Reset the vdev_reopening flag so that we actually close
@@ -1217,6 +1217,7 @@ vdev_open(vdev_t *vd)
        }
 
        osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+       max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
 
        if (vd->vdev_children == 0) {
                if (osize < SPA_MINDEVSIZE) {
@@ -1226,6 +1227,8 @@ vdev_open(vdev_t *vd)
                }
                psize = osize;
                asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+               max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+                   VDEV_LABEL_END_SIZE);
        } else {
                if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
                    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
@@ -1235,6 +1238,7 @@ vdev_open(vdev_t *vd)
                }
                psize = 0;
                asize = osize;
+               max_asize = max_osize;
        }
 
        vd->vdev_psize = psize;
@@ -1251,19 +1255,25 @@ vdev_open(vdev_t *vd)
        if (vd->vdev_asize == 0) {
                /*
                 * This is the first-ever open, so use the computed values.
-                * For testing purposes, a higher ashift can be requested.
+                * For compatibility, a different ashift can be requested.
                 */
                vd->vdev_asize = asize;
-               vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
+               vd->vdev_max_asize = max_asize;
+               if (vd->vdev_ashift == 0)
+                       vd->vdev_ashift = ashift;
        } else {
                /*
-                * Make sure the alignment requirement hasn't increased.
+                * Detect if the alignment requirement has increased.
+                * We don't want to make the pool unavailable, just
+                * post an event instead.
                 */
-               if (ashift > vd->vdev_top->vdev_ashift) {
-                       vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
-                           VDEV_AUX_BAD_LABEL);
-                       return (EINVAL);
+               if (ashift > vd->vdev_top->vdev_ashift &&
+                   vd->vdev_ops->vdev_op_leaf) {
+                       zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
+                           spa, vd, NULL, 0, 0);
                }
+
+               vd->vdev_max_asize = max_asize;
        }
 
        /*
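
Two behavioral changes land in vdev_open(). First, vdev_op_open() now reports a max_osize alongside osize, and the derived vdev_max_asize records how large the vdev could grow (for example, a LUN expanded underneath the pool). Second, alignment handling is relaxed: on first open a configured ashift is honored as-is instead of being raised to the detected value (hence "different" rather than "higher" in the comment), and on reopen an increased device ashift posts an FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT event instead of faulting the vdev. As a reminder of the unit involved (assumed example values):

    /* ashift is log2 of the minimum allocatable unit on the vdev. */
    uint64_t ashift = 12;                   /* 4 KiB advanced-format disk */
    uint64_t alloc_unit = 1ULL << ashift;   /* 4096 bytes */
    /* A legacy 512-byte-sector disk would report ashift = 9. */
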
@@ -1305,13 +1315,18 @@ vdev_open(vdev_t *vd)
  * contents.  This needs to be done before vdev_load() so that we don't
  * inadvertently do repair I/Os to the wrong device.
  *
+ * If 'strict' is false ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
  * This function will only return failure if one of the vdevs indicates that it
  * has since been destroyed or exported.  This is only possible if
  * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
  * will be updated but the function will return 0.
  */
 int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
 {
        spa_t *spa = vd->vdev_spa;
        nvlist_t *label;
@@ -1320,7 +1335,7 @@ vdev_validate(vdev_t *vd)
        int c;
 
        for (c = 0; c < vd->vdev_children; c++)
-               if (vdev_validate(vd->vdev_child[c]) != 0)
+               if (vdev_validate(vd->vdev_child[c], strict) != 0)
                        return (EBADF);
 
        /*
@@ -1331,8 +1346,10 @@ vdev_validate(vdev_t *vd)
        if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
                uint64_t aux_guid = 0;
                nvlist_t *nvl;
+               uint64_t txg = spa_last_synced_txg(spa) != 0 ?
+                   spa_last_synced_txg(spa) : -1ULL;
 
-               if ((label = vdev_label_read_config(vd)) == NULL) {
+               if ((label = vdev_label_read_config(vd, txg)) == NULL) {
                        vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_BAD_LABEL);
                        return (0);
@@ -1350,8 +1367,9 @@ vdev_validate(vdev_t *vd)
                        return (0);
                }
 
-               if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
-                   &guid) != 0 || guid != spa_guid(spa)) {
+               if (strict && (nvlist_lookup_uint64(label,
+                   ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+                   guid != spa_guid(spa))) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        nvlist_free(label);
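
The strict flag exists because 'zpool reguid' can be interrupted after the new guid reaches every label but before it reaches the cachefile; a strict guid comparison on the next import would then reject perfectly good vdevs. The companion change threads the last synced txg into vdev_label_read_config() so stale labels can be screened out. The expected call pattern (a hedged sketch; the real call sites are in spa.c, and the wrapper name is hypothetical) is a relaxed pass against the cached config followed by a strict pass once the MOS config is loaded:

    /* Hypothetical wrapper showing the two validation passes. */
    static int
    example_validate(vdev_t *rvd, boolean_t have_mos_config)
    {
            /*
             * Cachefile pass (B_FALSE): the pool guid may be stale
             * after a crashed reguid.  MOS pass (B_TRUE): enforce it.
             */
            return (vdev_validate(rvd, have_mos_config ? B_TRUE : B_FALSE));
    }
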
@@ -1514,7 +1532,7 @@ vdev_reopen(vdev_t *vd)
                    !l2arc_vdev_present(vd))
                        l2arc_add_vdev(spa, vd);
        } else {
-               (void) vdev_validate(vd);
+               (void) vdev_validate(vd, B_TRUE);
        }
 
        /*
@@ -1975,14 +1993,14 @@ vdev_validate_aux(vdev_t *vd)
        if (!vdev_readable(vd))
                return (0);
 
-       if ((label = vdev_label_read_config(vd)) == NULL) {
+       if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                return (-1);
        }
 
        if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
-           version > SPA_VERSION ||
+           !SPA_VERSION_IS_SUPPORTED(version) ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
            guid != vd->vdev_guid ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
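
Switching from 'version > SPA_VERSION' to !SPA_VERSION_IS_SUPPORTED(version) prepares the aux-vdev check for feature flags, where the set of supported versions is no longer a contiguous range ending at SPA_VERSION. For reference, the macro in sys/fs/zfs.h reads roughly as follows (quoted from memory, so treat as a sketch):

    #define SPA_VERSION_IS_SUPPORTED(v) \
            (((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
            ((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
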
@@ -2488,6 +2506,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
        vs->vs_rsize = vdev_get_min_asize(vd);
        if (vd->vdev_ops->vdev_op_leaf)
                vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+       vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
        mutex_exit(&vd->vdev_stat_lock);
 
        /*
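
vs_esize reports the headroom between what the device could provide and what the pool currently uses; userland shows it as EXPANDSZ in 'zpool list'. A worked example with assumed numbers:

    /* Leaf vdev whose backing LUN grew from 100 GiB to 200 GiB: */
    uint64_t asize     = 100ULL << 30;          /* currently usable */
    uint64_t max_asize = 200ULL << 30;          /* potentially usable */
    uint64_t esize     = max_asize - asize;     /* EXPANDSZ: 100 GiB */
    /* 'zpool online -e <pool> <dev>' absorbs the extra space. */
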
@@ -3053,13 +3072,17 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+       /*
+        * Currently, we do not support RAID-Z or partial configuration.
+        * In addition, only a single top-level vdev is allowed and none of the
+        * leaves can be wholedisks.
+        */
        int c;
 
        if (!vd->vdev_ops->vdev_op_leaf) {
@@ -3080,6 +3103,7 @@ vdev_is_bootable(vdev_t *vd)
                if (!vdev_is_bootable(vd->vdev_child[c]))
                        return (B_FALSE);
        }
+#endif /* __sun__ || __sun */
        return (B_TRUE);
 }
 
@@ -3168,6 +3192,46 @@ vdev_split(vdev_t *vd)
        vdev_propagate_state(cvd);
 }
 
+void
+vdev_deadman(vdev_t *vd)
+{
+       int c;
+
+       for (c = 0; c < vd->vdev_children; c++) {
+               vdev_t *cvd = vd->vdev_child[c];
+
+               vdev_deadman(cvd);
+       }
+
+       if (vd->vdev_ops->vdev_op_leaf) {
+               vdev_queue_t *vq = &vd->vdev_queue;
+
+               mutex_enter(&vq->vq_lock);
+               if (avl_numnodes(&vq->vq_pending_tree) > 0) {
+                       spa_t *spa = vd->vdev_spa;
+                       zio_t *fio;
+                       uint64_t delta;
+
+                       /*
+                        * Look at the head of all the pending queues,
+                        * if any I/O has been outstanding for longer than
+                        * the spa_deadman_synctime we log a zevent.
+                        */
+                       fio = avl_first(&vq->vq_pending_tree);
+                       delta = ddi_get_lbolt64() - fio->io_timestamp;
+                       if (delta > NSEC_TO_TICK(spa_deadman_synctime(spa))) {
+                               zfs_dbgmsg("SLOW IO: zio timestamp %llu, "
+                                   "delta %llu, last io %llu",
+                                   fio->io_timestamp, delta,
+                                   vq->vq_io_complete_ts);
+                               zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
+                                   spa, vd, fio, 0, 0);
+                       }
+               }
+               mutex_exit(&vq->vq_lock);
+       }
+}
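
vdev_deadman() checks only the zio at the head of each leaf's pending tree, per the comment above, and converts units before comparing: io_timestamp and ddi_get_lbolt64() count clock ticks, while spa_deadman_synctime() returns nanoseconds, hence NSEC_TO_TICK. What drives the walk is outside this diff; roughly (a hedged sketch, assuming the periodic deadman callback in spa_misc.c, with a hypothetical function name):

    /*
     * Sketch: the per-pool deadman timer fires periodically and walks
     * the vdev tree from the root; the recursion above then visits
     * every leaf queue.
     */
    static void
    example_spa_deadman(spa_t *spa)
    {
            vdev_deadman(spa->spa_root_vdev);
    }
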
+
 #if defined(_KERNEL) && defined(HAVE_SPL)
 EXPORT_SYMBOL(vdev_fault);
 EXPORT_SYMBOL(vdev_degrade);