X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fvdev.c;h=662a877f8543a09d15a3bfa1e25b8de822f2a4c8;hb=refs%2Fheads%2Frertzinger%2Ffeature-zpool-get--p;hp=06c7d0c8937b5f36c7ae86fe80194f38a0bc0484;hpb=b8d06fca089fae4680c3a552fc55c512bfb02202;p=zfs.git

diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 06c7d0c..662a877 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -22,7 +22,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -42,6 +42,7 @@
 #include <sys/arc.h>
 #include <sys/zil.h>
 #include <sys/dsl_scan.h>
+#include <sys/zvol.h>
 
 /*
  * Virtual device management.
@@ -60,9 +61,6 @@ static vdev_ops_t *vdev_ops_table[] = {
 	NULL
 };
 
-/* maximum scrub/resilver I/O queue per leaf vdev */
-int zfs_scrub_limit = 10;
-
 /*
  * Given a vdev type, return the appropriate ops vector.
  */
@@ -109,7 +107,7 @@ vdev_get_min_asize(vdev_t *vd)
 	vdev_t *pvd = vd->vdev_parent;
 
 	/*
-	 * The our parent is NULL (inactive spare or cache) or is the root,
+	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
 	if (pvd == NULL)
@@ -600,9 +598,9 @@ vdev_free(vdev_t *vd)
 		metaslab_group_destroy(vd->vdev_mg);
 	}
 
-	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
-	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
-	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
+	ASSERT0(vd->vdev_stat.vs_space);
+	ASSERT0(vd->vdev_stat.vs_dspace);
+	ASSERT0(vd->vdev_stat.vs_alloc);
 
 	/*
 	 * Remove this vdev from its parent's child list.
@@ -747,6 +745,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 
 	mvd->vdev_asize = cvd->vdev_asize;
 	mvd->vdev_min_asize = cvd->vdev_min_asize;
+	mvd->vdev_max_asize = cvd->vdev_max_asize;
 	mvd->vdev_ashift = cvd->vdev_ashift;
 	mvd->vdev_state = cvd->vdev_state;
 	mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -903,6 +902,8 @@ vdev_metaslab_fini(vdev_t *vd)
 		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
 		vd->vdev_ms = NULL;
 	}
+
+	ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
 }
 
 typedef struct vdev_probe_stats {
@@ -1071,27 +1072,20 @@ vdev_open_child(void *arg)
 	vd->vdev_open_thread = NULL;
 }
 
-boolean_t
+static boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
-/*
- * Stacking zpools on top of zvols is unsupported until we implement a method
- * for determining if an arbitrary block device is a zvol without using the
- * path. Solaris would check the 'zvol' path component but this does not
- * exist in the Linux port, so we really should do something like stat the
- * file and check the major number. This is complicated by the fact that
- * we need to do this portably in user or kernel space.
- */ -#if 0 int c; - if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, - strlen(ZVOL_DIR)) == 0) +#ifdef _KERNEL + if (zvol_is_zvol(vd->vdev_path)) return (B_TRUE); +#endif + for (c = 0; c < vd->vdev_children; c++) if (vdev_uses_zvols(vd->vdev_child[c])) return (B_TRUE); -#endif + return (B_FALSE); } @@ -1132,7 +1126,8 @@ vdev_open(vdev_t *vd) spa_t *spa = vd->vdev_spa; int error; uint64_t osize = 0; - uint64_t asize, psize; + uint64_t max_osize = 0; + uint64_t asize, max_asize, psize; uint64_t ashift = 0; int c; @@ -1164,7 +1159,7 @@ vdev_open(vdev_t *vd) return (ENXIO); } - error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift); + error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift); /* * Reset the vdev_reopening flag so that we actually close @@ -1222,6 +1217,7 @@ vdev_open(vdev_t *vd) } osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); + max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t)); if (vd->vdev_children == 0) { if (osize < SPA_MINDEVSIZE) { @@ -1231,6 +1227,8 @@ vdev_open(vdev_t *vd) } psize = osize; asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); + max_asize = max_osize - (VDEV_LABEL_START_SIZE + + VDEV_LABEL_END_SIZE); } else { if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { @@ -1240,6 +1238,7 @@ vdev_open(vdev_t *vd) } psize = 0; asize = osize; + max_asize = max_osize; } vd->vdev_psize = psize; @@ -1256,19 +1255,25 @@ vdev_open(vdev_t *vd) if (vd->vdev_asize == 0) { /* * This is the first-ever open, so use the computed values. - * For testing purposes, a higher ashift can be requested. + * For compatibility, a different ashift can be requested. */ vd->vdev_asize = asize; - vd->vdev_ashift = MAX(ashift, vd->vdev_ashift); + vd->vdev_max_asize = max_asize; + if (vd->vdev_ashift == 0) + vd->vdev_ashift = ashift; } else { /* - * Make sure the alignment requirement hasn't increased. + * Detect if the alignment requirement has increased. + * We don't want to make the pool unavailable, just + * post an event instead. */ - if (ashift > vd->vdev_top->vdev_ashift) { - vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, - VDEV_AUX_BAD_LABEL); - return (EINVAL); + if (ashift > vd->vdev_top->vdev_ashift && + vd->vdev_ops->vdev_op_leaf) { + zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT, + spa, vd, NULL, 0, 0); } + + vd->vdev_max_asize = max_asize; } /* @@ -1341,8 +1346,10 @@ vdev_validate(vdev_t *vd, boolean_t strict) if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { uint64_t aux_guid = 0; nvlist_t *nvl; + uint64_t txg = spa_last_synced_txg(spa) != 0 ? 
+	    spa_last_synced_txg(spa) : -1ULL;
 
-		if ((label = vdev_label_read_config(vd)) == NULL) {
+		if ((label = vdev_label_read_config(vd, txg)) == NULL) {
 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
 			    VDEV_AUX_BAD_LABEL);
 			return (0);
@@ -1818,7 +1825,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	if (vd->vdev_detached) {
 		if (smo->smo_object != 0) {
-			VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
+			VERIFY0(dmu_object_free(mos, smo->smo_object, tx));
 			smo->smo_object = 0;
 		}
 		dmu_tx_commit(tx);
@@ -1848,6 +1855,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	space_map_truncate(smo, mos, tx);
 	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
+	space_map_vacate(&smsync, NULL, NULL);
 
 	space_map_destroy(&smsync);
 
@@ -1986,14 +1994,14 @@ vdev_validate_aux(vdev_t *vd)
 	if (!vdev_readable(vd))
 		return (0);
 
-	if ((label = vdev_label_read_config(vd)) == NULL) {
+	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
 		    VDEV_AUX_CORRUPT_DATA);
 		return (-1);
 	}
 
 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
-	    version > SPA_VERSION ||
+	    !SPA_VERSION_IS_SUPPORTED(version) ||
 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
 	    guid != vd->vdev_guid ||
 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
@@ -2022,7 +2030,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
 
 	if (vd->vdev_dtl_smo.smo_object) {
-		ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
+		ASSERT0(vd->vdev_dtl_smo.smo_alloc);
 		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
 		vd->vdev_dtl_smo.smo_object = 0;
 	}
@@ -2034,7 +2042,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
 		if (msp == NULL || msp->ms_smo.smo_object == 0)
 			continue;
 
-		ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
+		ASSERT0(msp->ms_smo.smo_alloc);
 		(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
 		msp->ms_smo.smo_object = 0;
 	}
@@ -2312,7 +2320,7 @@ top:
 			(void) spa_vdev_state_exit(spa, vd, 0);
 			goto top;
 		}
-		ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
+		ASSERT0(tvd->vdev_stat.vs_alloc);
 	}
 
 	/*
@@ -2499,6 +2507,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	vs->vs_rsize = vdev_get_min_asize(vd);
 	if (vd->vdev_ops->vdev_op_leaf)
 		vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+	vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
 	mutex_exit(&vd->vdev_stat_lock);
 
 	/*
@@ -3184,13 +3193,50 @@ vdev_split(vdev_t *vd)
 	vdev_propagate_state(cvd);
 }
 
+void
+vdev_deadman(vdev_t *vd)
+{
+	int c;
+
+	for (c = 0; c < vd->vdev_children; c++) {
+		vdev_t *cvd = vd->vdev_child[c];
+
+		vdev_deadman(cvd);
+	}
+
+	if (vd->vdev_ops->vdev_op_leaf) {
+		vdev_queue_t *vq = &vd->vdev_queue;
+
+		mutex_enter(&vq->vq_lock);
+		if (avl_numnodes(&vq->vq_pending_tree) > 0) {
+			spa_t *spa = vd->vdev_spa;
+			zio_t *fio;
+			uint64_t delta;
+
+			/*
+			 * Look at the head of all the pending queues;
+			 * if any I/O has been outstanding for longer than
+			 * the spa_deadman_synctime, we log a zevent.
+ */ + fio = avl_first(&vq->vq_pending_tree); + delta = gethrtime() - fio->io_timestamp; + if (delta > spa_deadman_synctime(spa)) { + zfs_dbgmsg("SLOW IO: zio timestamp %lluns, " + "delta %lluns, last io %lluns", + fio->io_timestamp, delta, + vq->vq_io_complete_ts); + zfs_ereport_post(FM_EREPORT_ZFS_DELAY, + spa, vd, fio, 0, 0); + } + } + mutex_exit(&vq->vq_lock); + } +} + #if defined(_KERNEL) && defined(HAVE_SPL) EXPORT_SYMBOL(vdev_fault); EXPORT_SYMBOL(vdev_degrade); EXPORT_SYMBOL(vdev_online); EXPORT_SYMBOL(vdev_offline); EXPORT_SYMBOL(vdev_clear); - -module_param(zfs_scrub_limit, int, 0644); -MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev"); #endif
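
A note on the vdev_uses_zvols() change above: the previously disabled ZVOL_DIR
path check is replaced by a kernel-only zvol_is_zvol() test on the vdev path,
followed by a recursive walk of the children, so pools stacked on top of zvols
can now be detected without parsing device paths. The following userspace
sketch shows the same check-self-then-recurse pattern; toy_vdev_t and
toy_uses_zvols() are illustrative stand-ins, not part of ZFS:

#include <stdio.h>

/* Toy stand-in for vdev_t; only the fields the walk needs. */
typedef struct toy_vdev {
	int tv_is_zvol;			/* stands in for zvol_is_zvol(path) */
	int tv_children;
	struct toy_vdev **tv_child;
} toy_vdev_t;

/* Same shape as vdev_uses_zvols(): check self, then recurse into children. */
static int
toy_uses_zvols(const toy_vdev_t *vd)
{
	int c;

	if (vd->tv_is_zvol)
		return (1);

	for (c = 0; c < vd->tv_children; c++)
		if (toy_uses_zvols(vd->tv_child[c]))
			return (1);

	return (0);
}

int
main(void)
{
	toy_vdev_t leaf = { 1, 0, NULL };
	toy_vdev_t *kids[] = { &leaf };
	toy_vdev_t root = { 0, 1, kids };

	/* Prints 1: one leaf is a zvol, so the whole tree "uses zvols". */
	printf("%d\n", toy_uses_zvols(&root));
	return (0);
}

Making the function static is consistent with the removal of its only
external use together with the zfs_scrub_limit tunable at the bottom of
the patch.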
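The max_osize/max_asize values threaded through vdev_op_open() and
vdev_open() exist so that vdev_get_stats() can report expandable capacity:
vs_esize is simply the gap between what the device could provide after
expansion (vdev_max_asize) and what is currently usable (vdev_asize). A
minimal sketch of that arithmetic, with made-up sizes:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Illustrative numbers, not taken from a real pool. */
	uint64_t asize = 100ULL << 30;		/* 100 GiB currently usable */
	uint64_t max_asize = 150ULL << 30;	/* 150 GiB after expansion */

	/* Mirrors vs_esize = vd->vdev_max_asize - vd->vdev_asize above. */
	uint64_t esize = max_asize - asize;

	printf("expandable: %llu bytes\n", (unsigned long long)esize);
	return (0);
}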
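Finally, vdev_deadman() recurses over the vdev tree and, at each leaf,
compares the age of the oldest pending zio (the head of vq_pending_tree)
against spa_deadman_synctime(spa); when the threshold is exceeded it only
logs a debug message and posts an FM_EREPORT_ZFS_DELAY zevent, it does not
abort the I/O. A userspace sketch of just the age check, assuming
gethrtime() can be emulated with clock_gettime(CLOCK_MONOTONIC) and using
an arbitrary threshold:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

typedef uint64_t hrtime_t;

/* Userspace stand-in for the kernel's gethrtime(). */
static hrtime_t
toy_gethrtime(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((hrtime_t)ts.tv_sec * 1000000000ULL + (hrtime_t)ts.tv_nsec);
}

int
main(void)
{
	/* Illustrative threshold; the real one comes from spa_deadman_synctime(). */
	hrtime_t synctime = 1000ULL * 1000000000ULL;	/* 1000 s in ns */

	hrtime_t io_timestamp = toy_gethrtime();	/* oldest pending I/O */
	hrtime_t delta = toy_gethrtime() - io_timestamp;

	/* Same comparison as the deadman check above. */
	if (delta > synctime)
		printf("SLOW IO: delta %lluns\n", (unsigned long long)delta);
	else
		printf("I/O within deadman threshold\n");

	return (0);
}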