X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fvdev.c;h=205a1d1aabbceb7a92088d873ae5211463ccc2f8;hb=af26c4d4ab545767456d8c21ed48e9e01ce6a3e7;hp=604a673f954c5c5a2d4475bd83ccd07651cb41ab;hpb=d6320ddb78fa89c4d0fc2af00ae53c7c70992f96;p=zfs.git

diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 604a673..205a1d1 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -107,7 +109,7 @@ vdev_get_min_asize(vdev_t *vd)
 	vdev_t *pvd = vd->vdev_parent;
 
 	/*
-	 * The our parent is NULL (inactive spare or cache) or is the root,
+	 * If our parent is NULL (inactive spare or cache) or is the root,
 	 * just return our own asize.
 	 */
 	if (pvd == NULL)
@@ -193,7 +195,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
 	newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-	newchild = kmem_zalloc(newsize, KM_SLEEP);
+	newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
 	if (pvd->vdev_child != NULL) {
 		bcopy(pvd->vdev_child, newchild, oldsize);
 		kmem_free(pvd->vdev_child, oldsize);
@@ -263,7 +265,7 @@ vdev_compact_children(vdev_t *pvd)
 		if (pvd->vdev_child[c])
 			newc++;
 
-	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
 	for (c = newc = 0; c < oldc; c++) {
 		if ((cvd = pvd->vdev_child[c]) != NULL) {
@@ -286,11 +288,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	vdev_t *vd;
 	int t;
 
-	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+	vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
 	if (spa->spa_root_vdev == NULL) {
 		ASSERT(ops == &vdev_root_ops);
 		spa->spa_root_vdev = vd;
+		spa->spa_load_guid = spa_generate_guid(NULL);
 	}
 
 	if (guid == 0 && ops != &vdev_hole_ops) {
@@ -317,6 +320,8 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	vd->vdev_state = VDEV_STATE_CLOSED;
 	vd->vdev_ishole = (ops == &vdev_hole_ops);
 
+	list_link_init(&vd->vdev_config_dirty_node);
+	list_link_init(&vd->vdev_state_dirty_node);
 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -490,7 +495,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 		    &vd->vdev_removing);
 	}
 
-	if (parent && !parent->vdev_parent) {
+	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
 		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
 		    alloctype == VDEV_ALLOC_ADD ||
 		    alloctype == VDEV_ALLOC_SPLIT ||
@@ -667,6 +672,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 	svd->vdev_ms_shift = 0;
 	svd->vdev_ms_count = 0;
 
+	if (tvd->vdev_mg)
+		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
 	tvd->vdev_mg = svd->vdev_mg;
 	tvd->vdev_ms = svd->vdev_ms;
 
@@ -740,6 +747,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 
 	mvd->vdev_asize = cvd->vdev_asize;
 	mvd->vdev_min_asize = cvd->vdev_min_asize;
+	mvd->vdev_max_asize = cvd->vdev_max_asize;
 	mvd->vdev_ashift = cvd->vdev_ashift;
 	mvd->vdev_state = cvd->vdev_state;
 	mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -831,7 +839,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
 	ASSERT(oldc <= newc);
 
-	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
 	if (oldc != 0) {
 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -986,7 +994,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 	mutex_enter(&vd->vdev_probe_lock);
 
 	if ((pio = vd->vdev_probe_zio) == NULL) {
-		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+		vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
@@ -1067,6 +1075,15 @@ vdev_open_child(void *arg)
 boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
+/*
+ * Stacking zpools on top of zvols is unsupported until we implement a method
+ * for determining if an arbitrary block device is a zvol without using the
+ * path. Solaris would check the 'zvol' path component but this does not
+ * exist in the Linux port, so we really should do something like stat the
+ * file and check the major number. This is complicated by the fact that
+ * we need to do this portably in user or kernel space.
+ */
+#if 0
 	int c;
 
 	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
@@ -1075,6 +1092,7 @@ vdev_uses_zvols(vdev_t *vd)
 	for (c = 0; c < vd->vdev_children; c++)
 		if (vdev_uses_zvols(vd->vdev_child[c]))
 			return (B_TRUE);
+#endif
 
 	return (B_FALSE);
 }
@@ -1115,7 +1133,8 @@ vdev_open(vdev_t *vd)
 	spa_t *spa = vd->vdev_spa;
 	int error;
 	uint64_t osize = 0;
-	uint64_t asize, psize;
+	uint64_t max_osize = 0;
+	uint64_t asize, max_asize, psize;
 	uint64_t ashift = 0;
 	int c;
 
@@ -1147,7 +1166,7 @@ vdev_open(vdev_t *vd)
 		return (ENXIO);
 	}
 
-	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
 
 	/*
 	 * Reset the vdev_reopening flag so that we actually close
@@ -1205,6 +1224,7 @@ vdev_open(vdev_t *vd)
 	}
 
 	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
 
 	if (vd->vdev_children == 0) {
 		if (osize < SPA_MINDEVSIZE) {
@@ -1214,6 +1234,8 @@ vdev_open(vdev_t *vd)
 		}
 		psize = osize;
 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+		    VDEV_LABEL_END_SIZE);
 	} else {
 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
@@ -1223,6 +1245,7 @@ vdev_open(vdev_t *vd)
 		}
 		psize = 0;
 		asize = osize;
+		max_asize = max_osize;
 	}
 
 	vd->vdev_psize = psize;
@@ -1242,6 +1265,7 @@ vdev_open(vdev_t *vd)
 		 * For testing purposes, a higher ashift can be requested.
 		 */
 		vd->vdev_asize = asize;
+		vd->vdev_max_asize = max_asize;
 		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
 	} else {
 		/*
@@ -1252,6 +1276,7 @@ vdev_open(vdev_t *vd)
 			    VDEV_AUX_BAD_LABEL);
 			return (EINVAL);
 		}
+		vd->vdev_max_asize = max_asize;
 	}
 
 	/*
@@ -1293,13 +1318,18 @@ vdev_open(vdev_t *vd)
  * contents. This needs to be done before vdev_load() so that we don't
  * inadvertently do repair I/Os to the wrong device.
  *
+ * If 'strict' is false ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
  * This function will only return failure if one of the vdevs indicates that it
  * has since been destroyed or exported. This is only possible if
  * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
 * will be updated but the function will return 0.
  */
 int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
 {
 	spa_t *spa = vd->vdev_spa;
 	nvlist_t *label;
@@ -1308,7 +1338,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
 	int c;
 
 	for (c = 0; c < vd->vdev_children; c++)
-		if (vdev_validate(vd->vdev_child[c]) != 0)
+		if (vdev_validate(vd->vdev_child[c], strict) != 0)
 			return (EBADF);
 
 	/*
@@ -1338,8 +1368,9 @@ vdev_validate(vdev_t *vd, boolean_t strict)
 			return (0);
 		}
 
-		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
-		    &guid) != 0 || guid != spa_guid(spa)) {
+		if (strict && (nvlist_lookup_uint64(label,
+		    ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+		    guid != spa_guid(spa))) {
 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
 			    VDEV_AUX_CORRUPT_DATA);
 			nvlist_free(label);
@@ -1412,8 +1443,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
 void
 vdev_close(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
 	vdev_t *pvd = vd->vdev_parent;
+	ASSERTV(spa_t *spa = vd->vdev_spa);
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 
@@ -1502,7 +1533,7 @@ vdev_reopen(vdev_t *vd)
 		    !l2arc_vdev_present(vd))
 			l2arc_add_vdev(spa, vd);
 	} else {
-		(void) vdev_validate(vd);
+		(void) vdev_validate(vd, B_TRUE);
 	}
 
 	/*
@@ -1795,8 +1826,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	if (vd->vdev_detached) {
 		if (smo->smo_object != 0) {
-			int err = dmu_object_free(mos, smo->smo_object, tx);
-			ASSERT3U(err, ==, 0);
+			VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
 			smo->smo_object = 0;
 		}
 		dmu_tx_commit(tx);
@@ -2034,7 +2064,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
 
 	ASSERT(!vd->vdev_ishole);
 
-	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
 		metaslab_sync_done(msp, txg);
 
 	if (reassess)
@@ -2387,7 +2417,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
 			spa_async_request(spa, SPA_ASYNC_RESILVER);
 
-		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
+		spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
 	}
 
 	/*
@@ -2477,6 +2507,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	vs->vs_rsize = vdev_get_min_asize(vd);
 	if (vd->vdev_ops->vdev_op_leaf)
 		vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+	vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
 	mutex_exit(&vd->vdev_stat_lock);
 
 	/*
@@ -3042,13 +3073,17 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+	/*
+	 * Currently, we do not support RAID-Z or partial configuration.
+	 * In addition, only a single top-level vdev is allowed and none of the
+	 * leaves can be wholedisks.
+	 */
 	int c;
 
 	if (!vd->vdev_ops->vdev_op_leaf) {
@@ -3069,6 +3104,7 @@ vdev_is_bootable(vdev_t *vd)
 		if (!vdev_is_bootable(vd->vdev_child[c]))
 			return (B_FALSE);
 	}
+#endif /* __sun__ || __sun */
 	return (B_TRUE);
 }
 
@@ -3084,7 +3120,8 @@ vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
 	int c;
 
 	ASSERT(nvd->vdev_top->vdev_islog);
-	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
+	ASSERT(spa_config_held(nvd->vdev_spa,
+	    SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 	ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
 
 	for (c = 0; c < nvd->vdev_children; c++)
@@ -3155,3 +3192,14 @@ vdev_split(vdev_t *vd)
 	}
 	vdev_propagate_state(cvd);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_fault);
+EXPORT_SYMBOL(vdev_degrade);
+EXPORT_SYMBOL(vdev_online);
+EXPORT_SYMBOL(vdev_offline);
+EXPORT_SYMBOL(vdev_clear);
+
+module_param(zfs_scrub_limit, int, 0644);
+MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev");
+#endif