X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fvdev.c;h=d6b55ee48f4d65a315e583596c33df70fe0219e5;hb=cb543e6b5e98546a5caec29ca4b25abec98560a2;hp=bac3e86054d6f8e64f0fed061725a621236c8535;hpb=572e285762521df27fe5b026f409ba1a21abb7ac;p=zfs.git

diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index bac3e86..d6b55ee 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -40,6 +42,7 @@
 #include <sys/arc.h>
 #include <sys/zil.h>
 #include <sys/dsl_scan.h>
+#include <sys/zvol.h>
 
 /*
  * Virtual device management.
@@ -58,9 +61,6 @@ static vdev_ops_t *vdev_ops_table[] = {
 	NULL
 };
 
-/* maximum scrub/resilver I/O queue per leaf vdev */
-int zfs_scrub_limit = 10;
-
 /*
  * Given a vdev type, return the appropriate ops vector.
  */
@@ -85,8 +85,9 @@ vdev_default_asize(vdev_t *vd, uint64_t psize)
 {
 	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
 	uint64_t csize;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++) {
+	for (c = 0; c < vd->vdev_children; c++) {
 		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
 		asize = MAX(asize, csize);
 	}
@@ -106,7 +107,7 @@ vdev_get_min_asize(vdev_t *vd)
 	vdev_t *pvd = vd->vdev_parent;
 
 	/*
-	 * The our parent is NULL (inactive spare or cache) or is the root,
+	 * If our parent is NULL (inactive spare or cache) or is the root,
 	 * just return our own asize.
 	 */
 	if (pvd == NULL)
@@ -132,9 +133,10 @@ vdev_get_min_asize(vdev_t *vd)
 void
 vdev_set_min_asize(vdev_t *vd)
 {
+	int c;
 	vd->vdev_min_asize = vdev_get_min_asize(vd);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_set_min_asize(vd->vdev_child[c]);
 }
 
@@ -157,11 +159,12 @@ vdev_t *
 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
 {
 	vdev_t *mvd;
+	int c;
 
 	if (vd->vdev_guid == guid)
 		return (vd);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
 		    NULL)
 			return (mvd);
@@ -190,7 +193,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
 	newsize = pvd->vdev_children * sizeof (vdev_t *);
 
-	newchild = kmem_zalloc(newsize, KM_SLEEP);
+	newchild = kmem_zalloc(newsize, KM_PUSHPAGE);
 	if (pvd->vdev_child != NULL) {
 		bcopy(pvd->vdev_child, newchild, oldsize);
 		kmem_free(pvd->vdev_child, oldsize);
@@ -252,16 +255,17 @@ vdev_compact_children(vdev_t *pvd)
 	vdev_t **newchild, *cvd;
 	int oldc = pvd->vdev_children;
 	int newc;
+	int c;
 
 	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 
-	for (int c = newc = 0; c < oldc; c++)
+	for (c = newc = 0; c < oldc; c++)
 		if (pvd->vdev_child[c])
 			newc++;
 
-	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
+	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_PUSHPAGE);
 
-	for (int c = newc = 0; c < oldc; c++) {
+	for (c = newc = 0; c < oldc; c++) {
 		if ((cvd = pvd->vdev_child[c]) != NULL) {
 			newchild[newc] = cvd;
 			cvd->vdev_id = newc++;
@@ -280,12 +284,14 @@ vdev_t *
 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 {
 	vdev_t *vd;
+	int t;
 
-	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+	vd = kmem_zalloc(sizeof (vdev_t), KM_PUSHPAGE);
 
 	if (spa->spa_root_vdev == NULL) {
 		ASSERT(ops == &vdev_root_ops);
 		spa->spa_root_vdev = vd;
+		spa->spa_load_guid = spa_generate_guid(NULL);
 	}
 
 	if (guid == 0 && ops != &vdev_hole_ops) {
@@ -312,10 +318,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	vd->vdev_state = VDEV_STATE_CLOSED;
 	vd->vdev_ishole = (ops == &vdev_hole_ops);
 
+	list_link_init(&vd->vdev_config_dirty_node);
+	list_link_init(&vd->vdev_state_dirty_node);
 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
 		    &vd->vdev_dtl_lock);
 	}
@@ -485,7 +493,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 		    &vd->vdev_removing);
 	}
 
-	if (parent && !parent->vdev_parent) {
+	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
 		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
 		    alloctype == VDEV_ALLOC_ADD ||
 		    alloctype == VDEV_ALLOC_SPLIT ||
@@ -561,6 +569,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 void
 vdev_free(vdev_t *vd)
 {
+	int c, t;
 	spa_t *spa = vd->vdev_spa;
 
 	/*
@@ -575,7 +584,7 @@ vdev_free(vdev_t *vd)
 	/*
 	 * Free all children.
 	 */
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_free(vd->vdev_child[c]);
 
 	ASSERT(vd->vdev_child == NULL);
@@ -589,9 +598,9 @@ vdev_free(vdev_t *vd)
 		metaslab_group_destroy(vd->vdev_mg);
 	}
 
-	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
-	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
-	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);
+	ASSERT0(vd->vdev_stat.vs_space);
+	ASSERT0(vd->vdev_stat.vs_dspace);
+	ASSERT0(vd->vdev_stat.vs_alloc);
 
 	/*
 	 * Remove this vdev from its parent's child list.
@@ -624,7 +633,7 @@ vdev_free(vdev_t *vd)
 	txg_list_destroy(&vd->vdev_dtl_list);
 
 	mutex_enter(&vd->vdev_dtl_lock);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		space_map_unload(&vd->vdev_dtl[t]);
 		space_map_destroy(&vd->vdev_dtl[t]);
 	}
@@ -661,6 +670,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 	svd->vdev_ms_shift = 0;
 	svd->vdev_ms_count = 0;
 
+	if (tvd->vdev_mg)
+		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
 	tvd->vdev_mg = svd->vdev_mg;
 	tvd->vdev_ms = svd->vdev_ms;
 
@@ -707,12 +718,14 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 static void
 vdev_top_update(vdev_t *tvd, vdev_t *vd)
 {
+	int c;
+
 	if (vd == NULL)
 		return;
 
 	vd->vdev_top = tvd;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_top_update(tvd, vd->vdev_child[c]);
 }
 
@@ -732,6 +745,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
 
 	mvd->vdev_asize = cvd->vdev_asize;
 	mvd->vdev_min_asize = cvd->vdev_min_asize;
+	mvd->vdev_max_asize = cvd->vdev_max_asize;
 	mvd->vdev_ashift = cvd->vdev_ashift;
 	mvd->vdev_state = cvd->vdev_state;
 	mvd->vdev_crtxg = cvd->vdev_crtxg;
@@ -823,7 +837,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
 	ASSERT(oldc <= newc);
 
-	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_PUSHPAGE | KM_NODEBUG);
 
 	if (oldc != 0) {
 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -888,6 +902,8 @@ vdev_metaslab_fini(vdev_t *vd)
 		kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
 		vd->vdev_ms = NULL;
 	}
+
+	ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
 }
 
 typedef struct vdev_probe_stats {
@@ -960,6 +976,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 	spa_t *spa = vd->vdev_spa;
 	vdev_probe_stats_t *vps = NULL;
 	zio_t *pio;
+	int l;
 
 	ASSERT(vd->vdev_ops->vdev_op_leaf);
 
@@ -977,7 +994,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 	mutex_enter(&vd->vdev_probe_lock);
 
 	if ((pio = vd->vdev_probe_zio) == NULL) {
-		vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
+		vps = kmem_zalloc(sizeof (*vps), KM_PUSHPAGE);
 
 		vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
 		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
@@ -1029,7 +1046,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 		return (NULL);
 	}
 
-	for (int l = 1; l < VDEV_LABELS; l++) {
+	for (l = 1; l < VDEV_LABELS; l++) {
 		zio_nowait(zio_read_phys(pio, vd,
 		    vdev_label_offset(vd->vdev_psize, l,
 		    offsetof(vdev_label_t, vl_pad2)),
@@ -1055,15 +1072,20 @@ vdev_open_child(void *arg)
 	vd->vdev_open_thread = NULL;
 }
 
-boolean_t
+static boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
-	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
-	    strlen(ZVOL_DIR)) == 0)
+	int c;
+
+#ifdef _KERNEL
+	if (zvol_is_zvol(vd->vdev_path))
 		return (B_TRUE);
-	for (int c = 0; c < vd->vdev_children; c++)
+#endif
+
+	for (c = 0; c < vd->vdev_children; c++)
 		if (vdev_uses_zvols(vd->vdev_child[c]))
 			return (B_TRUE);
+
 	return (B_FALSE);
 }
 
@@ -1072,14 +1094,15 @@ vdev_open_children(vdev_t *vd)
 {
 	taskq_t *tq;
 	int children = vd->vdev_children;
+	int c;
 
 	/*
 	 * in order to handle pools on top of zvols, do the opens
 	 * in a single thread so that the same thread holds the
 	 * spa_namespace_lock
 	 */
 	if (vdev_uses_zvols(vd)) {
-		for (int c = 0; c < children; c++)
+		for (c = 0; c < children; c++)
 			vd->vdev_child[c]->vdev_open_error =
 			    vdev_open(vd->vdev_child[c]);
 		return;
@@ -1087,9 +1110,9 @@
 	tq = taskq_create("vdev_open", children, minclsyspri,
 	    children, children, TASKQ_PREPOPULATE);
 
-	for (int c = 0; c < children; c++)
+	for (c = 0; c < children; c++)
 		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
-		    TQ_SLEEP) != NULL);
+		    TQ_SLEEP) != 0);
 
 	taskq_destroy(tq);
 }
@@ -1103,8 +1126,10 @@ vdev_open(vdev_t *vd)
 	spa_t *spa = vd->vdev_spa;
 	int error;
 	uint64_t osize = 0;
-	uint64_t asize, psize;
+	uint64_t max_osize = 0;
+	uint64_t asize, max_asize, psize;
 	uint64_t ashift = 0;
+	int c;
 
 	ASSERT(vd->vdev_open_thread == curthread ||
 	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1134,7 +1159,7 @@ vdev_open(vdev_t *vd)
 		return (ENXIO);
 	}
 
-	error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift);
+	error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
 
 	/*
 	 * Reset the vdev_reopening flag so that we actually close
@@ -1183,7 +1208,7 @@ vdev_open(vdev_t *vd)
 	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
 		return (0);
 
-	for (int c = 0; c < vd->vdev_children; c++) {
+	for (c = 0; c < vd->vdev_children; c++) {
 		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
 			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
 			    VDEV_AUX_NONE);
@@ -1192,6 +1217,7 @@ vdev_open(vdev_t *vd)
 	}
 
 	osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
+	max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
 
 	if (vd->vdev_children == 0) {
 		if (osize < SPA_MINDEVSIZE) {
@@ -1201,6 +1227,8 @@ vdev_open(vdev_t *vd)
 		}
 		psize = osize;
 		asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
+		max_asize = max_osize - (VDEV_LABEL_START_SIZE +
+		    VDEV_LABEL_END_SIZE);
 	} else {
 		if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
 		    (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
@@ -1210,6 +1238,7 @@ vdev_open(vdev_t *vd)
 		}
 		psize = 0;
 		asize = osize;
+		max_asize = max_osize;
 	}
 
 	vd->vdev_psize = psize;
@@ -1226,19 +1255,25 @@
 	if (vd->vdev_asize == 0) {
 		/*
 		 * This is the first-ever open, so use the computed values.
-		 * For testing purposes, a higher ashift can be requested.
+		 * For compatibility, a different ashift can be requested.
 		 */
 		vd->vdev_asize = asize;
-		vd->vdev_ashift = MAX(ashift, vd->vdev_ashift);
+		vd->vdev_max_asize = max_asize;
+		if (vd->vdev_ashift == 0)
+			vd->vdev_ashift = ashift;
 	} else {
 		/*
-		 * Make sure the alignment requirement hasn't increased.
+		 * Detect if the alignment requirement has increased.
+		 * We don't want to make the pool unavailable, just
+		 * post an event instead.
 		 */
-		if (ashift > vd->vdev_top->vdev_ashift) {
-			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
-			    VDEV_AUX_BAD_LABEL);
-			return (EINVAL);
+		if (ashift > vd->vdev_top->vdev_ashift &&
+		    vd->vdev_ops->vdev_op_leaf) {
+			zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
+			    spa, vd, NULL, 0, 0);
 		}
+
+		vd->vdev_max_asize = max_asize;
 	}
 
 	/*
@@ -1280,21 +1315,27 @@
 * contents. This needs to be done before vdev_load() so that we don't
 * inadvertently do repair I/Os to the wrong device.
 *
+ * If 'strict' is false ignore the spa guid check. This is necessary because
+ * if the machine crashed during a re-guid the new guid might have been written
+ * to all of the vdev labels, but not the cached config. The strict check
+ * will be performed when the pool is opened again using the mos config.
+ *
 * This function will only return failure if one of the vdevs indicates that it
 * has since been destroyed or exported.  This is only possible if
 * /etc/zfs/zpool.cache was readonly at the time.  Otherwise, the vdev state
 * will be updated but the function will return 0.
 */
 int
-vdev_validate(vdev_t *vd)
+vdev_validate(vdev_t *vd, boolean_t strict)
 {
 	spa_t *spa = vd->vdev_spa;
 	nvlist_t *label;
 	uint64_t guid = 0, top_guid;
 	uint64_t state;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++)
-		if (vdev_validate(vd->vdev_child[c]) != 0)
+	for (c = 0; c < vd->vdev_children; c++)
+		if (vdev_validate(vd->vdev_child[c], strict) != 0)
 			return (EBADF);
 
 	/*
@@ -1305,8 +1346,10 @@
 	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
 		uint64_t aux_guid = 0;
 		nvlist_t *nvl;
+		uint64_t txg = spa_last_synced_txg(spa) != 0 ?
+		    spa_last_synced_txg(spa) : -1ULL;
 
-		if ((label = vdev_label_read_config(vd)) == NULL) {
+		if ((label = vdev_label_read_config(vd, txg)) == NULL) {
 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
 			    VDEV_AUX_BAD_LABEL);
 			return (0);
@@ -1324,8 +1367,9 @@
 			return (0);
 		}
 
-		if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
-		    &guid) != 0 || guid != spa_guid(spa)) {
+		if (strict && (nvlist_lookup_uint64(label,
+		    ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
+		    guid != spa_guid(spa))) {
 			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
 			    VDEV_AUX_CORRUPT_DATA);
 			nvlist_free(label);
@@ -1398,8 +1442,8 @@
 void
 vdev_close(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
 	vdev_t *pvd = vd->vdev_parent;
+	ASSERTV(spa_t *spa = vd->vdev_spa);
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1432,12 +1476,13 @@ void
 vdev_hold(vdev_t *vd)
 {
 	spa_t *spa = vd->vdev_spa;
+	int c;
 
 	ASSERT(spa_is_root(spa));
 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
 		return;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_hold(vd->vdev_child[c]);
 
 	if (vd->vdev_ops->vdev_op_leaf)
@@ -1447,10 +1492,10 @@ vdev_hold(vdev_t *vd)
 void
 vdev_rele(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
+	int c;
 
-	ASSERT(spa_is_root(spa));
-	for (int c = 0; c < vd->vdev_children; c++)
+	ASSERT(spa_is_root(vd->vdev_spa));
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_rele(vd->vdev_child[c]);
 
 	if (vd->vdev_ops->vdev_op_leaf)
@@ -1487,7 +1532,7 @@ vdev_reopen(vdev_t *vd)
 		    !l2arc_vdev_present(vd))
 			l2arc_add_vdev(spa, vd);
 	} else {
-		(void) vdev_validate(vd);
+		(void) vdev_validate(vd, B_TRUE);
 	}
 
 	/*
@@ -1643,11 +1688,11 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 {
 	spa_t *spa = vd->vdev_spa;
 	avl_tree_t reftree;
-	int minref;
+	int c, t, minref;
 
 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_dtl_reassess(vd->vdev_child[c], txg,
 		    scrub_txg, scrub_done);
 
@@ -1707,7 +1752,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 	}
 
 	mutex_enter(&vd->vdev_dtl_lock);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		/* account for child's outage in parent's missing map */
 		int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
 		if (t == DTL_SCRUB)
@@ -1719,7 +1764,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 		else
 			minref = vd->vdev_children;	/* any kind of mirror */
 		space_map_ref_create(&reftree);
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			vdev_t *cvd = vd->vdev_child[c];
 			mutex_enter(&cvd->vdev_dtl_lock);
 			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
@@ -1780,8 +1825,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	if (vd->vdev_detached) {
 		if (smo->smo_object != 0) {
-			int err = dmu_object_free(mos, smo->smo_object, tx);
-			ASSERT3U(err, ==, 0);
+			VERIFY0(dmu_object_free(mos, smo->smo_object, tx));
 			smo->smo_object = 0;
 		}
 		dmu_tx_commit(tx);
@@ -1811,6 +1855,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	space_map_truncate(smo, mos, tx);
 	space_map_sync(&smsync, SM_ALLOC, smo, mos, tx);
+	space_map_vacate(&smsync, NULL, NULL);
 
 	space_map_destroy(&smsync);
 
@@ -1869,6 +1914,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 	boolean_t needed = B_FALSE;
 	uint64_t thismin = UINT64_MAX;
 	uint64_t thismax = 0;
+	int c;
 
 	if (vd->vdev_children == 0) {
 		mutex_enter(&vd->vdev_dtl_lock);
@@ -1884,7 +1930,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 		}
 		mutex_exit(&vd->vdev_dtl_lock);
 	} else {
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			vdev_t *cvd = vd->vdev_child[c];
 			uint64_t cmin, cmax;
 
@@ -1906,10 +1952,12 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 void
 vdev_load(vdev_t *vd)
 {
+	int c;
+
 	/*
 	 * Recursively load all children.
 	 */
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_load(vd->vdev_child[c]);
 
 	/*
@@ -1946,14 +1994,14 @@ vdev_validate_aux(vdev_t *vd)
 	if (!vdev_readable(vd))
 		return (0);
 
-	if ((label = vdev_label_read_config(vd)) == NULL) {
+	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
 		vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
 		    VDEV_AUX_CORRUPT_DATA);
 		return (-1);
 	}
 
 	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
-	    version > SPA_VERSION ||
+	    !SPA_VERSION_IS_SUPPORTED(version) ||
 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
 	    guid != vd->vdev_guid ||
 	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
@@ -1977,23 +2025,24 @@ vdev_remove(vdev_t *vd, uint64_t txg)
 	spa_t *spa = vd->vdev_spa;
 	objset_t *mos = spa->spa_meta_objset;
 	dmu_tx_t *tx;
+	int m;
 
 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
 
 	if (vd->vdev_dtl_smo.smo_object) {
-		ASSERT3U(vd->vdev_dtl_smo.smo_alloc, ==, 0);
+		ASSERT0(vd->vdev_dtl_smo.smo_alloc);
 		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
 		vd->vdev_dtl_smo.smo_object = 0;
 	}
 
 	if (vd->vdev_ms != NULL) {
-		for (int m = 0; m < vd->vdev_ms_count; m++) {
+		for (m = 0; m < vd->vdev_ms_count; m++) {
 			metaslab_t *msp = vd->vdev_ms[m];
 
 			if (msp == NULL || msp->ms_smo.smo_object == 0)
 				continue;
 
-			ASSERT3U(msp->ms_smo.smo_alloc, ==, 0);
+			ASSERT0(msp->ms_smo.smo_alloc);
 			(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
 			msp->ms_smo.smo_object = 0;
 		}
@@ -2015,7 +2064,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
 
 	ASSERT(!vd->vdev_ishole);
 
-	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
 		metaslab_sync_done(msp, txg);
 
 	if (reassess)
@@ -2271,7 +2320,7 @@ top:
 			(void) spa_vdev_state_exit(spa, vd, 0);
 			goto top;
 		}
-		ASSERT3U(tvd->vdev_stat.vs_alloc, ==, 0);
+		ASSERT0(tvd->vdev_stat.vs_alloc);
 	}
 
 	/*
@@ -2324,6 +2373,7 @@ void
 vdev_clear(spa_t *spa, vdev_t *vd)
 {
 	vdev_t *rvd = spa->spa_root_vdev;
+	int c;
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 
@@ -2334,7 +2384,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 	vd->vdev_stat.vs_write_errors = 0;
 	vd->vdev_stat.vs_checksum_errors = 0;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_clear(spa, vd->vdev_child[c]);
 
 	/*
@@ -2367,7 +2417,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 		if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
 			spa_async_request(spa, SPA_ASYNC_RESILVER);
 
-		spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
+		spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
 	}
 }
 
@@ -2448,6 +2498,7 @@ void
 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 {
 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+	int c, t;
 
 	mutex_enter(&vd->vdev_stat_lock);
 	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
@@ -2456,6 +2507,7 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	vs->vs_rsize = vdev_get_min_asize(vd);
 	if (vd->vdev_ops->vdev_op_leaf)
 		vs->vs_rsize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+	vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
 	mutex_exit(&vd->vdev_stat_lock);
 
 	/*
@@ -2463,12 +2515,12 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	 * over all top-level vdevs (i.e. the direct children of the root).
 	 */
 	if (vd == rvd) {
-		for (int c = 0; c < rvd->vdev_children; c++) {
+		for (c = 0; c < rvd->vdev_children; c++) {
 			vdev_t *cvd = rvd->vdev_child[c];
 			vdev_stat_t *cvs = &cvd->vdev_stat;
 
 			mutex_enter(&vd->vdev_stat_lock);
-			for (int t = 0; t < ZIO_TYPES; t++) {
+			for (t = 0; t < ZIO_TYPES; t++) {
 				vs->vs_ops[t] += cvs->vs_ops[t];
 				vs->vs_bytes[t] += cvs->vs_bytes[t];
 			}
@@ -2492,8 +2544,9 @@ void
 vdev_scan_stat_init(vdev_t *vd)
 {
 	vdev_stat_t *vs = &vd->vdev_stat;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_scan_stat_init(vd->vdev_child[c]);
 
 	mutex_enter(&vd->vdev_stat_lock);
@@ -2834,9 +2887,10 @@ vdev_propagate_state(vdev_t *vd)
 	int degraded = 0, faulted = 0;
 	int corrupted = 0;
 	vdev_t *child;
+	int c;
 
 	if (vd->vdev_children > 0) {
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			child = vd->vdev_child[c];
 
 			/*
@@ -3019,13 +3073,19 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+	/*
+	 * Currently, we do not support RAID-Z or partial configuration.
+	 * In addition, only a single top-level vdev is allowed and none of the
+	 * leaves can be wholedisks.
+	 */
+	int c;
+
 	if (!vd->vdev_ops->vdev_op_leaf) {
 		char *vdev_type = vd->vdev_ops->vdev_op_type;
 
@@ -3040,10 +3100,11 @@ vdev_is_bootable(vdev_t *vd)
 			return (B_FALSE);
 	}
 
-	for (int c = 0; c < vd->vdev_children; c++) {
+	for (c = 0; c < vd->vdev_children; c++) {
 		if (!vdev_is_bootable(vd->vdev_child[c]))
 			return (B_FALSE);
 	}
+#endif /* __sun__ || __sun */
 	return (B_TRUE);
 }
 
@@ -3056,13 +3117,14 @@ vdev_is_bootable(vdev_t *vd)
 void
 vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
 {
-	spa_t *spa = nvd->vdev_spa;
+	int c;
 
 	ASSERT(nvd->vdev_top->vdev_islog);
-	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
+	ASSERT(spa_config_held(nvd->vdev_spa,
+	    SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
 	ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
 
-	for (int c = 0; c < nvd->vdev_children; c++)
+	for (c = 0; c < nvd->vdev_children; c++)
 		vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
 
 	if (nvd->vdev_ops->vdev_op_leaf) {
@@ -3084,11 +3146,13 @@ vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
 boolean_t
 vdev_log_state_valid(vdev_t *vd)
 {
+	int c;
+
 	if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
 	    !vd->vdev_removed)
 		return (B_TRUE);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		if (vdev_log_state_valid(vd->vdev_child[c]))
 			return (B_TRUE);
 
@@ -3128,3 +3192,51 @@ vdev_split(vdev_t *vd)
 	}
 	vdev_propagate_state(cvd);
 }
+
+void
+vdev_deadman(vdev_t *vd)
+{
+	int c;
+
+	for (c = 0; c < vd->vdev_children; c++) {
+		vdev_t *cvd = vd->vdev_child[c];
+
+		vdev_deadman(cvd);
+	}
+
+	if (vd->vdev_ops->vdev_op_leaf) {
+		vdev_queue_t *vq = &vd->vdev_queue;
+
+		mutex_enter(&vq->vq_lock);
+		if (avl_numnodes(&vq->vq_pending_tree) > 0) {
+			spa_t *spa = vd->vdev_spa;
+			zio_t *fio;
+			uint64_t delta;
+
+			/*
+			 * Look at the head of all the pending queues,
+			 * if any I/O has been outstanding for longer than
+			 * the spa_deadman_synctime we log a zevent.
+			 */
+			fio = avl_first(&vq->vq_pending_tree);
+			delta = ddi_get_lbolt64() - fio->io_timestamp;
+			if (delta > NSEC_TO_TICK(spa_deadman_synctime(spa))) {
+				zfs_dbgmsg("SLOW IO: zio timestamp %llu, "
+				    "delta %llu, last io %llu",
+				    fio->io_timestamp, delta,
+				    vq->vq_io_complete_ts);
+				zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
+				    spa, vd, fio, 0, 0);
+			}
+		}
+		mutex_exit(&vq->vq_lock);
+	}
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_fault);
+EXPORT_SYMBOL(vdev_degrade);
+EXPORT_SYMBOL(vdev_online);
+EXPORT_SYMBOL(vdev_offline);
+EXPORT_SYMBOL(vdev_clear);
+#endif