X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=module%2Fzfs%2Fvdev.c;h=18d2d5ca6e752e39496f2bb0e2f2427fd90dd5bb;hb=3541dc6d02592bd0939ea2d35b50c2bbdcc4cd0e;hp=a61f29b8e78a6deb58690a813aeed9ccee715ad1;hpb=428870ff734fdaccc342b33fc53cf94724409a46;p=zfs.git

diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index a61f29b..18d2d5c 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -21,6 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -85,8 +87,9 @@ vdev_default_asize(vdev_t *vd, uint64_t psize)
 {
 	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
 	uint64_t csize;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++) {
+	for (c = 0; c < vd->vdev_children; c++) {
 		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
 		asize = MAX(asize, csize);
 	}
@@ -132,9 +135,10 @@ vdev_get_min_asize(vdev_t *vd)
 
 void
 vdev_set_min_asize(vdev_t *vd)
 {
+	int c;
 	vd->vdev_min_asize = vdev_get_min_asize(vd);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_set_min_asize(vd->vdev_child[c]);
 }
@@ -157,11 +161,12 @@ vdev_t *
 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
 {
 	vdev_t *mvd;
+	int c;
 
 	if (vd->vdev_guid == guid)
 		return (vd);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
 		    NULL)
 			return (mvd);
@@ -207,9 +212,6 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 	 */
 	for (; pvd != NULL; pvd = pvd->vdev_parent)
 		pvd->vdev_guid_sum += cvd->vdev_guid_sum;
-
-	if (cvd->vdev_ops->vdev_op_leaf)
-		cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit;
 }
 
 void
@@ -244,9 +246,6 @@ vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
 	 */
 	for (; pvd != NULL; pvd = pvd->vdev_parent)
 		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
-
-	if (cvd->vdev_ops->vdev_op_leaf)
-		cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit;
 }
 
 /*
@@ -258,16 +257,17 @@ vdev_compact_children(vdev_t *pvd)
 	vdev_t **newchild, *cvd;
 	int oldc = pvd->vdev_children;
 	int newc;
+	int c;
 
 	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 
-	for (int c = newc = 0; c < oldc; c++)
+	for (c = newc = 0; c < oldc; c++)
 		if (pvd->vdev_child[c])
 			newc++;
 
 	newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP);
 
-	for (int c = newc = 0; c < oldc; c++) {
+	for (c = newc = 0; c < oldc; c++) {
 		if ((cvd = pvd->vdev_child[c]) != NULL) {
 			newchild[newc] = cvd;
 			cvd->vdev_id = newc++;
@@ -286,12 +286,14 @@ vdev_t *
 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 {
 	vdev_t *vd;
+	int t;
 
 	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
 
 	if (spa->spa_root_vdev == NULL) {
 		ASSERT(ops == &vdev_root_ops);
 		spa->spa_root_vdev = vd;
+		spa->spa_load_guid = spa_generate_guid(NULL);
 	}
 
 	if (guid == 0 && ops != &vdev_hole_ops) {
@@ -318,10 +320,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	vd->vdev_state = VDEV_STATE_CLOSED;
 	vd->vdev_ishole = (ops == &vdev_hole_ops);
 
+	list_link_init(&vd->vdev_config_dirty_node);
+	list_link_init(&vd->vdev_state_dirty_node);
 	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0,
 		    &vd->vdev_dtl_lock);
 	}
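The vdev_default_asize() hunk above rounds psize up to the top-level vdev's 1 << ashift allocation granularity via P2ROUNDUP(). A minimal standalone sketch of that rounding, assuming the classic power-of-two round-up definition from sys/sysmacros.h:

#include <stdio.h>
#include <stdint.h>

/* Power-of-two round-up, as conventionally defined in sys/sysmacros.h. */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uint64_t psize = 4000;		/* logical size in bytes */
	uint64_t ashift = 9;		/* 512-byte sectors */
	uint64_t asize = P2ROUNDUP(psize, 1ULL << ashift);

	/* Prints 4096: 4000 rounded up to the next 512-byte boundary. */
	printf("%llu\n", (unsigned long long)asize);
	return (0);
}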
@@ -491,7 +495,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 		    &vd->vdev_removing);
 	}
 
-	if (parent && !parent->vdev_parent) {
+	if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
 		ASSERT(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_ADD ||
 		    alloctype == VDEV_ALLOC_SPLIT ||
 		    alloctype == VDEV_ALLOC_ROOTPOOL);
@@ -524,6 +528,9 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
 		    &vd->vdev_offline);
 
+		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVERING,
+		    &vd->vdev_resilvering);
+
 		/*
 		 * When importing a pool, we want to ignore the persistent fault
 		 * state, as the diagnosis made on another system may not be
@@ -564,6 +571,7 @@
 void
 vdev_free(vdev_t *vd)
 {
+	int c, t;
 	spa_t *spa = vd->vdev_spa;
 
 	/*
@@ -578,7 +586,7 @@ vdev_free(vdev_t *vd)
 	/*
 	 * Free all children.
 	 */
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_free(vd->vdev_child[c]);
 
 	ASSERT(vd->vdev_child == NULL);
@@ -627,7 +635,7 @@ vdev_free(vdev_t *vd)
 	txg_list_destroy(&vd->vdev_dtl_list);
 
 	mutex_enter(&vd->vdev_dtl_lock);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		space_map_unload(&vd->vdev_dtl[t]);
 		space_map_destroy(&vd->vdev_dtl[t]);
 	}
@@ -664,6 +672,8 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 	svd->vdev_ms_shift = 0;
 	svd->vdev_ms_count = 0;
 
+	if (tvd->vdev_mg)
+		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
 	tvd->vdev_mg = svd->vdev_mg;
 	tvd->vdev_ms = svd->vdev_ms;
@@ -710,12 +720,14 @@
 static void
 vdev_top_update(vdev_t *tvd, vdev_t *vd)
 {
+	int c;
+
 	if (vd == NULL)
 		return;
 
 	vd->vdev_top = tvd;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_top_update(tvd, vd->vdev_child[c]);
 }
@@ -826,7 +838,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
 
 	ASSERT(oldc <= newc);
 
-	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
+	mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP | KM_NODEBUG);
 
 	if (oldc != 0) {
 		bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
@@ -963,6 +975,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 	spa_t *spa = vd->vdev_spa;
 	vdev_probe_stats_t *vps = NULL;
 	zio_t *pio;
+	int l;
 
 	ASSERT(vd->vdev_ops->vdev_op_leaf);
@@ -1032,7 +1045,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
 		return (NULL);
 	}
 
-	for (int l = 1; l < VDEV_LABELS; l++) {
+	for (l = 1; l < VDEV_LABELS; l++) {
 		zio_nowait(zio_read_phys(pio, vd,
 		    vdev_label_offset(vd->vdev_psize, l,
 		    offsetof(vdev_label_t, vl_pad2)),
@@ -1061,12 +1074,24 @@ vdev_open_child(void *arg)
 boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
+/*
+ * Stacking zpools on top of zvols is unsupported until we implement a method
+ * for determining if an arbitrary block device is a zvol without using the
+ * path.  Solaris would check the 'zvol' path component but this does not
+ * exist in the Linux port, so we really should do something like stat the
+ * file and check the major number.  This is complicated by the fact that
+ * we need to do this portably in user or kernel space.
+ */
+#if 0
+	int c;
+
 	if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
 	    strlen(ZVOL_DIR)) == 0)
 		return (B_TRUE);
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		if (vdev_uses_zvols(vd->vdev_child[c]))
 			return (B_TRUE);
+#endif
 	return (B_FALSE);
 }
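The vdev_uses_zvols() comment above proposes stat()ing the device and checking its major number. A user-space-only sketch of that idea; ZVOL_MAJOR_GUESS is a hypothetical placeholder, since the comment's whole point is that no such constant was portably available to both user and kernel space:

#include <sys/stat.h>
#include <sys/sysmacros.h>	/* major() on glibc */
#include <stdio.h>

#define	ZVOL_MAJOR_GUESS	230	/* hypothetical; not taken from this tree */

static int
path_is_zvol(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return (0);

	/* A zvol is a block device whose major matches the zvol driver's. */
	return (S_ISBLK(st.st_mode) && major(st.st_rdev) == ZVOL_MAJOR_GUESS);
}

int
main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %szvol\n", argv[1],
		    path_is_zvol(argv[1]) ? "" : "not a ");
	return (0);
}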
@@ -1075,6 +1100,7 @@ vdev_open_children(vdev_t *vd)
 {
 	taskq_t *tq;
 	int children = vd->vdev_children;
+	int c;
 
 	/*
 	 * in order to handle pools on top of zvols, do the opens
@@ -1082,7 +1108,7 @@ vdev_open_children(vdev_t *vd)
 	 * spa_namespace_lock
 	 */
 	if (vdev_uses_zvols(vd)) {
-		for (int c = 0; c < children; c++)
+		for (c = 0; c < children; c++)
 			vd->vdev_child[c]->vdev_open_error =
 			    vdev_open(vd->vdev_child[c]);
 		return;
@@ -1090,9 +1116,9 @@ vdev_open_children(vdev_t *vd)
 	tq = taskq_create("vdev_open", children, minclsyspri,
 	    children, children, TASKQ_PREPOPULATE);
 
-	for (int c = 0; c < children; c++)
+	for (c = 0; c < children; c++)
 		VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
-		    TQ_SLEEP) != NULL);
+		    TQ_SLEEP) != 0);
 
 	taskq_destroy(tq);
 }
@@ -1108,6 +1134,7 @@ vdev_open(vdev_t *vd)
 	uint64_t osize = 0;
 	uint64_t asize, psize;
 	uint64_t ashift = 0;
+	int c;
 
 	ASSERT(vd->vdev_open_thread == curthread ||
 	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1186,7 +1213,7 @@ vdev_open(vdev_t *vd)
 	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
 		return (0);
 
-	for (int c = 0; c < vd->vdev_children; c++) {
+	for (c = 0; c < vd->vdev_children; c++) {
 		if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
 			vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
 			    VDEV_AUX_NONE);
@@ -1295,8 +1322,9 @@ vdev_validate(vdev_t *vd)
 	nvlist_t *label;
 	uint64_t guid = 0, top_guid;
 	uint64_t state;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		if (vdev_validate(vd->vdev_child[c]) != 0)
 			return (EBADF);
@@ -1375,10 +1403,10 @@ vdev_validate(vdev_t *vd)
 		nvlist_free(label);
 
 		/*
-		 * If spa->spa_load_verbatim is true, no need to check the
+		 * If this is a verbatim import, no need to check the
 		 * state of the pool.
 		 */
-		if (!spa->spa_load_verbatim &&
+		if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
 		    spa_load_state(spa) == SPA_LOAD_OPEN &&
 		    state != POOL_STATE_ACTIVE)
 			return (EBADF);
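In the vdev_close() hunk just below, spa is wrapped in ASSERTV() because, after the change, it is referenced only from an ASSERT. A sketch of that idiom, assuming ASSERTV() compiles its argument away in non-debug builds, which is what the SPL macro exists for:

#include <assert.h>

/*
 * Sketch of the ASSERTV() idiom: the declaration exists only when
 * assertions are compiled in, so release builds see neither the
 * variable nor an unused-variable warning.
 */
#ifdef NDEBUG
#define	ASSERTV(x)
#else
#define	ASSERTV(x)	x
#endif

struct widget {
	int w_owner;
};

static void
widget_close(struct widget *w, int me)
{
	ASSERTV(int owner = w->w_owner);

	assert(owner == me);	/* the only use of 'owner' */
	w->w_owner = 0;
}

int
main(void)
{
	struct widget w = { 42 };

	widget_close(&w, 42);
	return (w.w_owner);
}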
@@ -1401,8 +1429,8 @@ vdev_validate(vdev_t *vd)
 
 void
 vdev_close(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
 	vdev_t *pvd = vd->vdev_parent;
+	ASSERTV(spa_t *spa = vd->vdev_spa);
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1435,12 +1463,13 @@ void
 vdev_hold(vdev_t *vd)
 {
 	spa_t *spa = vd->vdev_spa;
+	int c;
 
 	ASSERT(spa_is_root(spa));
 	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
 		return;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_hold(vd->vdev_child[c]);
 
 	if (vd->vdev_ops->vdev_op_leaf)
@@ -1450,10 +1479,10 @@
 void
 vdev_rele(vdev_t *vd)
 {
-	spa_t *spa = vd->vdev_spa;
+	int c;
 
-	ASSERT(spa_is_root(spa));
-	for (int c = 0; c < vd->vdev_children; c++)
+	ASSERT(spa_is_root(vd->vdev_spa));
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_rele(vd->vdev_child[c]);
 
 	if (vd->vdev_ops->vdev_op_leaf)
@@ -1544,6 +1573,7 @@ vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
 	ASSERT(vd == vd->vdev_top);
 	ASSERT(!vd->vdev_ishole);
 	ASSERT(ISP2(flags));
+	ASSERT(spa_writeable(vd->vdev_spa));
 
 	if (flags & VDD_METASLAB)
 		(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
@@ -1599,6 +1629,7 @@ vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
 
 	ASSERT(t < DTL_TYPES);
 	ASSERT(vd != vd->vdev_spa->spa_root_vdev);
+	ASSERT(spa_writeable(vd->vdev_spa));
 
 	mutex_enter(sm->sm_lock);
 	if (!space_map_contains(sm, txg, size))
@@ -1644,11 +1675,11 @@
 vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 {
 	spa_t *spa = vd->vdev_spa;
 	avl_tree_t reftree;
-	int minref;
+	int c, t, minref;
 
 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_dtl_reassess(vd->vdev_child[c], txg,
 		    scrub_txg, scrub_done);
@@ -1708,7 +1739,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 	}
 
 	mutex_enter(&vd->vdev_dtl_lock);
-	for (int t = 0; t < DTL_TYPES; t++) {
+	for (t = 0; t < DTL_TYPES; t++) {
 		/* account for child's outage in parent's missing map */
 		int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
 		if (t == DTL_SCRUB)
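The reference-tree pass in the next hunk counts, per range, how many children have that range in their DTL, and marks it missing at the parent only when the count reaches minref (all children for a mirror, as the comment notes; presumably parity plus one for raidz). A toy version of that rule for a single range, with the space_map machinery abstracted away:

#include <stdio.h>

/*
 * Toy version of the minref rule: a single range, plus one flag per
 * child saying whether that child's DTL already contains it.  Real
 * ZFS runs this logic over arbitrary ranges with space_map reference
 * trees rather than a flat array.
 */
static int
parent_range_missing(int nchildren, int minref, const int *child_missing)
{
	int c, refs = 0;

	for (c = 0; c < nchildren; c++)
		refs += child_missing[c];

	return (refs >= minref);
}

int
main(void)
{
	int missing[4] = { 1, 1, 0, 0 };	/* two children lack the range */

	/* raidz1: losing two children exceeds one parity, data is missing. */
	printf("raidz1: %d\n", parent_range_missing(4, 2, missing));

	/* 4-way mirror: one surviving copy is enough, still readable. */
	printf("mirror: %d\n", parent_range_missing(4, 4, missing));
	return (0);
}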
@@ -1720,7 +1751,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 		else
 			minref = vd->vdev_children;	/* any kind of mirror */
 		space_map_ref_create(&reftree);
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			vdev_t *cvd = vd->vdev_child[c];
 			mutex_enter(&cvd->vdev_dtl_lock);
 			space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1);
@@ -1781,8 +1812,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 
 	if (vd->vdev_detached) {
 		if (smo->smo_object != 0) {
-			int err = dmu_object_free(mos, smo->smo_object, tx);
-			ASSERT3U(err, ==, 0);
+			VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
 			smo->smo_object = 0;
 		}
 		dmu_tx_commit(tx);
@@ -1855,6 +1885,9 @@ vdev_dtl_required(vdev_t *vd)
 	vd->vdev_cant_read = cant_read;
 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
 
+	if (!required && zio_injection_enabled)
+		required = !!zio_handle_device_injection(vd, NULL, ECHILD);
+
 	return (required);
 }
@@ -1867,6 +1900,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 	boolean_t needed = B_FALSE;
 	uint64_t thismin = UINT64_MAX;
 	uint64_t thismax = 0;
+	int c;
 
 	if (vd->vdev_children == 0) {
 		mutex_enter(&vd->vdev_dtl_lock);
@@ -1882,7 +1916,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 		}
 		mutex_exit(&vd->vdev_dtl_lock);
 	} else {
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			vdev_t *cvd = vd->vdev_child[c];
 			uint64_t cmin, cmax;
@@ -1904,10 +1938,12 @@
 void
 vdev_load(vdev_t *vd)
 {
+	int c;
+
 	/*
 	 * Recursively load all children.
 	 */
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_load(vd->vdev_child[c]);
 
 	/*
@@ -1975,6 +2011,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
 	spa_t *spa = vd->vdev_spa;
 	objset_t *mos = spa->spa_meta_objset;
 	dmu_tx_t *tx;
+	int m;
 
 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
@@ -1985,7 +2022,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
 	}
 
 	if (vd->vdev_ms != NULL) {
-		for (int m = 0; m < vd->vdev_ms_count; m++) {
+		for (m = 0; m < vd->vdev_ms_count; m++) {
 			metaslab_t *msp = vd->vdev_ms[m];
 
 			if (msp == NULL || msp->ms_smo.smo_object == 0)
@@ -2013,7 +2050,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
 
 	ASSERT(!vd->vdev_ishole);
 
-	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
 		metaslab_sync_done(msp, txg);
 
 	if (reassess)
@@ -2070,7 +2107,7 @@ vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
 int
 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 {
-	vdev_t *vd;
+	vdev_t *vd, *tvd;
 
 	spa_vdev_state_enter(spa, SCL_NONE);
@@ -2080,6 +2117,8 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 	if (!vd->vdev_ops->vdev_op_leaf)
 		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
 
+	tvd = vd->vdev_top;
+
 	/*
 	 * We don't directly use the aux state here, but if we do a
 	 * vdev_reopen(), we need this value to be present to remember why we
@@ -2099,7 +2138,7 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 	/*
 	 * If this device has the only valid copy of the data, then
 	 * back off and simply mark the vdev as degraded instead.
 	 */
-	if (!vd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
+	if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
 		vd->vdev_degraded = 1ULL;
 		vd->vdev_faulted = 0ULL;
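In the vdev_sync_done() hunk above, the while condition gained a second set of parentheses, the conventional signal to the compiler that an assignment inside a condition is intentional. A standalone illustration (the list type and pop helper are invented for the example):

#include <stddef.h>

struct node {
	struct node *next;
};

/* Invented stack-style pop, for illustration only. */
static struct node *
list_pop(struct node **headp)
{
	struct node *n = *headp;

	if (n != NULL)
		*headp = n->next;
	return (n);
}

static int
drain(struct node **headp)
{
	struct node *n;
	int count = 0;

	/*
	 * The doubled parentheses tell gcc's -Wparentheses that the
	 * assignment in the condition is deliberate, not a mistyped ==.
	 */
	while ((n = list_pop(headp)))
		count++;

	return (count);
}

int
main(void)
{
	struct node a = { NULL };
	struct node b = { &a };
	struct node *head = &b;

	return (drain(&head) == 2 ? 0 : 1);
}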
@@ -2107,7 +2146,7 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 		/*
 		 * If we reopen the device and it's not dead, only then do we
 		 * mark it degraded.
 		 */
-		vdev_reopen(vd);
+		vdev_reopen(tvd);
 		if (vdev_readable(vd))
 			vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
@@ -2320,6 +2359,7 @@
 void
 vdev_clear(spa_t *spa, vdev_t *vd)
 {
 	vdev_t *rvd = spa->spa_root_vdev;
+	int c;
 
 	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -2330,7 +2370,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 	vd->vdev_stat.vs_write_errors = 0;
 	vd->vdev_stat.vs_checksum_errors = 0;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_clear(spa, vd->vdev_child[c]);
 
 	/*
@@ -2349,21 +2389,21 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 	 */
 	vd->vdev_forcefault = B_TRUE;
 
-	vd->vdev_faulted = vd->vdev_degraded = 0;
+	vd->vdev_faulted = vd->vdev_degraded = 0ULL;
 	vd->vdev_cant_read = B_FALSE;
 	vd->vdev_cant_write = B_FALSE;
 
-	vdev_reopen(vd);
+	vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
 
 	vd->vdev_forcefault = B_FALSE;
 
-	if (vd != rvd)
+	if (vd != rvd && vdev_writeable(vd->vdev_top))
 		vdev_state_dirty(vd->vdev_top);
 
 	if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
 		spa_async_request(spa, SPA_ASYNC_RESILVER);
 
-	spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR);
+	spa_event_notify(spa, vd, FM_EREPORT_ZFS_DEVICE_CLEAR);
 }
@@ -2444,6 +2484,7 @@ void
 vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 {
 	vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+	int c, t;
 
 	mutex_enter(&vd->vdev_stat_lock);
 	bcopy(&vd->vdev_stat, vs, sizeof (*vs));
@@ -2459,12 +2500,12 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
 	 * over all top-level vdevs (i.e. the direct children of the root).
 	 */
 	if (vd == rvd) {
-		for (int c = 0; c < rvd->vdev_children; c++) {
+		for (c = 0; c < rvd->vdev_children; c++) {
 			vdev_t *cvd = rvd->vdev_child[c];
 			vdev_stat_t *cvs = &cvd->vdev_stat;
 
 			mutex_enter(&vd->vdev_stat_lock);
-			for (int t = 0; t < ZIO_TYPES; t++) {
+			for (t = 0; t < ZIO_TYPES; t++) {
 				vs->vs_ops[t] += cvs->vs_ops[t];
 				vs->vs_bytes[t] += cvs->vs_bytes[t];
 			}
@@ -2488,8 +2529,9 @@ void
 vdev_scan_stat_init(vdev_t *vd)
 {
 	vdev_stat_t *vs = &vd->vdev_stat;
+	int c;
 
-	for (int c = 0; c < vd->vdev_children; c++)
+	for (c = 0; c < vd->vdev_children; c++)
 		vdev_scan_stat_init(vd->vdev_child[c]);
 
 	mutex_enter(&vd->vdev_stat_lock);
@@ -2541,7 +2583,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
 		mutex_enter(&vd->vdev_stat_lock);
 
 		if (flags & ZIO_FLAG_IO_REPAIR) {
-			if (flags & ZIO_FLAG_SCRUB_THREAD) {
+			if (flags & ZIO_FLAG_SCAN_THREAD) {
 				dsl_scan_phys_t *scn_phys =
 				    &spa->spa_dsl_pool->dp_scan->scn_phys;
 				uint64_t *processed = &scn_phys->scn_processed;
@@ -2597,7 +2639,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
 
 	if (type == ZIO_TYPE_WRITE && txg != 0 &&
 	    (!(flags & ZIO_FLAG_IO_REPAIR) ||
-	    (flags & ZIO_FLAG_SCRUB_THREAD) ||
+	    (flags & ZIO_FLAG_SCAN_THREAD) ||
 	    spa->spa_claiming)) {
 		/*
 		 * This is either a normal write (not a repair), or it's
@@ -2616,7 +2658,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
 		 */
 		if (vd->vdev_ops->vdev_op_leaf) {
 			uint64_t commit_txg = txg;
-			if (flags & ZIO_FLAG_SCRUB_THREAD) {
+			if (flags & ZIO_FLAG_SCAN_THREAD) {
 				ASSERT(flags & ZIO_FLAG_IO_REPAIR);
 				ASSERT(spa_sync_pass(spa) == 1);
 				vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
@@ -2699,6 +2741,8 @@ vdev_config_dirty(vdev_t *vd)
 	vdev_t *rvd = spa->spa_root_vdev;
 	int c;
 
+	ASSERT(spa_writeable(spa));
+
 	/*
 	 * If this is an aux vdev (as with l2cache and spare devices), then we
 	 * update the vdev config manually and set the sync flag.
@@ -2787,6 +2831,7 @@ vdev_state_dirty(vdev_t *vd)
 {
 	spa_t *spa = vd->vdev_spa;
 
+	ASSERT(spa_writeable(spa));
 	ASSERT(vd == vd->vdev_top);
 
 	/*
@@ -2827,9 +2872,10 @@ vdev_propagate_state(vdev_t *vd)
 	int degraded = 0, faulted = 0;
 	int corrupted = 0;
 	vdev_t *child;
+	int c;
 
 	if (vd->vdev_children > 0) {
-		for (int c = 0; c < vd->vdev_children; c++) {
+		for (c = 0; c < vd->vdev_children; c++) {
 			child = vd->vdev_child[c];
 
 			/*
@@ -2944,12 +2990,13 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 		vd->vdev_removed = B_TRUE;
 	} else if (state == VDEV_STATE_CANT_OPEN) {
 		/*
-		 * If we fail to open a vdev during an import, we mark it as
-		 * "not available", which signifies that it was never there to
-		 * begin with.  Failure to open such a device is not considered
-		 * an error.
+		 * If we fail to open a vdev during an import or recovery, we
+		 * mark it as "not available", which signifies that it was
+		 * never there to begin with.  Failure to open such a device
+		 * is not considered an error.
 		 */
-		if (spa_load_state(spa) == SPA_LOAD_IMPORT &&
+		if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
+		    spa_load_state(spa) == SPA_LOAD_RECOVER) &&
 		    vd->vdev_ops->vdev_op_leaf)
 			vd->vdev_not_present = 1;
@@ -3011,13 +3058,19 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
 
 /*
  * Check the vdev configuration to ensure that it's capable of supporting
- * a root pool. Currently, we do not support RAID-Z or partial configuration.
- * In addition, only a single top-level vdev is allowed and none of the leaves
- * can be wholedisks.
+ * a root pool.
  */
 boolean_t
 vdev_is_bootable(vdev_t *vd)
 {
+#if defined(__sun__) || defined(__sun)
+	/*
+	 * Currently, we do not support RAID-Z or partial configuration.
+	 * In addition, only a single top-level vdev is allowed and none of the
+	 * leaves can be wholedisks.
+	 */
+	int c;
+
 	if (!vd->vdev_ops->vdev_op_leaf) {
 		char *vdev_type = vd->vdev_ops->vdev_op_type;
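Nearly every hunk in this patch, including vdev_scan_stat_init() above, hoists the loop counter declaration out of the for statement. Declarations inside for() are a C99 feature, and kernel builds of this era defaulted to gnu89, so the C89 form is the one that compiles everywhere. A minimal demonstration:

/* Compiles under -std=gnu89 (the kernel's default back then) and later. */
static int
count_children_c89(int nchildren)
{
	int c, n = 0;

	for (c = 0; c < nchildren; c++)
		n++;
	return (n);
}

#if __STDC_VERSION__ >= 199901L
/* The form being removed throughout this patch: legal only since C99. */
static int
count_children_c99(int nchildren)
{
	int n = 0;

	for (int c = 0; c < nchildren; c++)
		n++;
	return (n);
}
#endif

int
main(void)
{
	int ok = (count_children_c89(3) == 3);

#if __STDC_VERSION__ >= 199901L
	ok = ok && (count_children_c99(3) == 3);
#endif
	return (ok ? 0 : 1);
}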
*/ boolean_t vdev_is_bootable(vdev_t *vd) { +#if defined(__sun__) || defined(__sun) + /* + * Currently, we do not support RAID-Z or partial configuration. + * In addition, only a single top-level vdev is allowed and none of the + * leaves can be wholedisks. + */ + int c; + if (!vd->vdev_ops->vdev_op_leaf) { char *vdev_type = vd->vdev_ops->vdev_op_type; @@ -3032,42 +3085,66 @@ vdev_is_bootable(vdev_t *vd) return (B_FALSE); } - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { if (!vdev_is_bootable(vd->vdev_child[c])) return (B_FALSE); } +#endif /* __sun__ || __sun */ return (B_TRUE); } /* * Load the state from the original vdev tree (ovd) which * we've retrieved from the MOS config object. If the original - * vdev was offline then we transfer that state to the device - * in the current vdev tree (nvd). + * vdev was offline or faulted then we transfer that state to the + * device in the current vdev tree (nvd). */ void vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) { - spa_t *spa = nvd->vdev_spa; + int c; - ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); + ASSERT(nvd->vdev_top->vdev_islog); + ASSERT(spa_config_held(nvd->vdev_spa, + SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); - for (int c = 0; c < nvd->vdev_children; c++) + for (c = 0; c < nvd->vdev_children; c++) vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); - if (nvd->vdev_ops->vdev_op_leaf && ovd->vdev_offline) { + if (nvd->vdev_ops->vdev_op_leaf) { /* - * It would be nice to call vdev_offline() - * directly but the pool isn't fully loaded and - * the txg threads have not been started yet. + * Restore the persistent vdev state */ nvd->vdev_offline = ovd->vdev_offline; - vdev_reopen(nvd->vdev_top); + nvd->vdev_faulted = ovd->vdev_faulted; + nvd->vdev_degraded = ovd->vdev_degraded; + nvd->vdev_removed = ovd->vdev_removed; } } /* + * Determine if a log device has valid content. If the vdev was + * removed or faulted in the MOS config then we know that + * the content on the log device has already been written to the pool. + */ +boolean_t +vdev_log_state_valid(vdev_t *vd) +{ + int c; + + if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && + !vd->vdev_removed) + return (B_TRUE); + + for (c = 0; c < vd->vdev_children; c++) + if (vdev_log_state_valid(vd->vdev_child[c])) + return (B_TRUE); + + return (B_FALSE); +} + +/* * Expand a vdev if possible. */ void @@ -3100,3 +3177,14 @@ vdev_split(vdev_t *vd) } vdev_propagate_state(cvd); } + +#if defined(_KERNEL) && defined(HAVE_SPL) +EXPORT_SYMBOL(vdev_fault); +EXPORT_SYMBOL(vdev_degrade); +EXPORT_SYMBOL(vdev_online); +EXPORT_SYMBOL(vdev_offline); +EXPORT_SYMBOL(vdev_clear); + +module_param(zfs_scrub_limit, int, 0644); +MODULE_PARM_DESC(zfs_scrub_limit, "Max scrub/resilver I/O per leaf vdev"); +#endif