X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fmetaslab.c;h=cd1b6ce730f4843bbe5d41f9f4b24f39f4aa48f5;hb=refs%2Fheads%2Frertzinger%2Ffeature-zpool-get--p;hp=17b4b12c4ee4db8f4aa0b5af2e915e9d459294c2;hpb=428870ff734fdaccc342b33fc53cf94724409a46;p=zfs.git diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 17b4b12..cd1b6ce 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -20,6 +20,7 @@ */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012 by Delphix. All rights reserved. */ #include @@ -30,13 +31,42 @@ #include #include +#define WITH_DF_BLOCK_ALLOCATOR + +/* + * Allow allocations to switch to gang blocks quickly. We do this to + * avoid having to load lots of space_maps in a given txg. There are, + * however, some cases where we want to avoid "fast" ganging and instead + * we want to do an exhaustive search of all metaslabs on this device. + * Currently we don't allow any gang, zil, or dump device related allocations + * to "fast" gang. + */ +#define CAN_FASTGANG(flags) \ + (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \ + METASLAB_GANG_AVOID))) + uint64_t metaslab_aliquot = 512ULL << 10; uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ /* + * The in-core space map representation is more compact than its on-disk form. + * The zfs_condense_pct determines how much more compact the in-core + * space_map representation must be before we compact it on-disk. + * Values should be greater than or equal to 100. + */ +int zfs_condense_pct = 200; + +/* + * This value defines the number of allowed allocation failures per vdev. + * If a device reaches this threshold in a given txg then we consider skipping + * allocations on that device. + */ +int zfs_mg_alloc_failures; + +/* * Metaslab debugging: when set, keeps all space maps in core to verify frees. */ -static int metaslab_debug = 0; +int metaslab_debug = 0; /* * Minimum size which forces the dynamic allocator to change @@ -80,11 +110,12 @@ metaslab_class_create(spa_t *spa, space_map_ops_t *ops) { metaslab_class_t *mc; - mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP); + mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE); mc->mc_spa = spa; mc->mc_rotor = NULL; mc->mc_ops = ops; + mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL); return (mc); } @@ -98,6 +129,7 @@ metaslab_class_destroy(metaslab_class_t *mc) ASSERT(mc->mc_space == 0); ASSERT(mc->mc_dspace == 0); + mutex_destroy(&mc->mc_fastwrite_lock); kmem_free(mc, sizeof (metaslab_class_t)); } @@ -180,9 +212,9 @@ metaslab_compare(const void *x1, const void *x2) /* * If the weights are identical, use the offset to force uniqueness. 
*/ - if (m1->ms_map.sm_start < m2->ms_map.sm_start) + if (m1->ms_map->sm_start < m2->ms_map->sm_start) return (-1); - if (m1->ms_map.sm_start > m2->ms_map.sm_start) + if (m1->ms_map->sm_start > m2->ms_map->sm_start) return (1); ASSERT3P(m1, ==, m2); @@ -195,7 +227,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd) { metaslab_group_t *mg; - mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP); + mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE); mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL); avl_create(&mg->mg_metaslab_tree, metaslab_compare, sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node)); @@ -350,6 +382,9 @@ metaslab_segsize_compare(const void *x1, const void *x2) return (0); } +#if defined(WITH_FF_BLOCK_ALLOCATOR) || \ + defined(WITH_DF_BLOCK_ALLOCATOR) || \ + defined(WITH_CDF_BLOCK_ALLOCATOR) /* * This is a helper function that can be used by the allocator to find * a suitable block to allocate. This will search the specified AVL @@ -389,6 +424,7 @@ metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size, *cursor = 0; return (metaslab_block_picker(t, cursor, size, align)); } +#endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */ static void metaslab_pp_load(space_map_t *sm) @@ -396,9 +432,9 @@ metaslab_pp_load(space_map_t *sm) space_seg_t *ss; ASSERT(sm->sm_ppd == NULL); - sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP); + sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE); - sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); + sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE); avl_create(sm->sm_pp_root, metaslab_segsize_compare, sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node)); @@ -452,6 +488,7 @@ metaslab_pp_maxsize(space_map_t *sm) return (ss->ss_end - ss->ss_start); } +#if defined(WITH_FF_BLOCK_ALLOCATOR) /* * ========================================================================== * The first-fit block allocator @@ -484,6 +521,10 @@ static space_map_ops_t metaslab_ff_ops = { metaslab_ff_fragmented }; +space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops; +#endif /* WITH_FF_BLOCK_ALLOCATOR */ + +#if defined(WITH_DF_BLOCK_ALLOCATOR) /* * ========================================================================== * Dynamic block allocator - @@ -543,11 +584,15 @@ static space_map_ops_t metaslab_df_ops = { metaslab_df_fragmented }; +space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops; +#endif /* WITH_DF_BLOCK_ALLOCATOR */ + /* * ========================================================================== * Other experimental allocators * ========================================================================== */ +#if defined(WITH_CDF_BLOCK_ALLOCATOR) static uint64_t metaslab_cdf_alloc(space_map_t *sm, uint64_t size) { @@ -607,6 +652,10 @@ static space_map_ops_t metaslab_cdf_ops = { metaslab_cdf_fragmented }; +space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops; +#endif /* WITH_CDF_BLOCK_ALLOCATOR */ + +#if defined(WITH_NDF_BLOCK_ALLOCATOR) uint64_t metaslab_ndf_clump_shift = 4; static uint64_t @@ -672,6 +721,7 @@ static space_map_ops_t metaslab_ndf_ops = { }; space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops; +#endif /* WITH_NDF_BLOCK_ALLOCATOR */ /* * ========================================================================== @@ -685,7 +735,7 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo, vdev_t *vd = mg->mg_vd; metaslab_t *msp; - msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); + msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE); mutex_init(&msp->ms_lock, NULL, 
MUTEX_DEFAULT, NULL); msp->ms_smo_syncing = *smo; @@ -697,14 +747,15 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo, * addition of new space; and for debugging, it ensures that we'd * data fault on any attempt to use this metaslab before it's ready. */ - space_map_create(&msp->ms_map, start, size, + msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_PUSHPAGE); + space_map_create(msp->ms_map, start, size, vd->vdev_ashift, &msp->ms_lock); metaslab_group_add(mg, msp); if (metaslab_debug && smo->smo_object != 0) { mutex_enter(&msp->ms_lock); - VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops, + VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops, SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0); mutex_exit(&msp->ms_lock); } @@ -730,26 +781,32 @@ void metaslab_fini(metaslab_t *msp) { metaslab_group_t *mg = msp->ms_group; + int t; vdev_space_update(mg->mg_vd, - -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size); + -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size); metaslab_group_remove(mg, msp); mutex_enter(&msp->ms_lock); - space_map_unload(&msp->ms_map); - space_map_destroy(&msp->ms_map); + space_map_unload(msp->ms_map); + space_map_destroy(msp->ms_map); + kmem_free(msp->ms_map, sizeof (*msp->ms_map)); - for (int t = 0; t < TXG_SIZE; t++) { - space_map_destroy(&msp->ms_allocmap[t]); - space_map_destroy(&msp->ms_freemap[t]); + for (t = 0; t < TXG_SIZE; t++) { + space_map_destroy(msp->ms_allocmap[t]); + space_map_destroy(msp->ms_freemap[t]); + kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t])); + kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t])); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) - space_map_destroy(&msp->ms_defermap[t]); + for (t = 0; t < TXG_DEFER_SIZE; t++) { + space_map_destroy(msp->ms_defermap[t]); + kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t])); + } - ASSERT3S(msp->ms_deferspace, ==, 0); + ASSERT0(msp->ms_deferspace); mutex_exit(&msp->ms_lock); mutex_destroy(&msp->ms_lock); @@ -766,7 +823,7 @@ static uint64_t metaslab_weight(metaslab_t *msp) { metaslab_group_t *mg = msp->ms_group; - space_map_t *sm = &msp->ms_map; + space_map_t *sm = msp->ms_map; space_map_obj_t *smo = &msp->ms_smo; vdev_t *vd = mg->mg_vd; uint64_t weight, space; @@ -826,7 +883,7 @@ metaslab_prefetch(metaslab_group_t *mg) * Prefetch the next potential metaslabs */ for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) { - space_map_t *sm = &msp->ms_map; + space_map_t *sm = msp->ms_map; space_map_obj_t *smo = &msp->ms_smo; /* If we have reached our prefetch limit then we're done */ @@ -844,26 +901,28 @@ metaslab_prefetch(metaslab_group_t *mg) } static int -metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size) +metaslab_activate(metaslab_t *msp, uint64_t activation_weight) { metaslab_group_t *mg = msp->ms_group; - space_map_t *sm = &msp->ms_map; + space_map_t *sm = msp->ms_map; space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops; + int t; ASSERT(MUTEX_HELD(&msp->ms_lock)); if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { space_map_load_wait(sm); if (!sm->sm_loaded) { - int error = space_map_load(sm, sm_ops, SM_FREE, - &msp->ms_smo, + space_map_obj_t *smo = &msp->ms_smo; + + int error = space_map_load(sm, sm_ops, SM_FREE, smo, spa_meta_objset(msp->ms_group->mg_vd->vdev_spa)); if (error) { metaslab_group_sort(msp->ms_group, msp, 0); return (error); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) - space_map_walk(&msp->ms_defermap[t], + for (t = 0; t < TXG_DEFER_SIZE; t++) + space_map_walk(msp->ms_defermap[t], 
space_map_claim, sm); } @@ -877,13 +936,6 @@ metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size) mutex_exit(&mg->mg_lock); } - /* - * If we were able to load the map then make sure - * that this map is still able to satisfy our request. - */ - if (msp->ms_weight < size) - return (ENOSPC); - metaslab_group_sort(msp->ms_group, msp, msp->ms_weight | activation_weight); } @@ -901,12 +953,159 @@ metaslab_passivate(metaslab_t *msp, uint64_t size) * this metaslab again. In that case, it had better be empty, * or we would be leaving space on the table. */ - ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0); + ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0); metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size)); ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); } /* + * Determine if the in-core space map representation can be condensed on-disk. + * We would like to use the following criteria to make our decision: + * + * 1. The size of the space map object should not dramatically increase as a + * result of writing out our in-core free map. + * + * 2. The minimal on-disk space map representation is zfs_condense_pct/100 + * times the size than the in-core representation (i.e. zfs_condense_pct = 110 + * and in-core = 1MB, minimal = 1.1.MB). + * + * Checking the first condition is tricky since we don't want to walk + * the entire AVL tree calculating the estimated on-disk size. Instead we + * use the size-ordered AVL tree in the space map and calculate the + * size required for the largest segment in our in-core free map. If the + * size required to represent that segment on disk is larger than the space + * map object then we avoid condensing this map. + * + * To determine the second criterion we use a best-case estimate and assume + * each segment can be represented on-disk as a single 64-bit entry. We refer + * to this best-case estimate as the space map's minimal form. + */ +static boolean_t +metaslab_should_condense(metaslab_t *msp) +{ + space_map_t *sm = msp->ms_map; + space_map_obj_t *smo = &msp->ms_smo_syncing; + space_seg_t *ss; + uint64_t size, entries, segsz; + + ASSERT(MUTEX_HELD(&msp->ms_lock)); + ASSERT(sm->sm_loaded); + + /* + * Use the sm_pp_root AVL tree, which is ordered by size, to obtain + * the largest segment in the in-core free map. If the tree is + * empty then we should condense the map. + */ + ss = avl_last(sm->sm_pp_root); + if (ss == NULL) + return (B_TRUE); + + /* + * Calculate the number of 64-bit entries this segment would + * require when written to disk. If this single segment would be + * larger on-disk than the entire current on-disk structure, then + * clearly condensing will increase the on-disk structure size. + */ + size = (ss->ss_end - ss->ss_start) >> sm->sm_shift; + entries = size / (MIN(size, SM_RUN_MAX)); + segsz = entries * sizeof (uint64_t); + + return (segsz <= smo->smo_objsize && + smo->smo_objsize >= (zfs_condense_pct * + sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100); +} + +/* + * Condense the on-disk space map representation to its minimized form. + * The minimized form consists of a small number of allocations followed by + * the in-core free map. 
+ */ +static void +metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) +{ + spa_t *spa = msp->ms_group->mg_vd->vdev_spa; + space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK]; + space_map_t condense_map; + space_map_t *sm = msp->ms_map; + objset_t *mos = spa_meta_objset(spa); + space_map_obj_t *smo = &msp->ms_smo_syncing; + int t; + + ASSERT(MUTEX_HELD(&msp->ms_lock)); + ASSERT3U(spa_sync_pass(spa), ==, 1); + ASSERT(sm->sm_loaded); + + spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, " + "smo size %llu, segments %lu", txg, + (msp->ms_map->sm_start / msp->ms_map->sm_size), msp, + smo->smo_objsize, avl_numnodes(&sm->sm_root)); + + /* + * Create an map that is a 100% allocated map. We remove segments + * that have been freed in this txg, any deferred frees that exist, + * and any allocation in the future. Removing segments should be + * a relatively inexpensive operation since we expect these maps to + * a small number of nodes. + */ + space_map_create(&condense_map, sm->sm_start, sm->sm_size, + sm->sm_shift, sm->sm_lock); + space_map_add(&condense_map, condense_map.sm_start, + condense_map.sm_size); + + /* + * Remove what's been freed in this txg from the condense_map. + * Since we're in sync_pass 1, we know that all the frees from + * this txg are in the freemap. + */ + space_map_walk(freemap, space_map_remove, &condense_map); + + for (t = 0; t < TXG_DEFER_SIZE; t++) + space_map_walk(msp->ms_defermap[t], + space_map_remove, &condense_map); + + for (t = 1; t < TXG_CONCURRENT_STATES; t++) + space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK], + space_map_remove, &condense_map); + + /* + * We're about to drop the metaslab's lock thus allowing + * other consumers to change it's content. Set the + * space_map's sm_condensing flag to ensure that + * allocations on this metaslab do not occur while we're + * in the middle of committing it to disk. This is only critical + * for the ms_map as all other space_maps use per txg + * views of their content. + */ + sm->sm_condensing = B_TRUE; + + mutex_exit(&msp->ms_lock); + space_map_truncate(smo, mos, tx); + mutex_enter(&msp->ms_lock); + + /* + * While we would ideally like to create a space_map representation + * that consists only of allocation records, doing so can be + * prohibitively expensive because the in-core free map can be + * large, and therefore computationally expensive to subtract + * from the condense_map. Instead we sync out two maps, a cheap + * allocation only map followed by the in-core free map. While not + * optimal, this is typically close to optimal, and much cheaper to + * compute. + */ + space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx); + space_map_vacate(&condense_map, NULL, NULL); + space_map_destroy(&condense_map); + + space_map_sync(sm, SM_FREE, smo, mos, tx); + sm->sm_condensing = B_FALSE; + + spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, " + "smo size %llu", txg, + (msp->ms_map->sm_start / msp->ms_map->sm_size), msp, + smo->smo_objsize); +} + +/* * Write a metaslab to disk in the context of the specified transaction group. 
*/ void @@ -915,17 +1114,29 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) vdev_t *vd = msp->ms_group->mg_vd; spa_t *spa = vd->vdev_spa; objset_t *mos = spa_meta_objset(spa); - space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK]; - space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK]; - space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; - space_map_t *sm = &msp->ms_map; + space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK]; + space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK]; + space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; + space_map_t *sm = msp->ms_map; space_map_obj_t *smo = &msp->ms_smo_syncing; dmu_buf_t *db; dmu_tx_t *tx; ASSERT(!vd->vdev_ishole); - if (allocmap->sm_space == 0 && freemap->sm_space == 0) + /* + * This metaslab has just been added so there's no work to do now. + */ + if (*freemap == NULL) { + ASSERT3P(allocmap, ==, NULL); + return; + } + + ASSERT3P(allocmap, !=, NULL); + ASSERT3P(*freemap, !=, NULL); + ASSERT3P(*freed_map, !=, NULL); + + if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0) return; /* @@ -953,49 +1164,36 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) mutex_enter(&msp->ms_lock); - space_map_walk(freemap, space_map_add, freed_map); - - if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >= - 2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) { - /* - * The in-core space map representation is twice as compact - * as the on-disk one, so it's time to condense the latter - * by generating a pure allocmap from first principles. - * - * This metaslab is 100% allocated, - * minus the content of the in-core map (sm), - * minus what's been freed this txg (freed_map), - * minus deferred frees (ms_defermap[]), - * minus allocations from txgs in the future - * (because they haven't been committed yet). - */ - space_map_vacate(allocmap, NULL, NULL); - space_map_vacate(freemap, NULL, NULL); - - space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size); - - space_map_walk(sm, space_map_remove, allocmap); - space_map_walk(freed_map, space_map_remove, allocmap); - - for (int t = 0; t < TXG_DEFER_SIZE; t++) - space_map_walk(&msp->ms_defermap[t], - space_map_remove, allocmap); + if (sm->sm_loaded && spa_sync_pass(spa) == 1 && + metaslab_should_condense(msp)) { + metaslab_condense(msp, txg, tx); + } else { + space_map_sync(allocmap, SM_ALLOC, smo, mos, tx); + space_map_sync(*freemap, SM_FREE, smo, mos, tx); + } - for (int t = 1; t < TXG_CONCURRENT_STATES; t++) - space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK], - space_map_remove, allocmap); + space_map_vacate(allocmap, NULL, NULL); - mutex_exit(&msp->ms_lock); - space_map_truncate(smo, mos, tx); - mutex_enter(&msp->ms_lock); + /* + * For sync pass 1, we avoid walking the entire space map and + * instead will just swap the pointers for freemap and + * freed_map. We can safely do this since the freed_map is + * guaranteed to be empty on the initial pass. 
+ */ + if (spa_sync_pass(spa) == 1) { + ASSERT0((*freed_map)->sm_space); + ASSERT0(avl_numnodes(&(*freed_map)->sm_root)); + space_map_swap(freemap, freed_map); + } else { + space_map_vacate(*freemap, space_map_add, *freed_map); } - space_map_sync(allocmap, SM_ALLOC, smo, mos, tx); - space_map_sync(freemap, SM_FREE, smo, mos, tx); + ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space); + ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space); mutex_exit(&msp->ms_lock); - VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); + VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); dmu_buf_will_dirty(db, tx); ASSERT3U(db->db_size, >=, sizeof (*smo)); bcopy(smo, db->db_data, sizeof (*smo)); @@ -1013,12 +1211,13 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) { space_map_obj_t *smo = &msp->ms_smo; space_map_obj_t *smosync = &msp->ms_smo_syncing; - space_map_t *sm = &msp->ms_map; - space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; - space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE]; + space_map_t *sm = msp->ms_map; + space_map_t *freed_map = msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; + space_map_t *defer_map = msp->ms_defermap[txg % TXG_DEFER_SIZE]; metaslab_group_t *mg = msp->ms_group; vdev_t *vd = mg->mg_vd; int64_t alloc_delta, defer_delta; + int t; ASSERT(!vd->vdev_ishole); @@ -1026,19 +1225,30 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) /* * If this metaslab is just becoming available, initialize its - * allocmaps and freemaps and add its capacity to the vdev. + * allocmaps, freemaps, and defermap and add its capacity to the vdev. */ - if (freed_map->sm_size == 0) { - for (int t = 0; t < TXG_SIZE; t++) { - space_map_create(&msp->ms_allocmap[t], sm->sm_start, + if (freed_map == NULL) { + ASSERT(defer_map == NULL); + for (t = 0; t < TXG_SIZE; t++) { + msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t), + KM_PUSHPAGE); + space_map_create(msp->ms_allocmap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); - space_map_create(&msp->ms_freemap[t], sm->sm_start, + msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t), + KM_PUSHPAGE); + space_map_create(msp->ms_freemap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) - space_map_create(&msp->ms_defermap[t], sm->sm_start, + for (t = 0; t < TXG_DEFER_SIZE; t++) { + msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t), + KM_PUSHPAGE); + space_map_create(msp->ms_defermap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); + } + + freed_map = msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; + defer_map = msp->ms_defermap[txg % TXG_DEFER_SIZE]; vdev_space_update(vd, 0, 0, sm->sm_size); } @@ -1048,8 +1258,8 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); - ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0); - ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0); + ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0); + ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0); /* * If there's a space_map_load() in progress, wait for it to complete @@ -1082,8 +1292,8 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { int evictable = 1; - for (int t = 1; t < TXG_CONCURRENT_STATES; t++) - if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space) + for (t = 1; t < TXG_CONCURRENT_STATES; t++) + if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space) evictable = 0; if (evictable && !metaslab_debug) 
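
The condensing change above reduces to a two-part size comparison. The following standalone sketch restates that heuristic with plain integers; the helper name should_condense_sketch and the constants SM_RUN_MAX_SKETCH and CONDENSE_PCT are stand-ins for the patch's SM_RUN_MAX and zfs_condense_pct, not part of the diff.

#include <stdint.h>

#define	SM_RUN_MAX_SKETCH	((1ULL << 15) - 1)	/* placeholder run-length limit */
#define	CONDENSE_PCT		200			/* mirrors zfs_condense_pct */

static int
should_condense_sketch(uint64_t largest_seg_blocks, uint64_t smo_objsize,
    uint64_t in_core_segments)
{
	uint64_t entries, segsz, minimal;

	/* Empty in-core tree: always worth condensing (mirrors ss == NULL). */
	if (largest_seg_blocks == 0)
		return (1);

	/* 64-bit entries needed to write the largest free segment to disk. */
	entries = largest_seg_blocks /
	    (largest_seg_blocks < SM_RUN_MAX_SKETCH ?
	    largest_seg_blocks : SM_RUN_MAX_SKETCH);
	segsz = entries * sizeof (uint64_t);

	/* Best case: each in-core segment becomes one 64-bit entry on disk. */
	minimal = in_core_segments * sizeof (uint64_t);

	/*
	 * Condense only if a single huge segment would not dominate the
	 * rewrite and the current on-disk object is at least
	 * CONDENSE_PCT/100 times the minimal form.
	 */
	return (segsz <= smo_objsize &&
	    smo_objsize >= (CONDENSE_PCT * minimal) / 100);
}

With CONDENSE_PCT = 200, a map is condensed once its on-disk object has grown to at least twice the best-case size of its in-core representation.
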
@@ -1099,15 +1309,17 @@ void metaslab_sync_reassess(metaslab_group_t *mg) { vdev_t *vd = mg->mg_vd; + int64_t failures = mg->mg_alloc_failures; + int m; /* * Re-evaluate all metaslabs which have lower offsets than the * bonus area. */ - for (int m = 0; m < vd->vdev_ms_count; m++) { + for (m = 0; m < vd->vdev_ms_count; m++) { metaslab_t *msp = vd->vdev_ms[m]; - if (msp->ms_map.sm_start > mg->mg_bonus_area) + if (msp->ms_map->sm_start > mg->mg_bonus_area) break; mutex_enter(&msp->ms_lock); @@ -1115,6 +1327,8 @@ metaslab_sync_reassess(metaslab_group_t *mg) mutex_exit(&msp->ms_lock); } + atomic_add_64(&mg->mg_alloc_failures, -failures); + /* * Prefetch the next potential metaslabs */ @@ -1126,7 +1340,7 @@ metaslab_distance(metaslab_t *msp, dva_t *dva) { uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; - uint64_t start = msp->ms_map.sm_start >> ms_shift; + uint64_t start = msp->ms_map->sm_start >> ms_shift; if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) return (1ULL << 63); @@ -1139,9 +1353,10 @@ metaslab_distance(metaslab_t *msp, dva_t *dva) } static uint64_t -metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg, - uint64_t min_distance, dva_t *dva, int d) +metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize, + uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags) { + spa_t *spa = mg->mg_vd->vdev_spa; metaslab_t *msp = NULL; uint64_t offset = -1ULL; avl_tree_t *t = &mg->mg_metaslab_tree; @@ -1162,11 +1377,17 @@ metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg, mutex_enter(&mg->mg_lock); for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) { - if (msp->ms_weight < size) { + if (msp->ms_weight < asize) { + spa_dbgmsg(spa, "%s: failed to meet weight " + "requirement: vdev %llu, txg %llu, mg %p, " + "msp %p, psize %llu, asize %llu, " + "failures %llu, weight %llu", + spa_name(spa), mg->mg_vd->vdev_id, txg, + mg, msp, psize, asize, + mg->mg_alloc_failures, msp->ms_weight); mutex_exit(&mg->mg_lock); return (-1ULL); } - was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; if (activation_weight == METASLAB_WEIGHT_PRIMARY) break; @@ -1185,15 +1406,44 @@ metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg, if (msp == NULL) return (-1ULL); + /* + * If we've already reached the allowable number of failed + * allocation attempts on this metaslab group then we + * consider skipping it. We skip it only if we're allowed + * to "fast" gang, the physical size is larger than + * a gang block, and we're attempting to allocate from + * the primary metaslab. + */ + if (mg->mg_alloc_failures > zfs_mg_alloc_failures && + CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE && + activation_weight == METASLAB_WEIGHT_PRIMARY) { + spa_dbgmsg(spa, "%s: skipping metaslab group: " + "vdev %llu, txg %llu, mg %p, psize %llu, " + "asize %llu, failures %llu", spa_name(spa), + mg->mg_vd->vdev_id, txg, mg, psize, asize, + mg->mg_alloc_failures); + return (-1ULL); + } + mutex_enter(&msp->ms_lock); /* + * If this metaslab is currently condensing then pick again as + * we can't manipulate this metaslab until it's committed + * to disk. + */ + if (msp->ms_map->sm_condensing) { + mutex_exit(&msp->ms_lock); + continue; + } + + /* * Ensure that the metaslab we have selected is still * capable of handling our request. It's possible that * another thread may have changed the weight while we * were blocked on the metaslab lock. 
*/ - if (msp->ms_weight < size || (was_active && + if (msp->ms_weight < asize || (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK) && activation_weight == METASLAB_WEIGHT_PRIMARY)) { mutex_exit(&msp->ms_lock); @@ -1208,23 +1458,25 @@ metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg, continue; } - if (metaslab_activate(msp, activation_weight, size) != 0) { + if (metaslab_activate(msp, activation_weight) != 0) { mutex_exit(&msp->ms_lock); continue; } - if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL) + if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL) break; - metaslab_passivate(msp, space_map_maxsize(&msp->ms_map)); + atomic_inc_64(&mg->mg_alloc_failures); + + metaslab_passivate(msp, space_map_maxsize(msp->ms_map)); mutex_exit(&msp->ms_lock); } - if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0) + if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0) vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); - space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size); + space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize); mutex_exit(&msp->ms_lock); @@ -1238,7 +1490,7 @@ static int metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags) { - metaslab_group_t *mg, *rotor; + metaslab_group_t *mg, *fast_mg, *rotor; vdev_t *vd; int dshift = 3; int all_zero; @@ -1256,6 +1508,9 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) return (ENOSPC); + if (flags & METASLAB_FASTWRITE) + mutex_enter(&mc->mc_fastwrite_lock); + /* * Start at the rotor and loop through all mgs until we find something. * Note that there's no locking on mc_rotor or mc_aliquot because @@ -1298,6 +1553,15 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, } else if (d != 0) { vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); mg = vd->vdev_mg->mg_next; + } else if (flags & METASLAB_FASTWRITE) { + mg = fast_mg = mc->mc_rotor; + + do { + if (fast_mg->mg_vd->vdev_pending_fastwrite < + mg->mg_vd->vdev_pending_fastwrite) + mg = fast_mg; + } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor); + } else { mg = mc->mc_rotor; } @@ -1351,7 +1615,8 @@ top: asize = vdev_psize_to_asize(vd, psize); ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); - offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d); + offset = metaslab_group_alloc(mg, psize, asize, txg, distance, + dva, d, flags); if (offset != -1ULL) { /* * If we've just selected this metaslab group, @@ -1363,21 +1628,28 @@ top: vdev_stat_t *vs = &vd->vdev_stat; int64_t vu, cu; - /* - * Determine percent used in units of 0..1024. - * (This is just to avoid floating point.) - */ - vu = (vs->vs_alloc << 10) / (vs->vs_space + 1); - cu = (mc->mc_alloc << 10) / (mc->mc_space + 1); + vu = (vs->vs_alloc * 100) / (vs->vs_space + 1); + cu = (mc->mc_alloc * 100) / (mc->mc_space + 1); /* - * Bias by at most +/- 25% of the aliquot. + * Calculate how much more or less we should + * try to allocate from this device during + * this iteration around the rotor. + * For example, if a device is 80% full + * and the pool is 20% full then we should + * reduce allocations by 60% on this device. + * + * mg_bias = (20 - 80) * 512K / 100 = -307K + * + * This reduces allocations by 307K for this + * iteration. 
*/ mg->mg_bias = ((cu - vu) * - (int64_t)mg->mg_aliquot) / (1024 * 4); + (int64_t)mg->mg_aliquot) / 100; } - if (atomic_add_64_nv(&mc->mc_aliquot, asize) >= + if ((flags & METASLAB_FASTWRITE) || + atomic_add_64_nv(&mc->mc_aliquot, asize) >= mg->mg_aliquot + mg->mg_bias) { mc->mc_rotor = mg->mg_next; mc->mc_aliquot = 0; @@ -1388,6 +1660,12 @@ top: DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); DVA_SET_ASIZE(&dva[d], asize); + if (flags & METASLAB_FASTWRITE) { + atomic_add_64(&vd->vdev_pending_fastwrite, + psize); + mutex_exit(&mc->mc_fastwrite_lock); + } + return (0); } next: @@ -1409,6 +1687,8 @@ next: bzero(&dva[d], sizeof (dva_t)); + if (flags & METASLAB_FASTWRITE) + mutex_exit(&mc->mc_fastwrite_lock); return (ENOSPC); } @@ -1446,13 +1726,13 @@ metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) mutex_enter(&msp->ms_lock); if (now) { - space_map_remove(&msp->ms_allocmap[txg & TXG_MASK], + space_map_remove(msp->ms_allocmap[txg & TXG_MASK], offset, size); - space_map_free(&msp->ms_map, offset, size); + space_map_free(msp->ms_map, offset, size); } else { - if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0) + if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0) vdev_dirty(vd, VDD_METASLAB, msp, txg); - space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size); + space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size); } mutex_exit(&msp->ms_lock); @@ -1487,10 +1767,10 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) mutex_enter(&msp->ms_lock); - if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded) - error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0); + if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded) + error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); - if (error == 0 && !space_map_contains(&msp->ms_map, offset, size)) + if (error == 0 && !space_map_contains(msp->ms_map, offset, size)) error = ENOENT; if (error || txg == 0) { /* txg == 0 indicates dry run */ @@ -1498,12 +1778,12 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) return (error); } - space_map_claim(&msp->ms_map, offset, size); + space_map_claim(msp->ms_map, offset, size); if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ - if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0) + if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0) vdev_dirty(vd, VDD_METASLAB, msp, txg); - space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size); + space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size); } mutex_exit(&msp->ms_lock); @@ -1517,7 +1797,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, { dva_t *dva = bp->blk_dva; dva_t *hintdva = hintbp->blk_dva; - int error = 0; + int d, error = 0; ASSERT(bp->blk_birth == 0); ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); @@ -1533,7 +1813,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, ASSERT(BP_GET_NDVAS(bp) == 0); ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); - for (int d = 0; d < ndvas; d++) { + for (d = 0; d < ndvas; d++) { error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, txg, flags); if (error) { @@ -1559,14 +1839,14 @@ void metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) { const dva_t *dva = bp->blk_dva; - int ndvas = BP_GET_NDVAS(bp); + int d, ndvas = BP_GET_NDVAS(bp); ASSERT(!BP_IS_HOLE(bp)); ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); - for (int d = 0; d < ndvas; d++) + for (d = 0; d 
< ndvas; d++) metaslab_free_dva(spa, &dva[d], txg, now); spa_config_exit(spa, SCL_FREE, FTAG); @@ -1577,7 +1857,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) { const dva_t *dva = bp->blk_dva; int ndvas = BP_GET_NDVAS(bp); - int error = 0; + int d, error = 0; ASSERT(!BP_IS_HOLE(bp)); @@ -1592,7 +1872,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); - for (int d = 0; d < ndvas; d++) + for (d = 0; d < ndvas; d++) if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) break; @@ -1602,3 +1882,53 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) return (error); } + +void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) +{ + const dva_t *dva = bp->blk_dva; + int ndvas = BP_GET_NDVAS(bp); + uint64_t psize = BP_GET_PSIZE(bp); + int d; + vdev_t *vd; + + ASSERT(!BP_IS_HOLE(bp)); + ASSERT(psize > 0); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + for (d = 0; d < ndvas; d++) { + if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) + continue; + atomic_add_64(&vd->vdev_pending_fastwrite, psize); + } + + spa_config_exit(spa, SCL_VDEV, FTAG); +} + +void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) +{ + const dva_t *dva = bp->blk_dva; + int ndvas = BP_GET_NDVAS(bp); + uint64_t psize = BP_GET_PSIZE(bp); + int d; + vdev_t *vd; + + ASSERT(!BP_IS_HOLE(bp)); + ASSERT(psize > 0); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + for (d = 0; d < ndvas; d++) { + if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) + continue; + ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); + atomic_sub_64(&vd->vdev_pending_fastwrite, psize); + } + + spa_config_exit(spa, SCL_VDEV, FTAG); +} + +#if defined(_KERNEL) && defined(HAVE_SPL) +module_param(metaslab_debug, int, 0644); +MODULE_PARM_DESC(metaslab_debug, "keep space maps in core to verify frees"); +#endif /* _KERNEL && HAVE_SPL */
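
The rotor-bias rework above replaces the 0..1024 fixed-point scale with whole percentages. The following self-contained sketch, using an illustrative function name mg_bias_sketch and the example figures from the patch comment, shows the arithmetic; in the driver the inputs come from the vdev and metaslab-class statistics rather than literal values.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the percent-based bias: given per-vdev and pool-wide
 * allocated/total space plus the group's aliquot, return how many bytes
 * to add to (or subtract from) this group's share per pass around the rotor.
 */
static int64_t
mg_bias_sketch(uint64_t vs_alloc, uint64_t vs_space,
    uint64_t mc_alloc, uint64_t mc_space, uint64_t mg_aliquot)
{
	int64_t vu = (vs_alloc * 100) / (vs_space + 1);	/* vdev percent used */
	int64_t cu = (mc_alloc * 100) / (mc_space + 1);	/* pool percent used */

	return (((cu - vu) * (int64_t)mg_aliquot) / 100);
}

int
main(void)
{
	/* Example from the patch comment: vdev 80% full, pool 20% full. */
	uint64_t aliquot = 512ULL << 10;	/* 512K */
	int64_t bias = mg_bias_sketch(80, 100, 20, 100, aliquot);

	/* Prints roughly -307K: allocations on this vdev are scaled back. */
	printf("bias = %lld bytes\n", (long long)bias);
	return (0);
}

A vdev at 80% utilization in a pool at 20% utilization yields a bias of about -307K against a 512K aliquot, i.e. roughly 60% fewer allocations are directed at that device per rotor pass, matching the worked example in the patch comment.
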