4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
26 #include <sys/zfs_context.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/space_map.h>
30 #include <sys/metaslab_impl.h>
31 #include <sys/vdev_impl.h>
34 #define WITH_DF_BLOCK_ALLOCATOR
37 * Allow allocations to switch to gang blocks quickly. We do this to
38 * avoid having to load lots of space_maps in a given txg. There are,
39 * however, some cases where we want to avoid "fast" ganging and instead
40 * we want to do an exhaustive search of all metaslabs on this device.
41 * Currently we don't allow any gang, zil, or dump device related allocations
42 * to "fast" gang.
44 #define CAN_FASTGANG(flags) \
45 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
46 METASLAB_GANG_AVOID)))
48 uint64_t metaslab_aliquot = 512ULL << 10;
49 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
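/*
 * Illustrative note (not from the original source): with these defaults a
 * metaslab group hands out roughly metaslab_aliquot bytes (512K, scaled by
 * the vdev's child count in metaslab_group_activate()) before the rotor
 * moves on to the next top-level vdev, and metaslab_gang_bang exceeds every
 * legal block size, so forced test ganging never fires.  Lowering it for
 * testing, e.g.
 *
 *	metaslab_gang_bang = 64ULL << 10;	(hypothetical test value)
 *
 * makes about one in four writes of 64K or more gang, because
 * metaslab_alloc_dva() also requires (ddi_get_lbolt() & 3) == 0.
 */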
52 * This value defines the number of allowed allocation failures per vdev.
53 * If a device reaches this threshold in a given txg then we consider skipping
54 * allocations on that device.
56 int zfs_mg_alloc_failures;
59 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
61 int metaslab_debug = 0;
64 * Minimum size which forces the dynamic allocator to change
65 * its allocation strategy. Once the space map cannot satisfy
66 * an allocation of this size then it switches to using a more
67 * aggressive strategy (i.e., search by size rather than offset).
69 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
72 * The minimum free space, in percent, which must be available
73 * in a space map to continue allocations in a first-fit fashion.
74 * Once the space_map's free space drops below this level we dynamically
75 * switch to using best-fit allocations.
77 int metaslab_df_free_pct = 4;
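/*
 * Worked example (illustrative numbers): metaslab_df_alloc() below keeps
 * using first-fit only while both
 *
 *	max_size >= metaslab_df_alloc_threshold	(largest free segment)
 *	free_pct >= metaslab_df_free_pct	(sm_space * 100 / sm_size)
 *
 * hold.  For a hypothetical 1 GB metaslab with 30 MB free, free_pct is
 * 30 * 100 / 1024 = 2, below the 4% default, so the allocator switches to
 * the size-sorted tree (best-fit) even if a segment larger than the 128K
 * default threshold still exists.
 */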
80 * A metaslab is considered "free" if it contains a contiguous
81 * segment which is greater than metaslab_min_alloc_size.
83 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
86 * Max number of space_maps to prefetch.
88 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
91 * Percentage bonus multiplier for metaslabs that are in the bonus area.
93 int metaslab_smo_bonus_pct = 150;
96 * ==========================================================================
97 * Metaslab classes
98 * ==========================================================================
101 metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
103 metaslab_class_t *mc;
105 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_PUSHPAGE);
110 mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);
116 metaslab_class_destroy(metaslab_class_t *mc)
118 ASSERT(mc->mc_rotor == NULL);
119 ASSERT(mc->mc_alloc == 0);
120 ASSERT(mc->mc_deferred == 0);
121 ASSERT(mc->mc_space == 0);
122 ASSERT(mc->mc_dspace == 0);
124 mutex_destroy(&mc->mc_fastwrite_lock);
125 kmem_free(mc, sizeof (metaslab_class_t));
129 metaslab_class_validate(metaslab_class_t *mc)
131 metaslab_group_t *mg;
135 * Must hold one of the spa_config locks.
137 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
138 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
140 if ((mg = mc->mc_rotor) == NULL)
145 ASSERT(vd->vdev_mg != NULL);
146 ASSERT3P(vd->vdev_top, ==, vd);
147 ASSERT3P(mg->mg_class, ==, mc);
148 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
149 } while ((mg = mg->mg_next) != mc->mc_rotor);
155 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
156 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
158 atomic_add_64(&mc->mc_alloc, alloc_delta);
159 atomic_add_64(&mc->mc_deferred, defer_delta);
160 atomic_add_64(&mc->mc_space, space_delta);
161 atomic_add_64(&mc->mc_dspace, dspace_delta);
165 metaslab_class_get_alloc(metaslab_class_t *mc)
167 return (mc->mc_alloc);
171 metaslab_class_get_deferred(metaslab_class_t *mc)
173 return (mc->mc_deferred);
177 metaslab_class_get_space(metaslab_class_t *mc)
179 return (mc->mc_space);
183 metaslab_class_get_dspace(metaslab_class_t *mc)
185 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
189 * ==========================================================================
190 * Metaslab groups
191 * ==========================================================================
194 metaslab_compare(const void *x1, const void *x2)
196 const metaslab_t *m1 = x1;
197 const metaslab_t *m2 = x2;
199 if (m1->ms_weight < m2->ms_weight)
201 if (m1->ms_weight > m2->ms_weight)
205 * If the weights are identical, use the offset to force uniqueness.
207 if (m1->ms_map.sm_start < m2->ms_map.sm_start)
209 if (m1->ms_map.sm_start > m2->ms_map.sm_start)
212 ASSERT3P(m1, ==, m2);
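/*
 * For reference, a self-contained comparator with the same ordering
 * (heaviest metaslab first, start offset as the tie-breaker) could look
 * like the sketch below; it is illustrative only and uses plain integers
 * rather than the real metaslab_t fields:
 *
 *	static int
 *	weight_compare(uint64_t w1, uint64_t s1, uint64_t w2, uint64_t s2)
 *	{
 *		if (w1 != w2)
 *			return (w1 < w2 ? 1 : -1);	(heavier sorts first)
 *		if (s1 != s2)
 *			return (s1 < s2 ? -1 : 1);
 *		return (0);
 *	}
 *
 * With this ordering avl_first() always yields the highest-weight metaslab,
 * which is what metaslab_group_alloc() relies on.
 */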
218 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
220 metaslab_group_t *mg;
222 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_PUSHPAGE);
223 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
224 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
225 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
228 mg->mg_activation_count = 0;
234 metaslab_group_destroy(metaslab_group_t *mg)
236 ASSERT(mg->mg_prev == NULL);
237 ASSERT(mg->mg_next == NULL);
239 * We may have gone below zero with the activation count
240 * either because we never activated in the first place or
241 * because we're done, and possibly removing the vdev.
243 ASSERT(mg->mg_activation_count <= 0);
245 avl_destroy(&mg->mg_metaslab_tree);
246 mutex_destroy(&mg->mg_lock);
247 kmem_free(mg, sizeof (metaslab_group_t));
251 metaslab_group_activate(metaslab_group_t *mg)
253 metaslab_class_t *mc = mg->mg_class;
254 metaslab_group_t *mgprev, *mgnext;
256 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
258 ASSERT(mc->mc_rotor != mg);
259 ASSERT(mg->mg_prev == NULL);
260 ASSERT(mg->mg_next == NULL);
261 ASSERT(mg->mg_activation_count <= 0);
263 if (++mg->mg_activation_count <= 0)
266 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
268 if ((mgprev = mc->mc_rotor) == NULL) {
272 mgnext = mgprev->mg_next;
273 mg->mg_prev = mgprev;
274 mg->mg_next = mgnext;
275 mgprev->mg_next = mg;
276 mgnext->mg_prev = mg;
282 metaslab_group_passivate(metaslab_group_t *mg)
284 metaslab_class_t *mc = mg->mg_class;
285 metaslab_group_t *mgprev, *mgnext;
287 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
289 if (--mg->mg_activation_count != 0) {
290 ASSERT(mc->mc_rotor != mg);
291 ASSERT(mg->mg_prev == NULL);
292 ASSERT(mg->mg_next == NULL);
293 ASSERT(mg->mg_activation_count < 0);
297 mgprev = mg->mg_prev;
298 mgnext = mg->mg_next;
303 mc->mc_rotor = mgnext;
304 mgprev->mg_next = mgnext;
305 mgnext->mg_prev = mgprev;
313 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
315 mutex_enter(&mg->mg_lock);
316 ASSERT(msp->ms_group == NULL);
319 avl_add(&mg->mg_metaslab_tree, msp);
320 mutex_exit(&mg->mg_lock);
324 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
326 mutex_enter(&mg->mg_lock);
327 ASSERT(msp->ms_group == mg);
328 avl_remove(&mg->mg_metaslab_tree, msp);
329 msp->ms_group = NULL;
330 mutex_exit(&mg->mg_lock);
334 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
337 * Although in principle the weight can be any value, in
338 * practice we do not use values in the range [1, 510].
340 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
341 ASSERT(MUTEX_HELD(&msp->ms_lock));
343 mutex_enter(&mg->mg_lock);
344 ASSERT(msp->ms_group == mg);
345 avl_remove(&mg->mg_metaslab_tree, msp);
346 msp->ms_weight = weight;
347 avl_add(&mg->mg_metaslab_tree, msp);
348 mutex_exit(&mg->mg_lock);
352 * ==========================================================================
353 * Common allocator routines
354 * ==========================================================================
357 metaslab_segsize_compare(const void *x1, const void *x2)
359 const space_seg_t *s1 = x1;
360 const space_seg_t *s2 = x2;
361 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
362 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
364 if (ss_size1 < ss_size2)
366 if (ss_size1 > ss_size2)
369 if (s1->ss_start < s2->ss_start)
371 if (s1->ss_start > s2->ss_start)
377 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
378 defined(WITH_DF_BLOCK_ALLOCATOR) || \
379 defined(WITH_CDF_BLOCK_ALLOCATOR)
381 * This is a helper function that can be used by the allocator to find
382 * a suitable block to allocate. This will search the specified AVL
383 * tree looking for a block that matches the specified criteria.
386 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
389 space_seg_t *ss, ssearch;
392 ssearch.ss_start = *cursor;
393 ssearch.ss_end = *cursor + size;
395 ss = avl_find(t, &ssearch, &where);
397 ss = avl_nearest(t, where, AVL_AFTER);
400 uint64_t offset = P2ROUNDUP(ss->ss_start, align);
402 if (offset + size <= ss->ss_end) {
403 *cursor = offset + size;
406 ss = AVL_NEXT(t, ss);
410 * If we know we've searched the whole map (*cursor == 0), give up.
411 * Otherwise, reset the cursor to the beginning and try again.
417 return (metaslab_block_picker(t, cursor, size, align));
419 #endif /* WITH_FF/DF/CDF_BLOCK_ALLOCATOR */
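/*
 * Example of the cursor behaviour (illustrative values): with
 * *cursor = 0x100000, size = 0x20000 and align = 0x20000, the picker starts
 * at the first segment that reaches offset 0x100000, rounds each ss_start
 * up to a 128K boundary, and returns the first aligned offset that still
 * leaves size bytes before ss_end, advancing *cursor past the allocation.
 * If the walk falls off the end of the tree, the cursor is reset to 0 and
 * the scan is retried once from the beginning; a second miss returns -1ULL
 * ("no space").
 */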
422 metaslab_pp_load(space_map_t *sm)
426 ASSERT(sm->sm_ppd == NULL);
427 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_PUSHPAGE);
429 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_PUSHPAGE);
430 avl_create(sm->sm_pp_root, metaslab_segsize_compare,
431 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
433 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
434 avl_add(sm->sm_pp_root, ss);
438 metaslab_pp_unload(space_map_t *sm)
442 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
445 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
446 /* tear down the tree */
449 avl_destroy(sm->sm_pp_root);
450 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
451 sm->sm_pp_root = NULL;
456 metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
458 /* No need to update cursor */
463 metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
465 /* No need to update cursor */
469 * Return the maximum contiguous segment within the metaslab.
472 metaslab_pp_maxsize(space_map_t *sm)
474 avl_tree_t *t = sm->sm_pp_root;
477 if (t == NULL || (ss = avl_last(t)) == NULL)
480 return (ss->ss_end - ss->ss_start);
483 #if defined(WITH_FF_BLOCK_ALLOCATOR)
485 * ==========================================================================
486 * The first-fit block allocator
487 * ==========================================================================
490 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
492 avl_tree_t *t = &sm->sm_root;
493 uint64_t align = size & -size;
494 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
496 return (metaslab_block_picker(t, cursor, size, align));
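/*
 * Example of the per-alignment cursors (illustrative numbers): sm_ppd,
 * allocated as 64 uint64_t's in metaslab_pp_load(), keeps one rotating
 * cursor per power-of-two alignment.  For size = 0x6000 (24K),
 * align = size & -size = 0x2000 (8K) and highbit(align) = 14, so cursor
 * slot 13 is used; the next 8K-aligned request resumes where the previous
 * one left off instead of rescanning the space map from offset 0.
 */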
501 metaslab_ff_fragmented(space_map_t *sm)
506 static space_map_ops_t metaslab_ff_ops = {
513 metaslab_ff_fragmented
516 space_map_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
517 #endif /* WITH_FF_BLOCK_ALLOCATOR */
519 #if defined(WITH_DF_BLOCK_ALLOCATOR)
521 * ==========================================================================
522 * Dynamic block allocator -
523 * Uses the first fit allocation scheme until space gets low and then
524 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
525 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
526 * ==========================================================================
529 metaslab_df_alloc(space_map_t *sm, uint64_t size)
531 avl_tree_t *t = &sm->sm_root;
532 uint64_t align = size & -size;
533 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
534 uint64_t max_size = metaslab_pp_maxsize(sm);
535 int free_pct = sm->sm_space * 100 / sm->sm_size;
537 ASSERT(MUTEX_HELD(sm->sm_lock));
538 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
544 * If we're running low on space, switch to using the size
545 * sorted AVL tree (best-fit).
547 if (max_size < metaslab_df_alloc_threshold ||
548 free_pct < metaslab_df_free_pct) {
553 return (metaslab_block_picker(t, cursor, size, 1ULL));
557 metaslab_df_fragmented(space_map_t *sm)
559 uint64_t max_size = metaslab_pp_maxsize(sm);
560 int free_pct = sm->sm_space * 100 / sm->sm_size;
562 if (max_size >= metaslab_df_alloc_threshold &&
563 free_pct >= metaslab_df_free_pct)
569 static space_map_ops_t metaslab_df_ops = {
576 metaslab_df_fragmented
579 space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
580 #endif /* WITH_DF_BLOCK_ALLOCATOR */
583 * ==========================================================================
584 * Other experimental allocators
585 * ==========================================================================
587 #if defined(WITH_CDF_BLOCK_ALLOCATOR)
589 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
591 avl_tree_t *t = &sm->sm_root;
592 uint64_t *cursor = (uint64_t *)sm->sm_ppd;
593 uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
594 uint64_t max_size = metaslab_pp_maxsize(sm);
595 uint64_t rsize = size;
598 ASSERT(MUTEX_HELD(sm->sm_lock));
599 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
604 ASSERT3U(*extent_end, >=, *cursor);
607 * If we're running low on space, switch to using the size
608 * sorted AVL tree (best-fit).
610 if ((*cursor + size) > *extent_end) {
613 *cursor = *extent_end = 0;
615 if (max_size > 2 * SPA_MAXBLOCKSIZE)
616 rsize = MIN(metaslab_min_alloc_size, max_size);
617 offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
619 *cursor = offset + size;
621 offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
623 ASSERT3U(*cursor, <=, *extent_end);
628 metaslab_cdf_fragmented(space_map_t *sm)
630 uint64_t max_size = metaslab_pp_maxsize(sm);
632 if (max_size > (metaslab_min_alloc_size * 10))
637 static space_map_ops_t metaslab_cdf_ops = {
644 metaslab_cdf_fragmented
647 space_map_ops_t *zfs_metaslab_ops = &metaslab_cdf_ops;
648 #endif /* WITH_CDF_BLOCK_ALLOCATOR */
650 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
651 uint64_t metaslab_ndf_clump_shift = 4;
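/*
 * Worked example (illustrative): when a request misses its cursor,
 * metaslab_ndf_alloc() below searches the size-sorted tree for a free
 * segment of at least MIN(max_size, 1ULL << (hbit + metaslab_ndf_clump_shift))
 * bytes.  For a 16K request, hbit = highbit(16K) = 15, so with the default
 * shift of 4 the allocator prefers a chunk of roughly 512K or larger and
 * then carves subsequent allocations of that size class out of the same
 * "clump" via the per-size cursor.
 */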
654 metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
656 avl_tree_t *t = &sm->sm_root;
658 space_seg_t *ss, ssearch;
659 uint64_t hbit = highbit(size);
660 uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
661 uint64_t max_size = metaslab_pp_maxsize(sm);
663 ASSERT(MUTEX_HELD(sm->sm_lock));
664 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
669 ssearch.ss_start = *cursor;
670 ssearch.ss_end = *cursor + size;
672 ss = avl_find(t, &ssearch, &where);
673 if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
676 ssearch.ss_start = 0;
677 ssearch.ss_end = MIN(max_size,
678 1ULL << (hbit + metaslab_ndf_clump_shift));
679 ss = avl_find(t, &ssearch, &where);
681 ss = avl_nearest(t, where, AVL_AFTER);
686 if (ss->ss_start + size <= ss->ss_end) {
687 *cursor = ss->ss_start + size;
688 return (ss->ss_start);
695 metaslab_ndf_fragmented(space_map_t *sm)
697 uint64_t max_size = metaslab_pp_maxsize(sm);
699 if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
705 static space_map_ops_t metaslab_ndf_ops = {
712 metaslab_ndf_fragmented
715 space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
716 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
719 * ==========================================================================
720 * Metaslabs
721 * ==========================================================================
724 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
725 uint64_t start, uint64_t size, uint64_t txg)
727 vdev_t *vd = mg->mg_vd;
730 msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
731 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
733 msp->ms_smo_syncing = *smo;
736 * We create the main space map here, but we don't create the
737 * allocmaps and freemaps until metaslab_sync_done(). This serves
738 * two purposes: it allows metaslab_sync_done() to detect the
739 * addition of new space; and for debugging, it ensures that we'd
740 * data fault on any attempt to use this metaslab before it's ready.
742 space_map_create(&msp->ms_map, start, size,
743 vd->vdev_ashift, &msp->ms_lock);
745 metaslab_group_add(mg, msp);
747 if (metaslab_debug && smo->smo_object != 0) {
748 mutex_enter(&msp->ms_lock);
749 VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops,
750 SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
751 mutex_exit(&msp->ms_lock);
755 * If we're opening an existing pool (txg == 0) or creating
756 * a new one (txg == TXG_INITIAL), all space is available now.
757 * If we're adding space to an existing pool, the new space
758 * does not become available until after this txg has synced.
760 if (txg <= TXG_INITIAL)
761 metaslab_sync_done(msp, 0);
764 vdev_dirty(vd, 0, NULL, txg);
765 vdev_dirty(vd, VDD_METASLAB, msp, txg);
772 metaslab_fini(metaslab_t *msp)
774 metaslab_group_t *mg = msp->ms_group;
777 vdev_space_update(mg->mg_vd,
778 -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size);
780 metaslab_group_remove(mg, msp);
782 mutex_enter(&msp->ms_lock);
784 space_map_unload(&msp->ms_map);
785 space_map_destroy(&msp->ms_map);
787 for (t = 0; t < TXG_SIZE; t++) {
788 space_map_destroy(&msp->ms_allocmap[t]);
789 space_map_destroy(&msp->ms_freemap[t]);
792 for (t = 0; t < TXG_DEFER_SIZE; t++)
793 space_map_destroy(&msp->ms_defermap[t]);
795 ASSERT3S(msp->ms_deferspace, ==, 0);
797 mutex_exit(&msp->ms_lock);
798 mutex_destroy(&msp->ms_lock);
800 kmem_free(msp, sizeof (metaslab_t));
803 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
804 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
805 #define METASLAB_ACTIVE_MASK \
806 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
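/*
 * Example of how these bits combine with the space-derived weight
 * (illustrative values): a metaslab with 10 GB free that has been activated
 * as a primary carries
 *
 *	ms_weight == METASLAB_WEIGHT_PRIMARY | (10ULL << 30)
 *
 * so (ms_weight & METASLAB_ACTIVE_MASK) is nonzero and it sorts ahead of
 * every inactive metaslab.  Passivation installs a weight with both high
 * bits clear, dropping it back among the metaslabs ranked purely by free
 * space.
 */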
809 metaslab_weight(metaslab_t *msp)
811 metaslab_group_t *mg = msp->ms_group;
812 space_map_t *sm = &msp->ms_map;
813 space_map_obj_t *smo = &msp->ms_smo;
814 vdev_t *vd = mg->mg_vd;
815 uint64_t weight, space;
817 ASSERT(MUTEX_HELD(&msp->ms_lock));
820 * The baseline weight is the metaslab's free space.
822 space = sm->sm_size - smo->smo_alloc;
826 * Modern disks have uniform bit density and constant angular velocity.
827 * Therefore, the outer recording zones are faster (higher bandwidth)
828 * than the inner zones by the ratio of outer to inner track diameter,
829 * which is typically around 2:1. We account for this by assigning
830 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
831 * In effect, this means that we'll select the metaslab with the most
832 * free bandwidth rather than simply the one with the most free space.
834 weight = 2 * weight -
835 ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
836 ASSERT(weight >= space && weight <= 2 * space);
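/*
 * Worked example of the formula above (illustrative numbers): on a vdev
 * with vdev_ms_count = 100, a metaslab at index 25
 * (sm_start >> vdev_ms_shift == 25) holding 10 GB of free space gets
 *
 *	weight = 2 * 10 GB - (25 * 10 GB) / 100 = 17.5 GB
 *
 * while the last metaslab (index 99) with the same free space gets just
 * over 10 GB, so lower-offset (outer, faster) metaslabs win when free
 * space is equal.
 */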
839 * For locality, assign higher weight to metaslabs which have
840 * a lower offset than what we've already activated.
842 if (sm->sm_start <= mg->mg_bonus_area)
843 weight *= (metaslab_smo_bonus_pct / 100);
844 ASSERT(weight >= space &&
845 weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);
847 if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
849 * If this metaslab is one we're actively using, adjust its
850 * weight to make it preferable to any inactive metaslab so
851 * we'll polish it off.
853 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
859 metaslab_prefetch(metaslab_group_t *mg)
861 spa_t *spa = mg->mg_vd->vdev_spa;
863 avl_tree_t *t = &mg->mg_metaslab_tree;
866 mutex_enter(&mg->mg_lock);
869 * Prefetch the next potential metaslabs
871 for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
872 space_map_t *sm = &msp->ms_map;
873 space_map_obj_t *smo = &msp->ms_smo;
875 /* If we have reached our prefetch limit then we're done */
876 if (m >= metaslab_prefetch_limit)
879 if (!sm->sm_loaded && smo->smo_object != 0) {
880 mutex_exit(&mg->mg_lock);
881 dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
882 0ULL, smo->smo_objsize);
883 mutex_enter(&mg->mg_lock);
886 mutex_exit(&mg->mg_lock);
890 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
892 metaslab_group_t *mg = msp->ms_group;
893 space_map_t *sm = &msp->ms_map;
894 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
897 ASSERT(MUTEX_HELD(&msp->ms_lock));
899 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
900 space_map_load_wait(sm);
901 if (!sm->sm_loaded) {
902 int error = space_map_load(sm, sm_ops, SM_FREE,
904 spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
906 metaslab_group_sort(msp->ms_group, msp, 0);
909 for (t = 0; t < TXG_DEFER_SIZE; t++)
910 space_map_walk(&msp->ms_defermap[t],
911 space_map_claim, sm);
916 * Track the bonus area as we activate new metaslabs.
918 if (sm->sm_start > mg->mg_bonus_area) {
919 mutex_enter(&mg->mg_lock);
920 mg->mg_bonus_area = sm->sm_start;
921 mutex_exit(&mg->mg_lock);
924 metaslab_group_sort(msp->ms_group, msp,
925 msp->ms_weight | activation_weight);
927 ASSERT(sm->sm_loaded);
928 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
934 metaslab_passivate(metaslab_t *msp, uint64_t size)
937 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
938 * this metaslab again. In that case, it had better be empty,
939 * or we would be leaving space on the table.
941 ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
942 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
943 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
947 * Write a metaslab to disk in the context of the specified transaction group.
950 metaslab_sync(metaslab_t *msp, uint64_t txg)
952 vdev_t *vd = msp->ms_group->mg_vd;
953 spa_t *spa = vd->vdev_spa;
954 objset_t *mos = spa_meta_objset(spa);
955 space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
956 space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
957 space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
958 space_map_t *sm = &msp->ms_map;
959 space_map_obj_t *smo = &msp->ms_smo_syncing;
964 ASSERT(!vd->vdev_ishole);
966 if (allocmap->sm_space == 0 && freemap->sm_space == 0)
970 * The only state that can actually be changing concurrently with
971 * metaslab_sync() is the metaslab's ms_map. No other thread can
972 * be modifying this txg's allocmap, freemap, freed_map, or smo.
973 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
974 * We drop it whenever we call into the DMU, because the DMU
975 * can call down to us (e.g. via zio_free()) at any time.
978 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
980 if (smo->smo_object == 0) {
981 ASSERT(smo->smo_objsize == 0);
982 ASSERT(smo->smo_alloc == 0);
983 smo->smo_object = dmu_object_alloc(mos,
984 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
985 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
986 ASSERT(smo->smo_object != 0);
987 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
988 (sm->sm_start >> vd->vdev_ms_shift),
989 sizeof (uint64_t), &smo->smo_object, tx);
992 mutex_enter(&msp->ms_lock);
994 space_map_walk(freemap, space_map_add, freed_map);
996 if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
997 2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
999 * The in-core space map representation is twice as compact
1000 * as the on-disk one, so it's time to condense the latter
1001 * by generating a pure allocmap from first principles.
1003 * This metaslab is 100% allocated,
1004 * minus the content of the in-core map (sm),
1005 * minus what's been freed this txg (freed_map),
1006 * minus deferred frees (ms_defermap[]),
1007 * minus allocations from txgs in the future
1008 * (because they haven't been committed yet).
1010 space_map_vacate(allocmap, NULL, NULL);
1011 space_map_vacate(freemap, NULL, NULL);
1013 space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);
1015 space_map_walk(sm, space_map_remove, allocmap);
1016 space_map_walk(freed_map, space_map_remove, allocmap);
1018 for (t = 0; t < TXG_DEFER_SIZE; t++)
1019 space_map_walk(&msp->ms_defermap[t],
1020 space_map_remove, allocmap);
1022 for (t = 1; t < TXG_CONCURRENT_STATES; t++)
1023 space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
1024 space_map_remove, allocmap);
1026 mutex_exit(&msp->ms_lock);
1027 space_map_truncate(smo, mos, tx);
1028 mutex_enter(&msp->ms_lock);
1031 space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
1032 space_map_sync(freemap, SM_FREE, smo, mos, tx);
1034 mutex_exit(&msp->ms_lock);
1036 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1037 dmu_buf_will_dirty(db, tx);
1038 ASSERT3U(db->db_size, >=, sizeof (*smo));
1039 bcopy(smo, db->db_data, sizeof (*smo));
1040 dmu_buf_rele(db, FTAG);
1046 * Called after a transaction group has completely synced to mark
1047 * all of the metaslab's free space as usable.
1050 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1052 space_map_obj_t *smo = &msp->ms_smo;
1053 space_map_obj_t *smosync = &msp->ms_smo_syncing;
1054 space_map_t *sm = &msp->ms_map;
1055 space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1056 space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1057 metaslab_group_t *mg = msp->ms_group;
1058 vdev_t *vd = mg->mg_vd;
1059 int64_t alloc_delta, defer_delta;
1062 ASSERT(!vd->vdev_ishole);
1064 mutex_enter(&msp->ms_lock);
1067 * If this metaslab is just becoming available, initialize its
1068 * allocmaps and freemaps and add its capacity to the vdev.
1070 if (freed_map->sm_size == 0) {
1071 for (t = 0; t < TXG_SIZE; t++) {
1072 space_map_create(&msp->ms_allocmap[t], sm->sm_start,
1073 sm->sm_size, sm->sm_shift, sm->sm_lock);
1074 space_map_create(&msp->ms_freemap[t], sm->sm_start,
1075 sm->sm_size, sm->sm_shift, sm->sm_lock);
1078 for (t = 0; t < TXG_DEFER_SIZE; t++)
1079 space_map_create(&msp->ms_defermap[t], sm->sm_start,
1080 sm->sm_size, sm->sm_shift, sm->sm_lock);
1082 vdev_space_update(vd, 0, 0, sm->sm_size);
1085 alloc_delta = smosync->smo_alloc - smo->smo_alloc;
1086 defer_delta = freed_map->sm_space - defer_map->sm_space;
1088 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1090 ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
1091 ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);
1094 * If there's a space_map_load() in progress, wait for it to complete
1095 * so that we have a consistent view of the in-core space map.
1096 * Then, add defer_map (oldest deferred frees) to this map and
1097 * transfer freed_map (this txg's frees) to defer_map.
1099 space_map_load_wait(sm);
1100 space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
1101 space_map_vacate(freed_map, space_map_add, defer_map);
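/*
 * Example of the deferred-free rotation above (illustrative txg numbers,
 * assuming TXG_DEFER_SIZE == 2): blocks freed while txg 100 was open
 * accumulate in ms_freemap[100 & TXG_MASK]; when txg 100 finishes syncing
 * they move into ms_defermap[100 % 2], and only when txg 102 finishes
 * syncing is that defer map vacated back into the free ms_map, making the
 * space allocatable again.  Keeping just-freed blocks out of circulation
 * for a couple of txgs is what allows recovery to roll back to a recent
 * txg without finding its blocks overwritten.
 */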
1105 msp->ms_deferspace += defer_delta;
1106 ASSERT3S(msp->ms_deferspace, >=, 0);
1107 ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
1108 if (msp->ms_deferspace != 0) {
1110 * Keep syncing this metaslab until all deferred frees
1111 * are back in circulation.
1113 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1117 * If the map is loaded but no longer active, evict it as soon as all
1118 * future allocations have synced. (If we unloaded it now and then
1119 * loaded a moment later, the map wouldn't reflect those allocations.)
1121 if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1124 for (t = 1; t < TXG_CONCURRENT_STATES; t++)
1125 if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
1128 if (evictable && !metaslab_debug)
1129 space_map_unload(sm);
1132 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1134 mutex_exit(&msp->ms_lock);
1138 metaslab_sync_reassess(metaslab_group_t *mg)
1140 vdev_t *vd = mg->mg_vd;
1141 int64_t failures = mg->mg_alloc_failures;
1145 * Re-evaluate all metaslabs which have lower offsets than the
1146 * bonus area.
1148 for (m = 0; m < vd->vdev_ms_count; m++) {
1149 metaslab_t *msp = vd->vdev_ms[m];
1151 if (msp->ms_map.sm_start > mg->mg_bonus_area)
1154 mutex_enter(&msp->ms_lock);
1155 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1156 mutex_exit(&msp->ms_lock);
1159 atomic_add_64(&mg->mg_alloc_failures, -failures);
1162 * Prefetch the next potential metaslabs
1164 metaslab_prefetch(mg);
1168 metaslab_distance(metaslab_t *msp, dva_t *dva)
1170 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1171 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1172 uint64_t start = msp->ms_map.sm_start >> ms_shift;
1174 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1175 return (1ULL << 63);
1178 return ((start - offset) << ms_shift);
1180 return ((offset - start) << ms_shift);
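/*
 * Worked example (illustrative): with 1 GB metaslabs (vdev_ms_shift == 30),
 * a DVA at offset 5 GB on the same vdev and a candidate metaslab starting
 * at 12 GB give offset = 5 and start = 12, so the distance is
 * (12 - 5) << 30 = 7 GB.  A DVA on a different top-level vdev returns
 * 1ULL << 63, effectively "infinitely far", so it never causes the
 * too-close rejection in metaslab_group_alloc().
 */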
1185 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1186 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1188 spa_t *spa = mg->mg_vd->vdev_spa;
1189 metaslab_t *msp = NULL;
1190 uint64_t offset = -1ULL;
1191 avl_tree_t *t = &mg->mg_metaslab_tree;
1192 uint64_t activation_weight;
1193 uint64_t target_distance;
1196 activation_weight = METASLAB_WEIGHT_PRIMARY;
1197 for (i = 0; i < d; i++) {
1198 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1199 activation_weight = METASLAB_WEIGHT_SECONDARY;
1205 boolean_t was_active;
1207 mutex_enter(&mg->mg_lock);
1208 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1209 if (msp->ms_weight < asize) {
1210 spa_dbgmsg(spa, "%s: failed to meet weight "
1211 "requirement: vdev %llu, txg %llu, mg %p, "
1212 "msp %p, psize %llu, asize %llu, "
1213 "failures %llu, weight %llu",
1214 spa_name(spa), mg->mg_vd->vdev_id, txg,
1215 mg, msp, psize, asize,
1216 mg->mg_alloc_failures, msp->ms_weight);
1217 mutex_exit(&mg->mg_lock);
1220 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1221 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1224 target_distance = min_distance +
1225 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
1227 for (i = 0; i < d; i++)
1228 if (metaslab_distance(msp, &dva[i]) <
1234 mutex_exit(&mg->mg_lock);
1239 * If we've already reached the allowable number of failed
1240 * allocation attempts on this metaslab group then we
1241 * consider skipping it. We skip it only if we're allowed
1242 * to "fast" gang, the physical size is larger than
1243 * a gang block, and we're attempting to allocate from
1244 * the primary metaslab.
1246 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1247 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1248 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1249 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1250 "vdev %llu, txg %llu, mg %p, psize %llu, "
1251 "asize %llu, failures %llu", spa_name(spa),
1252 mg->mg_vd->vdev_id, txg, mg, psize, asize,
1253 mg->mg_alloc_failures);
1257 mutex_enter(&msp->ms_lock);
1260 * Ensure that the metaslab we have selected is still
1261 * capable of handling our request. It's possible that
1262 * another thread may have changed the weight while we
1263 * were blocked on the metaslab lock.
1265 if (msp->ms_weight < asize || (was_active &&
1266 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1267 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1268 mutex_exit(&msp->ms_lock);
1272 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1273 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1274 metaslab_passivate(msp,
1275 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1276 mutex_exit(&msp->ms_lock);
1280 if (metaslab_activate(msp, activation_weight) != 0) {
1281 mutex_exit(&msp->ms_lock);
1285 if ((offset = space_map_alloc(&msp->ms_map, asize)) != -1ULL)
1288 atomic_inc_64(&mg->mg_alloc_failures);
1290 metaslab_passivate(msp, space_map_maxsize(&msp->ms_map));
1292 mutex_exit(&msp->ms_lock);
1295 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
1296 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1298 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, asize);
1300 mutex_exit(&msp->ms_lock);
1306 * Allocate a block for the specified i/o.
1309 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1310 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1312 metaslab_group_t *mg, *fast_mg, *rotor;
1316 int zio_lock = B_FALSE;
1317 boolean_t allocatable;
1318 uint64_t offset = -1ULL;
1322 ASSERT(!DVA_IS_VALID(&dva[d]));
1325 * For testing, make some blocks above a certain size be gang blocks.
1327 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1330 if (flags & METASLAB_FASTWRITE)
1331 mutex_enter(&mc->mc_fastwrite_lock);
1334 * Start at the rotor and loop through all mgs until we find something.
1335 * Note that there's no locking on mc_rotor or mc_aliquot because
1336 * nothing actually breaks if we miss a few updates -- we just won't
1337 * allocate quite as evenly. It all balances out over time.
1339 * If we are doing ditto or log blocks, try to spread them across
1340 * consecutive vdevs. If we're forced to reuse a vdev before we've
1341 * allocated all of our ditto blocks, then try and spread them out on
1342 * that vdev as much as possible. If it turns out to not be possible,
1343 * gradually lower our standards until anything becomes acceptable.
1344 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1345 * gives us hope of containing our fault domains to something we're
1346 * able to reason about. Otherwise, any two top-level vdev failures
1347 * will guarantee the loss of data. With consecutive allocation,
1348 * only two adjacent top-level vdev failures will result in data loss.
1350 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1351 * ourselves on the same vdev as our gang block header. That
1352 * way, we can hope for locality in vdev_cache, plus it makes our
1353 * fault domains something tractable.
1356 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1359 * It's possible the vdev we're using as the hint no
1360 * longer exists (i.e. removed). Consult the rotor when
1361 * that happens.
1366 if (flags & METASLAB_HINTBP_AVOID &&
1367 mg->mg_next != NULL)
1372 } else if (d != 0) {
1373 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1374 mg = vd->vdev_mg->mg_next;
1375 } else if (flags & METASLAB_FASTWRITE) {
1376 mg = fast_mg = mc->mc_rotor;
1379 if (fast_mg->mg_vd->vdev_pending_fastwrite <
1380 mg->mg_vd->vdev_pending_fastwrite)
1382 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
1389 * If the hint put us into the wrong metaslab class, or into a
1390 * metaslab group that has been passivated, just follow the rotor.
1392 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1399 ASSERT(mg->mg_activation_count == 1);
1404 * Don't allocate from faulted devices.
1407 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1408 allocatable = vdev_allocatable(vd);
1409 spa_config_exit(spa, SCL_ZIO, FTAG);
1411 allocatable = vdev_allocatable(vd);
1417 * Avoid writing single-copy data to a failing vdev
1419 if ((vd->vdev_stat.vs_write_errors > 0 ||
1420 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1421 d == 0 && dshift == 3) {
1426 ASSERT(mg->mg_class == mc);
1428 distance = vd->vdev_asize >> dshift;
1429 if (distance <= (1ULL << vd->vdev_ms_shift))
1434 asize = vdev_psize_to_asize(vd, psize);
1435 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1437 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1439 if (offset != -1ULL) {
1441 * If we've just selected this metaslab group,
1442 * figure out whether the corresponding vdev is
1443 * over- or under-used relative to the pool,
1444 * and set an allocation bias to even it out.
1446 if (mc->mc_aliquot == 0) {
1447 vdev_stat_t *vs = &vd->vdev_stat;
1450 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1451 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1454 * Calculate how much more or less we should
1455 * try to allocate from this device during
1456 * this iteration around the rotor.
1457 * For example, if a device is 80% full
1458 * and the pool is 20% full then we should
1459 * reduce allocations by 60% on this device.
1461 * mg_bias = (20 - 80) * 512K / 100 = -307K
1463 * This reduces allocations by 307K for this
1466 mg->mg_bias = ((cu - vu) *
1467 (int64_t)mg->mg_aliquot) / 100;
1470 if ((flags & METASLAB_FASTWRITE) ||
1471 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
1472 mg->mg_aliquot + mg->mg_bias) {
1473 mc->mc_rotor = mg->mg_next;
1477 DVA_SET_VDEV(&dva[d], vd->vdev_id);
1478 DVA_SET_OFFSET(&dva[d], offset);
1479 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
1480 DVA_SET_ASIZE(&dva[d], asize);
1482 if (flags & METASLAB_FASTWRITE) {
1483 atomic_add_64(&vd->vdev_pending_fastwrite,
1485 mutex_exit(&mc->mc_fastwrite_lock);
1491 mc->mc_rotor = mg->mg_next;
1493 } while ((mg = mg->mg_next) != rotor);
1497 ASSERT(dshift < 64);
1501 if (!allocatable && !zio_lock) {
1507 bzero(&dva[d], sizeof (dva_t));
1509 if (flags & METASLAB_FASTWRITE)
1510 mutex_exit(&mc->mc_fastwrite_lock);
1515 * Free the block represented by DVA in the context of the specified
1516 * transaction group.
1519 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
1521 uint64_t vdev = DVA_GET_VDEV(dva);
1522 uint64_t offset = DVA_GET_OFFSET(dva);
1523 uint64_t size = DVA_GET_ASIZE(dva);
1527 ASSERT(DVA_IS_VALID(dva));
1529 if (txg > spa_freeze_txg(spa))
1532 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1533 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
1534 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
1535 (u_longlong_t)vdev, (u_longlong_t)offset);
1540 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1542 if (DVA_GET_GANG(dva))
1543 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1545 mutex_enter(&msp->ms_lock);
1548 space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
1550 space_map_free(&msp->ms_map, offset, size);
1552 if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
1553 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1554 space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
1557 mutex_exit(&msp->ms_lock);
1561 * Intent log support: upon opening the pool after a crash, notify the SPA
1562 * of blocks that the intent log has allocated for immediate write, but
1563 * which are still considered free by the SPA because the last transaction
1564 * group didn't commit yet.
1567 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
1569 uint64_t vdev = DVA_GET_VDEV(dva);
1570 uint64_t offset = DVA_GET_OFFSET(dva);
1571 uint64_t size = DVA_GET_ASIZE(dva);
1576 ASSERT(DVA_IS_VALID(dva));
1578 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1579 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
1582 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1584 if (DVA_GET_GANG(dva))
1585 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1587 mutex_enter(&msp->ms_lock);
1589 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded)
1590 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
1592 if (error == 0 && !space_map_contains(&msp->ms_map, offset, size))
1595 if (error || txg == 0) { /* txg == 0 indicates dry run */
1596 mutex_exit(&msp->ms_lock);
1600 space_map_claim(&msp->ms_map, offset, size);
1602 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
1603 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
1604 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1605 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
1608 mutex_exit(&msp->ms_lock);
1614 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
1615 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
1617 dva_t *dva = bp->blk_dva;
1618 dva_t *hintdva = hintbp->blk_dva;
1621 ASSERT(bp->blk_birth == 0);
1622 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
1624 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1626 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
1627 spa_config_exit(spa, SCL_ALLOC, FTAG);
1631 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
1632 ASSERT(BP_GET_NDVAS(bp) == 0);
1633 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
1635 for (d = 0; d < ndvas; d++) {
1636 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
1639 for (d--; d >= 0; d--) {
1640 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
1641 bzero(&dva[d], sizeof (dva_t));
1643 spa_config_exit(spa, SCL_ALLOC, FTAG);
1648 ASSERT(BP_GET_NDVAS(bp) == ndvas);
1650 spa_config_exit(spa, SCL_ALLOC, FTAG);
1652 BP_SET_BIRTH(bp, txg, txg);
1658 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
1660 const dva_t *dva = bp->blk_dva;
1661 int d, ndvas = BP_GET_NDVAS(bp);
1663 ASSERT(!BP_IS_HOLE(bp));
1664 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
1666 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
1668 for (d = 0; d < ndvas; d++)
1669 metaslab_free_dva(spa, &dva[d], txg, now);
1671 spa_config_exit(spa, SCL_FREE, FTAG);
1675 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
1677 const dva_t *dva = bp->blk_dva;
1678 int ndvas = BP_GET_NDVAS(bp);
1681 ASSERT(!BP_IS_HOLE(bp));
1685 * First do a dry run to make sure all DVAs are claimable,
1686 * so we don't have to unwind from partial failures below.
1688 if ((error = metaslab_claim(spa, bp, 0)) != 0)
1692 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1694 for (d = 0; d < ndvas; d++)
1695 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
1698 spa_config_exit(spa, SCL_ALLOC, FTAG);
1700 ASSERT(error == 0 || txg == 0);
1705 void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
1707 const dva_t *dva = bp->blk_dva;
1708 int ndvas = BP_GET_NDVAS(bp);
1709 uint64_t psize = BP_GET_PSIZE(bp);
1713 ASSERT(!BP_IS_HOLE(bp));
1716 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1718 for (d = 0; d < ndvas; d++) {
1719 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
1721 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
1724 spa_config_exit(spa, SCL_VDEV, FTAG);
1727 void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
1729 const dva_t *dva = bp->blk_dva;
1730 int ndvas = BP_GET_NDVAS(bp);
1731 uint64_t psize = BP_GET_PSIZE(bp);
1735 ASSERT(!BP_IS_HOLE(bp));
1738 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1740 for (d = 0; d < ndvas; d++) {
1741 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
1743 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
1744 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
1747 spa_config_exit(spa, SCL_VDEV, FTAG);
1750 #if defined(_KERNEL) && defined(HAVE_SPL)
1751 module_param(metaslab_debug, int, 0644);
1752 MODULE_PARM_DESC(metaslab_debug, "keep space maps in core to verify frees");
1753 #endif /* _KERNEL && HAVE_SPL */