/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"@(#)metaslab.c	1.17	07/11/27 SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
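/*
 * metaslab_aliquot is the base amount allocated from one metaslab group
 * before the rotor advances to the next group; metaslab_group_create()
 * scales it by the number of top-level vdev children and
 * metaslab_alloc_dva() biases it to even out vdev usage.
 * metaslab_gang_bang is a test knob: allocations at least this large are
 * occasionally forced to gang, and the default of SPA_MAXBLOCKSIZE + 1
 * disables it.
 */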
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_create(void)
	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

metaslab_class_destroy(metaslab_class_t *mc)
	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);

	kmem_free(mc, sizeof (metaslab_class_t));

metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
	mgnext = mgprev->mg_next;

metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mc->mc_rotor = mgnext;
	mgprev->mg_next = mgnext;
	mgnext->mg_prev = mgprev;
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
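/*
 * Comparison function for the per-group AVL tree of metaslabs.  Metaslabs
 * sort by descending weight (ties broken by space map offset, which keeps
 * entries unique), so metaslab_group_alloc() can walk from avl_first() and
 * stop at the first metaslab whose weight is too small for the request.
 */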
metaslab_compare(const void *x1, const void *x2)
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
	if (m1->ms_weight > m2->ms_weight)

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)

	ASSERT3P(m1, ==, m2);
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
	metaslab_class_add(mc, mg);

metaslab_group_destroy(metaslab_group_t *mg)
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));

metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);

metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
metaslab_ff_load(space_map_t *sm)
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

metaslab_ff_unload(space_map_t *sm)
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
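/*
 * First-fit allocation with per-alignment cursors: sm_ppd holds one cursor
 * for each power-of-two alignment class (indexed by the highbit of the
 * request's natural alignment), so repeated allocations of a given size
 * resume scanning where the previous one left off instead of rewalking the
 * front of the space map every time.
 */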
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	space_seg_t *ss, ssearch;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	ss = avl_nearest(t, where, AVL_AFTER);

		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
		ss = AVL_NEXT(t, ss);

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	return (metaslab_ff_alloc(sm, size));
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
	/* No need to update cursor */

metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
	/* No need to update cursor */

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free
};
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
	vdev_t *vd = mg->mg_vd;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	/*
	 * The vdev is dirty, but the metaslab isn't -- it just needs
	 * to have metaslab_sync_done() invoked from vdev_sync_done().
	 * [We could just dirty the metaslab, but that would cause us
	 * to allocate a space map object for it, which is wasteful
	 * and would mess up the locality logic in metaslab_weight().]
	 */
	ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
	vdev_dirty(vd, 0, NULL, txg);
	vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
metaslab_fini(metaslab_t *msp)
	metaslab_group_t *mg = msp->ms_group;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc, B_TRUE);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2
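/*
 * The top two bits of a metaslab's weight are activation flags (primary or
 * secondary allocation slot); the low bits encode free space scaled by the
 * bandwidth and locality bonuses computed in metaslab_weight() below.
 */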
metaslab_weight(metaslab_t *msp)
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
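	/*
	 * Worked example: for the metaslab at index i (sm_start >> ms_shift)
	 * of ms_count metaslabs, the multiplier is 2 - i/ms_count.  The first
	 * metaslab (i == 0) gets weight == 2 * space; the last one gets just
	 * over 1 * space, which is what the ASSERT above checks.
	 */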
	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
	ASSERT(weight >= space &&
	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
	space_map_t *sm = &msp->ms_map;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int error = space_map_load(sm, &metaslab_ff_ops,
		    SM_FREE, &msp->ms_smo,
		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
			metaslab_group_sort(msp->ms_group, msp, 0);

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);

	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
metaslab_passivate(metaslab_t *msp, uint64_t size)
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
metaslab_sync(metaslab_t *msp, uint64_t txg)
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
	mutex_enter(&msp->ms_lock);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		mutex_exit(&msp->ms_lock);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
		mutex_enter(&msp->ms_lock);
	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);
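		/*
		 * At this point allocmap describes exactly the space that is
		 * allocated as of this txg, so once space_map_truncate()
		 * discards the old object below, the condensed map can be
		 * written out as a fresh set of SM_ALLOC records.
		 */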
		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);

		vdev_space_update(vd, sm->sm_size, 0, B_TRUE);

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)

		space_map_unload(sm);

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
metaslab_distance(metaslab_t *msp, dva_t *dva)
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))

	return ((start - offset) << ms_shift);
	return ((offset - start) << ms_shift);
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++)
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
			activation_weight = METASLAB_WEIGHT_SECONDARY;

	mutex_enter(&mg->mg_lock);
	for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
		if (msp->ms_weight < size) {
			mutex_exit(&mg->mg_lock);

		if (activation_weight == METASLAB_WEIGHT_PRIMARY)

		target_distance = min_distance +
		    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
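		/*
		 * For ditto blocks (d > 0), require this metaslab to be at
		 * least min_distance from every DVA already placed; a
		 * metaslab that has never been allocated from
		 * (smo_alloc == 0) must be half again as far to be worth
		 * bringing into use.
		 */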
		for (i = 0; i < d; i++)
			if (metaslab_distance(msp, &dva[i]) <
			    target_distance)

	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);

	/*
	 * Ensure that the metaslab we have selected is still
	 * capable of handling our request.  It's possible that
	 * another thread may have changed the weight while we
	 * were blocked on the metaslab lock.
	 */
	if (msp->ms_weight < size) {
		mutex_exit(&msp->ms_lock);

	if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
	    activation_weight == METASLAB_WEIGHT_PRIMARY) {
		metaslab_passivate(msp,
		    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
		mutex_exit(&msp->ms_lock);

	if (metaslab_activate(msp, activation_weight) != 0) {
		mutex_exit(&msp->ms_lock);

	if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)

	metaslab_passivate(msp, size - 1);
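	/*
	 * space_map_alloc() failed even though the weight suggested the
	 * request would fit, so we passivated with size - 1: the metaslab's
	 * weight now sits below the requested size and it won't be retried
	 * for allocations this large until its weight is recomputed.
	 */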
	mutex_exit(&msp->ms_lock);

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);
/*
 * Allocate a block for the specified i/o.
 */
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, boolean_t hintdva_avoid)
	metaslab_group_t *mg, *rotor;
	uint64_t offset = -1ULL;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
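	/*
	 * The (lbolt & 3) == 0 test above fires only when the clock tick
	 * count is a multiple of four, so roughly a quarter of qualifying
	 * allocations get ganged -- enough to exercise the gang code paths
	 * regularly without affecting every large write.
	 */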
	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
	mg = vd->vdev_mg->mg_next;

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
	mg = vd->vdev_mg->mg_next;

	/*
	 * If the hint put us into the wrong class, just follow the rotor.
	 */
	if (mg->mg_class != mc)
	/*
	 * Don't allocate from faulted devices.
	 */
	if (!vdev_writeable(vd))

	/*
	 * Avoid writing single-copy data to a failing vdev.
	 */
	if ((vd->vdev_stat.vs_write_errors > 0 ||
	    vd->vdev_state < VDEV_STATE_HEALTHY) &&
	    d == 0 && dshift == 3) {

	ASSERT(mg->mg_class == mc);

	distance = vd->vdev_asize >> dshift;
	if (distance <= (1ULL << vd->vdev_ms_shift))
	asize = vdev_psize_to_asize(vd, psize);
	ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

	offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
	if (offset != -1ULL) {
		/*
		 * If we've just selected this metaslab group,
		 * figure out whether the corresponding vdev is
		 * over- or under-used relative to the pool,
		 * and set an allocation bias to even it out.
		 */
		if (mc->mc_allocated == 0) {
			vdev_stat_t *vs = &vd->vdev_stat;
			uint64_t alloc, space;

			alloc = spa_get_alloc(spa);
			space = spa_get_space(spa);

			/*
			 * Determine percent used in units of 0..1024.
			 * (This is just to avoid floating point.)
			 */
			vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
			su = (alloc << 10) / (space + 1);

			/*
			 * Bias by at most +/- 25% of the aliquot.
			 */
			mg->mg_bias = ((su - vu) *
			    (int64_t)mg->mg_aliquot) / (1024 * 4);
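			/*
			 * Example: if this vdev is 30% used (vu == 307) and
			 * the pool as a whole is 50% used (su == 512), the
			 * bias is (512 - 307) * mg_aliquot / 4096, about +5%
			 * of the aliquot, so this under-used vdev absorbs
			 * extra allocations until it catches up.  Dividing by
			 * 4096 caps the bias at +/- 25% of the aliquot, since
			 * |su - vu| can be at most 1024.
			 */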
		if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
		    mg->mg_aliquot + mg->mg_bias) {
			mc->mc_rotor = mg->mg_next;
			mc->mc_allocated = 0;

		DVA_SET_VDEV(&dva[d], vd->vdev_id);
		DVA_SET_OFFSET(&dva[d], offset);
		DVA_SET_GANG(&dva[d], 0);
		DVA_SET_ASIZE(&dva[d], asize);

		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	bzero(&dva[d], sizeof (dva_t));
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
	    offset, size);
	space_map_free(&msp->ms_map, offset, size);

	if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);

	/*
	 * Verify that this region is actually allocated in
	 * either a ms_allocmap or the ms_map.
	 */
	if (msp->ms_map.sm_loaded) {
		boolean_t allocd = B_FALSE;

		if (!space_map_contains(&msp->ms_map, offset, size)) {
			for (i = 0; i < TXG_CONCURRENT_STATES; i++) {
				space_map_t *sm = &msp->ms_allocmap
				    [(txg - i) & TXG_MASK];
				if (space_map_contains(sm,
				    offset, size)) {

		zfs_panic_recover("freeing free segment "
		    "(vdev=%llu offset=%llx size=%llx)",
		    (longlong_t)vdev, (longlong_t)offset,
		    (longlong_t)size);

	mutex_exit(&msp->ms_lock);
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
		mutex_exit(&msp->ms_lock);

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(vd, VDD_METASLAB, msp, txg);

	space_map_claim(&msp->ms_map, offset, size);
	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, boolean_t hintbp_avoid)
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;

	if (mc->mc_rotor == NULL)	/* no vdevs in this class */

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, hintbp_avoid);
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));

	ASSERT(BP_GET_NDVAS(bp) == ndvas);
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)

	return (last_error);