4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * For a more complete description of the main ideas, see:
30 * Jeff Bonwick and Jonathan Adams,
32 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
33 * Arbitrary Resources.
35 * Proceedings of the 2001 Usenix Conference.
36 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
38 * For the "Big Theory Statement", see usr/src/uts/common/os/vmem.c
40 * 1. Overview of changes
41 * ------------------------------
42 * There have been a few changes to vmem in order to support umem. The main areas are:
45 * * VM_SLEEP unsupported
49 * * initialization changes
51 * * _vmem_extend_alloc
56 * Since VM_SLEEP allocations can hold locks (in vmem_populate()) for
57 * possibly infinite amounts of time, they are not supported in this
58 * version of vmem. Sleep-like behavior can be achieved through
59 * UMEM_NOFAIL umem allocations.
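 *
 * For illustration, a hedged sketch: a caller that would have wanted
 * VM_SLEEP semantics can instead rely on the umem layer, e.g.
 *
 *        void *buf = umem_alloc(4096, UMEM_NOFAIL);
 *        ... use buf ...
 *        umem_free(buf, 4096);
 *
 * With UMEM_NOFAIL the call either succeeds or invokes the registered
 * nofail callback, so it does not return NULL.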
64 * Unlike kmem_reap(), which just asynchronously schedules work, umem_reap()
65 * can do allocations and frees synchronously. This is a problem if it
66 * occurs during a vmem_populate() allocation.
68 * Instead, we delay reaps while populates are active.
71 * 4. Initialization changes
72 * -------------------------
73 * In the kernel, vmem_init() allows you to create a single, top-level arena,
74 * which has vmem_internal_arena as a child. For umem, we want to be able
75 * to extend arenas dynamically. It is much easier to support this if we
76 * allow a two-level "heap" arena:
86 * [ASCII diagram of the two-level heap arena and its other children omitted]
94 * The new vmem_init() allows you to specify a "parent" of the heap, along
95 * with allocation functions.
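 *
 * For illustration, a hedged sketch of such a call, matching the
 * vmem_init() prototype later in this file. The names "sbrk_top",
 * sbrk_alloc, sbrk_free, and pagesize are hypothetical stand-ins for a
 * real parent allocator; vmem_alloc/vmem_free are passed as the heap's
 * own allocation functions:
 *
 *        (void) vmem_init("sbrk_top", pagesize, sbrk_alloc, sbrk_free,
 *            "sbrk_heap", NULL, 0, pagesize, vmem_alloc, vmem_free);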
98 * 5. _vmem_extend_alloc
99 * ---------------------
100 * The other part of extending is _vmem_extend_alloc. This function allows
101 * you to extend (expand current spans, if possible) an arena and allocate
102 * a chunk of the newly extended span atomically. This is needed to support
103 * extending the heap while vmem_populate()ing it.
105 * In order to increase the usefulness of extending, non-imported spans are
106 * sorted in address order.
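 *
 * For illustration, a hedged sketch of that interface (the arena, span
 * address, and sizes are made up; per the ASSERTs in the definition below,
 * size and alloc must be quantum multiples, alloc must not exceed size,
 * and the span must not already be in the arena):
 *
 *        buf = _vmem_extend_alloc(heap, span_base, 64 * 1024, 8 * 1024,
 *            VM_NOSLEEP);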
109 #include <sys/vmem_impl_user.h>
111 #include <sys/sysmacros.h>
116 #include "vmem_base.h"
117 #include "umem_base.h"
119 #define VMEM_INITIAL 6 /* early vmem arenas */
120 #define VMEM_SEG_INITIAL 100 /* early segments */
123 * Adding a new span to an arena requires two segment structures: one to
124 * represent the span, and one to represent the free segment it contains.
126 #define VMEM_SEGS_PER_SPAN_CREATE 2
129 * Allocating a piece of an existing segment requires 0-2 segment structures
130 * depending on how much of the segment we're allocating.
132 * To allocate the entire segment, no new segment structures are needed; we
133 * simply move the existing segment structure from the freelist to the
134 * allocation hash table.
136 * To allocate a piece from the left or right end of the segment, we must
137 * split the segment into two pieces (allocated part and remainder), so we
138 * need one new segment structure to represent the remainder.
140 * To allocate from the middle of a segment, we need two new segment structures
141 * to represent the remainders on either side of the allocated part.
143 #define VMEM_SEGS_PER_EXACT_ALLOC 0
144 #define VMEM_SEGS_PER_LEFT_ALLOC 1
145 #define VMEM_SEGS_PER_RIGHT_ALLOC 1
146 #define VMEM_SEGS_PER_MIDDLE_ALLOC 2
149 * vmem_populate() preallocates segment structures for vmem to do its work.
150 * It must preallocate enough for the worst case, which is when we must import
151 * a new span and then allocate from the middle of it.
153 #define VMEM_SEGS_PER_ALLOC_MAX \
154 (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
157 * The segment structures themselves are allocated from vmem_seg_arena, so
158 * we have a recursion problem when vmem_seg_arena needs to populate itself.
159 * We address this by working out the maximum number of segment structures
160 * this act will require, and multiplying by the maximum number of threads
161 * that we'll allow to do it simultaneously.
163 * The worst-case segment consumption to populate vmem_seg_arena is as
164 * follows (depicted as a stack trace to indicate why events are occurring):
166 *   vmem_alloc(vmem_seg_arena)            -> 2 segs (span create + exact alloc)
167 *    vmem_alloc(vmem_internal_arena)      -> 2 segs (span create + exact alloc)
168 *     heap_alloc(heap_arena)
169 *      vmem_alloc(heap_arena)             -> 4 segs (span create + alloc)
170 *       parent_alloc(parent_arena)
171 *        _vmem_extend_alloc(parent_arena) -> 3 segs (span create + left alloc)
173 * Note: The reservation for heap_arena must be 4, since vmem_xalloc()
174 * is overly pessimistic on allocations where parent_arena has a stricter
175 * alignment than heap_arena.
177 * The worst-case consumption for any arena is 4 segment structures.
178 * For now, we only support VM_NOSLEEP allocations, so as long as we
179 * serialize all vmem_populates, a 4-seg reserve is sufficient.
181 #define VMEM_POPULATE_SEGS_PER_ARENA 4
182 #define VMEM_POPULATE_LOCKS 1
184 #define VMEM_POPULATE_RESERVE \
185 (VMEM_POPULATE_SEGS_PER_ARENA * VMEM_POPULATE_LOCKS)
188 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
189 * so that it can satisfy the worst-case allocation *and* participate in
190 * worst-case allocation from vmem_seg_arena.
192 #define VMEM_MINFREE (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
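
/*
 * For illustration, the arithmetic implied by the constants above:
 *
 *        VMEM_POPULATE_RESERVE  = 4 * 1        = 4
 *        VMEM_MINFREE           = 4 + (2 + 2)  = 8
 *
 * so vmem_populate() leaves each arena with at least eight vmem_seg_t
 * structures on its private freelist: four for a worst-case allocation of
 * its own and four for its share of a worst-case vmem_seg_arena populate.
 */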
194 /* Don't assume new statics are zeroed - see vmem_startup() */
195 static vmem_t vmem0[VMEM_INITIAL];
196 static vmem_t *vmem_populator[VMEM_INITIAL];
197 static uint32_t vmem_id;
198 static uint32_t vmem_populators;
199 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
200 static vmem_seg_t *vmem_segfree;
201 static mutex_t vmem_list_lock;
202 static mutex_t vmem_segfree_lock;
203 static vmem_populate_lock_t vmem_nosleep_lock;
204 #define IN_POPULATE() (vmem_nosleep_lock.vmpl_thr == thr_self())
205 static vmem_t *vmem_list;
206 static vmem_t *vmem_internal_arena;
207 static vmem_t *vmem_seg_arena;
208 static vmem_t *vmem_hash_arena;
209 static vmem_t *vmem_vmem_arena;
212 vmem_alloc_t *vmem_heap_alloc;
213 vmem_free_t *vmem_heap_free;
215 uint32_t vmem_mtbf; /* mean time between failures [default: off] */
216 size_t vmem_seg_size = sizeof (vmem_seg_t);
219 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
221 #define VMEM_INSERT(vprev, vsp, type) \
223 vmem_seg_t *vnext = (vprev)->vs_##type##next; \
224 (vsp)->vs_##type##next = (vnext); \
225 (vsp)->vs_##type##prev = (vprev); \
226 (vprev)->vs_##type##next = (vsp); \
227 (vnext)->vs_##type##prev = (vsp); \
230 #define VMEM_DELETE(vsp, type) \
232 vmem_seg_t *vprev = (vsp)->vs_##type##prev; \
233 vmem_seg_t *vnext = (vsp)->vs_##type##next; \
234 (vprev)->vs_##type##next = (vnext); \
235 (vnext)->vs_##type##prev = (vprev); \
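
/*
 * For illustration, a hedged sketch of how the macros above are used: each
 * vmem_seg_t is simultaneously on two doubly-linked lists, selected by the
 * 'type' argument.  Assuming vprev and vsp are valid segments:
 *
 *        VMEM_INSERT(vprev, vsp, a);     arena (address-ordered) list
 *        VMEM_INSERT(vprev, vsp, k);     next-of-kin list (e.g. a freelist)
 *        VMEM_DELETE(vsp, k);            unlink from the kin list again
 */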
239 * Get a vmem_seg_t from the global segfree list.
242 vmem_getseg_global(void)
246 (void) mutex_lock(&vmem_segfree_lock);
247 if ((vsp = vmem_segfree) != NULL)
248 vmem_segfree = vsp->vs_knext;
249 (void) mutex_unlock(&vmem_segfree_lock);
255 * Put a vmem_seg_t on the global segfree list.
258 vmem_putseg_global(vmem_seg_t *vsp)
260 (void) mutex_lock(&vmem_segfree_lock);
261 vsp->vs_knext = vmem_segfree;
263 (void) mutex_unlock(&vmem_segfree_lock);
267 * Get a vmem_seg_t from vmp's segfree list.
270 vmem_getseg(vmem_t *vmp)
274 ASSERT(vmp->vm_nsegfree > 0);
276 vsp = vmp->vm_segfree;
277 vmp->vm_segfree = vsp->vs_knext;
284 * Put a vmem_seg_t on vmp's segfree list.
287 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
289 vsp->vs_knext = vmp->vm_segfree;
290 vmp->vm_segfree = vsp;
295 * Add vsp to the appropriate freelist.
298 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
302 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
304 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
305 vsp->vs_type = VMEM_FREE;
306 vmp->vm_freemap |= VS_SIZE(vprev);
307 VMEM_INSERT(vprev, vsp, k);
309 (void) cond_broadcast(&vmp->vm_cv);
313 * Take vsp from the freelist.
316 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
318 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
319 ASSERT(vsp->vs_type == VMEM_FREE);
321 if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
323 * The segments on both sides of 'vsp' are freelist heads,
324 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
326 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
327 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
333 * Add vsp to the allocated-segment hash table and update kstats.
336 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
340 vsp->vs_type = VMEM_ALLOC;
341 bucket = VMEM_HASH(vmp, vsp->vs_start);
342 vsp->vs_knext = *bucket;
345 if (vmem_seg_size == sizeof (vmem_seg_t)) {
346 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
347 VMEM_STACK_DEPTH, 0);
348 vsp->vs_thread = thr_self();
349 vsp->vs_timestamp = gethrtime();
354 vmp->vm_kstat.vk_alloc++;
355 vmp->vm_kstat.vk_mem_inuse += VS_SIZE(vsp);
359 * Remove vsp from the allocated-segment hash table and update kstats.
362 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
364 vmem_seg_t *vsp, **prev_vspp;
366 prev_vspp = VMEM_HASH(vmp, addr);
367 while ((vsp = *prev_vspp) != NULL) {
368 if (vsp->vs_start == addr) {
369 *prev_vspp = vsp->vs_knext;
372 vmp->vm_kstat.vk_lookup++;
373 prev_vspp = &vsp->vs_knext;
377 umem_panic("vmem_hash_delete(%p, %lx, %lu): bad free",
380 if (VS_SIZE(vsp) != size) {
381 umem_panic("vmem_hash_delete(%p, %lx, %lu): wrong size "
382 "(expect %lu)", vmp, addr, size, VS_SIZE(vsp));
385 vmp->vm_kstat.vk_free++;
386 vmp->vm_kstat.vk_mem_inuse -= size;
392 * Create a segment spanning the range [start, end) and add it to the arena.
395 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
397 vmem_seg_t *newseg = vmem_getseg(vmp);
399 newseg->vs_start = start;
400 newseg->vs_end = end;
402 newseg->vs_import = 0;
404 VMEM_INSERT(vprev, newseg, a);
410 * Remove segment vsp from the arena.
413 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
415 ASSERT(vsp->vs_type != VMEM_ROTOR);
418 vmem_putseg(vmp, vsp);
422 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
425 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
428 vmem_seg_t *newseg, *span;
429 uintptr_t start = (uintptr_t)vaddr;
430 uintptr_t end = start + size;
432 knext = &vmp->vm_seg0;
433 if (!import && vmp->vm_source_alloc == NULL) {
434 vmem_seg_t *kend, *kprev;
436 * non-imported spans are sorted in address order. This
437 * makes vmem_extend_unlocked() much more effective.
439 * We search in reverse order, since new spans are
440 * generally at higher addresses.
442 kend = &vmp->vm_seg0;
443 for (kprev = kend->vs_kprev; kprev != kend;
444 kprev = kprev->vs_kprev) {
445 if (!kprev->vs_import && (kprev->vs_end - 1) < start)
448 knext = kprev->vs_knext;
451 ASSERT(MUTEX_HELD(&vmp->vm_lock));
453 if ((start | end) & (vmp->vm_quantum - 1)) {
454 umem_panic("vmem_span_create(%p, %p, %lu): misaligned",
458 span = vmem_seg_create(vmp, knext->vs_aprev, start, end);
459 span->vs_type = VMEM_SPAN;
460 VMEM_INSERT(knext->vs_kprev, span, k);
462 newseg = vmem_seg_create(vmp, span, start, end);
463 vmem_freelist_insert(vmp, newseg);
465 newseg->vs_import = import;
467 vmp->vm_kstat.vk_mem_import += size;
468 vmp->vm_kstat.vk_mem_total += size;
474 * Remove span vsp from vmp and update kstats.
477 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
479 vmem_seg_t *span = vsp->vs_aprev;
480 size_t size = VS_SIZE(vsp);
482 ASSERT(MUTEX_HELD(&vmp->vm_lock));
483 ASSERT(span->vs_type == VMEM_SPAN);
486 vmp->vm_kstat.vk_mem_import -= size;
487 vmp->vm_kstat.vk_mem_total -= size;
489 VMEM_DELETE(span, k);
491 vmem_seg_destroy(vmp, vsp);
492 vmem_seg_destroy(vmp, span);
496 * Allocate the subrange [addr, addr + size) from segment vsp.
497 * If there are leftovers on either side, place them on the freelist.
498 * Returns a pointer to the segment representing [addr, addr + size).
501 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
503 uintptr_t vs_start = vsp->vs_start;
504 uintptr_t vs_end = vsp->vs_end;
505 size_t vs_size = vs_end - vs_start;
506 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
507 uintptr_t addr_end = addr + realsize;
509 ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
510 ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
511 ASSERT(vsp->vs_type == VMEM_FREE);
512 ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
513 ASSERT(addr - 1 <= addr_end - 1);
516 * If we're allocating from the start of the segment, and the
517 * remainder will be on the same freelist, we can save quite a bit of work.
520 if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
521 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
522 vsp->vs_start = addr_end;
523 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
524 vmem_hash_insert(vmp, vsp);
528 vmem_freelist_delete(vmp, vsp);
530 if (vs_end != addr_end)
531 vmem_freelist_insert(vmp,
532 vmem_seg_create(vmp, vsp, addr_end, vs_end));
534 if (vs_start != addr)
535 vmem_freelist_insert(vmp,
536 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
538 vsp->vs_start = addr;
539 vsp->vs_end = addr + size;
541 vmem_hash_insert(vmp, vsp);
546 * We cannot reap if we are in the middle of a vmem_populate().
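 *
 * A minimal, hedged sketch of such a reap gate, in terms of the
 * IN_POPULATE() macro defined above and umem_reap(); the actual function
 * may differ in detail:
 *
 *        void
 *        vmem_reap(void)
 *        {
 *                if (!IN_POPULATE())
 *                        umem_reap();
 *        }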
556 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
559 vmem_populate(vmem_t *vmp, int vmflag)
565 vmem_populate_lock_t *lp;
568 while (vmp->vm_nsegfree < VMEM_MINFREE &&
569 (vsp = vmem_getseg_global()) != NULL)
570 vmem_putseg(vmp, vsp);
572 if (vmp->vm_nsegfree >= VMEM_MINFREE)
576 * If we're already populating, tap the reserve.
578 if (vmem_nosleep_lock.vmpl_thr == thr_self()) {
579 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
583 (void) mutex_unlock(&vmp->vm_lock);
585 ASSERT(vmflag & VM_NOSLEEP); /* we do not allow sleep allocations */
586 lp = &vmem_nosleep_lock;
589 * Cannot be just a mutex_lock(), since that has no effect if
590 * libthread is not linked.
592 (void) mutex_lock(&lp->vmpl_mutex);
593 ASSERT(lp->vmpl_thr == 0);
594 lp->vmpl_thr = thr_self();
596 nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
597 size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
598 nseg = size / vmem_seg_size;
601 * The following vmem_alloc() may need to populate vmem_seg_arena
602 * and all the things it imports from. When doing so, it will tap
603 * each arena's reserve to prevent recursion (see the block comment
604 * above the definition of VMEM_POPULATE_RESERVE).
606 * During this allocation, vmem_reap() is a no-op. If the allocation
607 * fails, we call vmem_reap() after dropping the population lock.
609 p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_UMFLAGS);
612 (void) mutex_unlock(&lp->vmpl_mutex);
615 (void) mutex_lock(&vmp->vm_lock);
616 vmp->vm_kstat.vk_populate_fail++;
620 * Restock the arenas that may have been depleted during population.
622 for (i = 0; i < vmem_populators; i++) {
623 (void) mutex_lock(&vmem_populator[i]->vm_lock);
624 while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
625 vmem_putseg(vmem_populator[i],
626 (vmem_seg_t *)(p + --nseg * vmem_seg_size));
627 (void) mutex_unlock(&vmem_populator[i]->vm_lock);
631 (void) mutex_unlock(&lp->vmpl_mutex);
632 (void) mutex_lock(&vmp->vm_lock);
635 * Now take our own segments.
637 ASSERT(nseg >= VMEM_MINFREE);
638 while (vmp->vm_nsegfree < VMEM_MINFREE)
639 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
642 * Give the remainder to charity.
645 vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
651 * Advance a walker from its previous position to 'afterme'.
652 * Note: may drop and reacquire vmp->vm_lock.
655 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
657 vmem_seg_t *vprev = walker->vs_aprev;
658 vmem_seg_t *vnext = walker->vs_anext;
659 vmem_seg_t *vsp = NULL;
661 VMEM_DELETE(walker, a);
664 VMEM_INSERT(afterme, walker, a);
667 * The walker segment's presence may have prevented its neighbors
668 * from coalescing. If so, coalesce them now.
670 if (vprev->vs_type == VMEM_FREE) {
671 if (vnext->vs_type == VMEM_FREE) {
672 ASSERT(vprev->vs_end == vnext->vs_start);
673 vmem_freelist_delete(vmp, vnext);
674 vmem_freelist_delete(vmp, vprev);
675 vprev->vs_end = vnext->vs_end;
676 vmem_freelist_insert(vmp, vprev);
677 vmem_seg_destroy(vmp, vnext);
680 } else if (vnext->vs_type == VMEM_FREE) {
685 * vsp could represent a complete imported span,
686 * in which case we must return it to the source.
688 if (vsp != NULL && vsp->vs_import && vmp->vm_source_free != NULL &&
689 vsp->vs_aprev->vs_type == VMEM_SPAN &&
690 vsp->vs_anext->vs_type == VMEM_SPAN) {
691 void *vaddr = (void *)vsp->vs_start;
692 size_t size = VS_SIZE(vsp);
693 ASSERT(size == VS_SIZE(vsp->vs_aprev));
694 vmem_freelist_delete(vmp, vsp);
695 vmem_span_destroy(vmp, vsp);
696 (void) mutex_unlock(&vmp->vm_lock);
697 vmp->vm_source_free(vmp->vm_source, vaddr, size);
698 (void) mutex_lock(&vmp->vm_lock);
703 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
704 * in an arena, so that we avoid reusing addresses for as long as possible.
705 * This helps to catch use-after-free bugs. It's also the perfect policy
706 * for allocating things like process IDs, where we want to cycle through
707 * all values in order.
710 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
712 vmem_seg_t *vsp, *rotor;
714 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
717 (void) mutex_lock(&vmp->vm_lock);
719 if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
720 (void) mutex_unlock(&vmp->vm_lock);
725 * The common case is that the segment right after the rotor is free,
726 * and large enough that extracting 'size' bytes won't change which
727 * freelist it's on. In this case we can avoid a *lot* of work.
728 * Instead of the normal vmem_seg_alloc(), we just advance the start
729 * address of the victim segment. Instead of moving the rotor, we
730 * create the new segment structure *behind the rotor*, which has
731 * the same effect. And finally, we know we don't have to coalesce
732 * the rotor's neighbors because the new segment lies between them.
734 rotor = &vmp->vm_rotor;
735 vsp = rotor->vs_anext;
736 if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
737 P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
738 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
739 addr = vsp->vs_start;
740 vsp->vs_start = addr + realsize;
741 vmem_hash_insert(vmp,
742 vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
743 (void) mutex_unlock(&vmp->vm_lock);
744 return ((void *)addr);
748 * Starting at the rotor, look for a segment large enough to
749 * satisfy the allocation.
752 vmp->vm_kstat.vk_search++;
753 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
760 * We've come full circle. One possibility is that
761 * there's actually enough space, but the rotor itself
762 * is preventing the allocation from succeeding because
763 * it's sitting between two free segments. Therefore,
764 * we advance the rotor and see if that liberates a suitable segment.
767 vmem_advance(vmp, rotor, rotor->vs_anext);
768 vsp = rotor->vs_aprev;
769 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
772 * If there's a lower arena we can import from, or it's
773 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
774 * Otherwise, wait until another thread frees something.
776 if (vmp->vm_source_alloc != NULL ||
777 (vmflag & VM_NOSLEEP)) {
778 (void) mutex_unlock(&vmp->vm_lock);
779 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
780 0, 0, NULL, NULL, vmflag & VM_UMFLAGS));
782 vmp->vm_kstat.vk_wait++;
783 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
785 (void) cond_wait(&vmp->vm_cv, &vmp->vm_lock);
786 (void) pthread_setcancelstate(cancel_state, NULL);
787 vsp = rotor->vs_anext;
792 * We found a segment. Extract enough space to satisfy the allocation.
794 addr = vsp->vs_start;
795 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
796 ASSERT(vsp->vs_type == VMEM_ALLOC &&
797 vsp->vs_start == addr && vsp->vs_end == addr + size);
800 * Advance the rotor to right after the newly-allocated segment.
801 * That's where the next VM_NEXTFIT allocation will begin searching.
803 vmem_advance(vmp, rotor, vsp);
804 (void) mutex_unlock(&vmp->vm_lock);
805 return ((void *)addr);
809 * Allocate size bytes at offset phase from an align boundary such that the
810 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
811 * that does not straddle a nocross-aligned boundary.
814 vmem_xalloc(vmem_t *vmp, size_t size, size_t align, size_t phase,
815 size_t nocross, void *minaddr, void *maxaddr, int vmflag)
818 vmem_seg_t *vbest = NULL;
819 uintptr_t addr, taddr, start, end;
824 if (phase > 0 && phase >= align)
825 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
827 (void *)vmp, size, align, phase, nocross,
828 minaddr, maxaddr, vmflag);
831 align = vmp->vm_quantum;
833 if ((align | phase | nocross) & (vmp->vm_quantum - 1)) {
834 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
835 "parameters not vm_quantum aligned",
836 (void *)vmp, size, align, phase, nocross,
837 minaddr, maxaddr, vmflag);
841 (align > nocross || P2ROUNDUP(phase + size, align) > nocross)) {
842 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
843 "overconstrained allocation",
844 (void *)vmp, size, align, phase, nocross,
845 minaddr, maxaddr, vmflag);
848 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
849 (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
852 (void) mutex_lock(&vmp->vm_lock);
856 if (vmp->vm_nsegfree < VMEM_MINFREE &&
857 !vmem_populate(vmp, vmflag))
861 * highbit() returns the highest bit + 1, which is exactly
862 * what we want: we want to search the first freelist whose
863 * members are *definitely* large enough to satisfy our
864 * allocation. However, there are certain cases in which we
865 * want to look at the next-smallest freelist (which *might*
866 * be able to satisfy the allocation):
868 * (1) The size is exactly a power of 2, in which case
869 * the smaller freelist is always big enough;
871 * (2) All other freelists are empty;
873 * (3) We're in the highest possible freelist, which is
874 * always empty (e.g. the 4GB freelist on 32-bit systems);
876 * (4) We're doing a best-fit or first-fit allocation.
878 if ((size & (size - 1)) == 0) {
879 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
882 if ((vmp->vm_freemap >> hb) == 0 ||
883 hb == VMEM_FREELISTS ||
884 (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
886 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
889 for (vbest = NULL, vsp = (flist == 0) ? NULL :
890 vmp->vm_freelist[flist - 1].vs_knext;
891 vsp != NULL; vsp = vsp->vs_knext) {
892 vmp->vm_kstat.vk_search++;
893 if (vsp->vs_start == 0) {
895 * We're moving up to a larger freelist,
896 * so if we've already found a candidate,
897 * the fit can't possibly get any better.
902 * Find the next non-empty freelist.
904 flist = lowbit(P2ALIGN(vmp->vm_freemap,
908 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
909 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
912 if (vsp->vs_end - 1 < (uintptr_t)minaddr)
914 if (vsp->vs_start > (uintptr_t)maxaddr - 1)
916 start = MAX(vsp->vs_start, (uintptr_t)minaddr);
917 end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
918 taddr = P2PHASEUP(start, align, phase);
919 if (P2BOUNDARY(taddr, size, nocross))
921 P2ROUNDUP(P2NPHASE(taddr, nocross), align);
922 if ((taddr - start) + size > end - start ||
923 (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
927 if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
933 umem_panic("vmem_xalloc(): size == 0");
934 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
935 minaddr == NULL && maxaddr == NULL) {
936 size_t asize = P2ROUNDUP(size + phase,
937 MAX(align, vmp->vm_source->vm_quantum));
938 if (asize < size) { /* overflow */
939 (void) mutex_unlock(&vmp->vm_lock);
940 if (vmflag & VM_NOSLEEP)
943 umem_panic("vmem_xalloc(): "
944 "overflow on VM_SLEEP allocation");
947 * Determine how many segment structures we'll consume.
948 * The calculation must be precise because if we're
949 * here on behalf of vmem_populate(), we are taking
950 * segments from a very limited reserve.
952 resv = (size == asize) ?
953 VMEM_SEGS_PER_SPAN_CREATE +
954 VMEM_SEGS_PER_EXACT_ALLOC :
955 VMEM_SEGS_PER_ALLOC_MAX;
956 ASSERT(vmp->vm_nsegfree >= resv);
957 vmp->vm_nsegfree -= resv; /* reserve our segs */
958 (void) mutex_unlock(&vmp->vm_lock);
959 vaddr = vmp->vm_source_alloc(vmp->vm_source, asize,
960 vmflag & VM_UMFLAGS);
961 (void) mutex_lock(&vmp->vm_lock);
962 vmp->vm_nsegfree += resv; /* claim reservation */
964 vbest = vmem_span_create(vmp, vaddr, asize, 1);
965 addr = P2PHASEUP(vbest->vs_start, align, phase);
969 (void) mutex_unlock(&vmp->vm_lock);
971 (void) mutex_lock(&vmp->vm_lock);
972 if (vmflag & VM_NOSLEEP)
974 vmp->vm_kstat.vk_wait++;
975 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
977 (void) cond_wait(&vmp->vm_cv, &vmp->vm_lock);
978 (void) pthread_setcancelstate(cancel_state, NULL);
981 ASSERT(vbest->vs_type == VMEM_FREE);
982 ASSERT(vbest->vs_knext != vbest);
983 (void) vmem_seg_alloc(vmp, vbest, addr, size);
984 (void) mutex_unlock(&vmp->vm_lock);
985 ASSERT(P2PHASE(addr, align) == phase);
986 ASSERT(!P2BOUNDARY(addr, size, nocross));
987 ASSERT(addr >= (uintptr_t)minaddr);
988 ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
989 return ((void *)addr);
991 vmp->vm_kstat.vk_fail++;
992 (void) mutex_unlock(&vmp->vm_lock);
993 if (vmflag & VM_PANIC)
994 umem_panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
995 "cannot satisfy mandatory allocation",
996 (void *)vmp, size, align, phase, nocross,
997 minaddr, maxaddr, vmflag);
1002 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1003 * allocation. vmem_xalloc() and vmem_xfree() must always be paired because
1004 * both routines bypass the quantum caches.
1007 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1009 vmem_seg_t *vsp, *vnext, *vprev;
1011 (void) mutex_lock(&vmp->vm_lock);
1013 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1014 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1017 * Attempt to coalesce with the next segment.
1019 vnext = vsp->vs_anext;
1020 if (vnext->vs_type == VMEM_FREE) {
1021 ASSERT(vsp->vs_end == vnext->vs_start);
1022 vmem_freelist_delete(vmp, vnext);
1023 vsp->vs_end = vnext->vs_end;
1024 vmem_seg_destroy(vmp, vnext);
1028 * Attempt to coalesce with the previous segment.
1030 vprev = vsp->vs_aprev;
1031 if (vprev->vs_type == VMEM_FREE) {
1032 ASSERT(vprev->vs_end == vsp->vs_start);
1033 vmem_freelist_delete(vmp, vprev);
1034 vprev->vs_end = vsp->vs_end;
1035 vmem_seg_destroy(vmp, vsp);
1040 * If the entire span is free, return it to the source.
1042 if (vsp->vs_import && vmp->vm_source_free != NULL &&
1043 vsp->vs_aprev->vs_type == VMEM_SPAN &&
1044 vsp->vs_anext->vs_type == VMEM_SPAN) {
1045 vaddr = (void *)vsp->vs_start;
1046 size = VS_SIZE(vsp);
1047 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1048 vmem_span_destroy(vmp, vsp);
1049 (void) mutex_unlock(&vmp->vm_lock);
1050 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1052 vmem_freelist_insert(vmp, vsp);
1053 (void) mutex_unlock(&vmp->vm_lock);
1058 * Allocate size bytes from arena vmp. Returns the allocated address
1059 * on success, NULL on failure. vmflag specifies VM_SLEEP or VM_NOSLEEP,
1060 * and may also specify best-fit, first-fit, or next-fit allocation policy
1061 * instead of the default instant-fit policy. VM_SLEEP allocations are
1062 * guaranteed to succeed.
1065 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1073 if (size - 1 < vmp->vm_qcache_max) {
1074 ASSERT(vmflag & VM_NOSLEEP);
1075 return (_umem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1076 vmp->vm_qshift], UMEM_DEFAULT));
1079 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1080 (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1083 if (vmflag & VM_NEXTFIT)
1084 return (vmem_nextfit_alloc(vmp, size, vmflag));
1086 if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1087 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1088 NULL, NULL, vmflag));
1091 * Unconstrained instant-fit allocation from the segment list.
1093 (void) mutex_lock(&vmp->vm_lock);
1095 if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1096 if ((size & (size - 1)) == 0)
1097 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1098 else if ((hb = highbit(size)) < VMEM_FREELISTS)
1099 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1103 (void) mutex_unlock(&vmp->vm_lock);
1104 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1105 0, 0, NULL, NULL, vmflag));
1108 ASSERT(size <= (1UL << flist));
1109 vsp = vmp->vm_freelist[flist].vs_knext;
1110 addr = vsp->vs_start;
1111 (void) vmem_seg_alloc(vmp, vsp, addr, size);
1112 (void) mutex_unlock(&vmp->vm_lock);
1113 return ((void *)addr);
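
/*
 * For illustration, a hedged sketch of typical calls into vmem_alloc()
 * above and vmem_free() below (vmp is assumed to be a valid arena, and the
 * size passed to vmem_free() must match the size allocated):
 *
 *        void *p = vmem_alloc(vmp, 3 * vmp->vm_quantum, VM_NOSLEEP);
 *        if (p != NULL)
 *                vmem_free(vmp, p, 3 * vmp->vm_quantum);
 */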
1117 * Free the segment [vaddr, vaddr + size).
1120 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1122 if (size - 1 < vmp->vm_qcache_max)
1123 _umem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1126 vmem_xfree(vmp, vaddr, size);
1130 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1133 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1135 uintptr_t start = (uintptr_t)vaddr;
1136 uintptr_t end = start + size;
1138 vmem_seg_t *seg0 = &vmp->vm_seg0;
1140 (void) mutex_lock(&vmp->vm_lock);
1141 vmp->vm_kstat.vk_contains++;
1142 for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1143 vmp->vm_kstat.vk_contains_search++;
1144 ASSERT(vsp->vs_type == VMEM_SPAN);
1145 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1148 (void) mutex_unlock(&vmp->vm_lock);
1149 return (vsp != seg0);
1153 * Add the span [vaddr, vaddr + size) to arena vmp.
1156 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1158 if (vaddr == NULL || size == 0) {
1159 umem_panic("vmem_add(%p, %p, %lu): bad arguments",
1163 ASSERT(!vmem_contains(vmp, vaddr, size));
1165 (void) mutex_lock(&vmp->vm_lock);
1166 if (vmem_populate(vmp, vmflag))
1167 (void) vmem_span_create(vmp, vaddr, size, 0);
1170 (void) cond_broadcast(&vmp->vm_cv);
1171 (void) mutex_unlock(&vmp->vm_lock);
1176 * Adds the address range [addr, endaddr) to arena vmp, by either:
1177 * 1. joining two existing spans, [x, addr), and [endaddr, y) (which
1178 * are in that order) into a single [x, y) span,
1179 * 2. expanding an existing [x, addr) span to [x, endaddr),
1180 * 3. expanding an existing [endaddr, x) span to [addr, x), or
1181 * 4. creating a new [addr, endaddr) span.
1183 * Called with vmp->vm_lock held, and a successful vmem_populate() completed.
1184 * Cannot fail. Returns the new segment.
1186 * NOTE: this algorithm is linear-time in the number of spans, but is
1187 * constant-time when you are extending the last (highest-addressed) span.
1191 vmem_extend_unlocked(vmem_t *vmp, uintptr_t addr, uintptr_t endaddr)
1196 vmem_seg_t *end = &vmp->vm_seg0;
1198 ASSERT(MUTEX_HELD(&vmp->vm_lock));
1201 * the second "if" clause below relies on the direction of this search
1203 for (span = end->vs_kprev; span != end; span = span->vs_kprev) {
1204 if (span->vs_end == addr || span->vs_start == endaddr)
1209 return (vmem_span_create(vmp, (void *)addr, endaddr - addr, 0));
1210 if (span->vs_kprev->vs_end == addr && span->vs_start == endaddr) {
1211 vmem_seg_t *prevspan = span->vs_kprev;
1212 vmem_seg_t *nextseg = span->vs_anext;
1213 vmem_seg_t *prevseg = span->vs_aprev;
1216 * prevspan becomes the span marker for the full range
1218 prevspan->vs_end = span->vs_end;
1221 * Notionally, span becomes a free segment representing [addr, endaddr).
1224 * However, if either of its neighbors is free, we coalesce
1225 * by destroying span and changing the free segment.
1227 if (prevseg->vs_type == VMEM_FREE &&
1228 nextseg->vs_type == VMEM_FREE) {
1230 * coalesce both ways
1232 ASSERT(prevseg->vs_end == addr &&
1233 nextseg->vs_start == endaddr);
1235 vmem_freelist_delete(vmp, prevseg);
1236 prevseg->vs_end = nextseg->vs_end;
1238 vmem_freelist_delete(vmp, nextseg);
1239 VMEM_DELETE(span, k);
1240 vmem_seg_destroy(vmp, nextseg);
1241 vmem_seg_destroy(vmp, span);
1244 } else if (prevseg->vs_type == VMEM_FREE) {
1248 ASSERT(prevseg->vs_end == addr);
1250 VMEM_DELETE(span, k);
1251 vmem_seg_destroy(vmp, span);
1253 vmem_freelist_delete(vmp, prevseg);
1254 prevseg->vs_end = endaddr;
1257 } else if (nextseg->vs_type == VMEM_FREE) {
1261 ASSERT(nextseg->vs_start == endaddr);
1263 VMEM_DELETE(span, k);
1264 vmem_seg_destroy(vmp, span);
1266 vmem_freelist_delete(vmp, nextseg);
1267 nextseg->vs_start = addr;
1274 VMEM_DELETE(span, k);
1275 span->vs_start = addr;
1276 span->vs_end = endaddr;
1280 } else if (span->vs_end == addr) {
1281 vmem_seg_t *oldseg = span->vs_knext->vs_aprev;
1282 span->vs_end = endaddr;
1284 ASSERT(oldseg->vs_type != VMEM_SPAN);
1285 if (oldseg->vs_type == VMEM_FREE) {
1286 ASSERT(oldseg->vs_end == addr);
1287 vmem_freelist_delete(vmp, oldseg);
1288 oldseg->vs_end = endaddr;
1291 vsp = vmem_seg_create(vmp, oldseg, addr, endaddr);
1293 vmem_seg_t *oldseg = span->vs_anext;
1294 ASSERT(span->vs_start == endaddr);
1295 span->vs_start = addr;
1297 ASSERT(oldseg->vs_type != VMEM_SPAN);
1298 if (oldseg->vs_type == VMEM_FREE) {
1299 ASSERT(oldseg->vs_start == endaddr);
1300 vmem_freelist_delete(vmp, oldseg);
1301 oldseg->vs_start = addr;
1304 vsp = vmem_seg_create(vmp, span, addr, endaddr);
1306 vmem_freelist_insert(vmp, vsp);
1307 vmp->vm_kstat.vk_mem_total += (endaddr - addr);
1312 * Does some error checking, calls vmem_extend_unlocked to add
1313 * [vaddr, vaddr+size) to vmp, then allocates alloc bytes from the
1314 * newly merged segment.
1317 _vmem_extend_alloc(vmem_t *vmp, void *vaddr, size_t size, size_t alloc,
1320 uintptr_t addr = (uintptr_t)vaddr;
1321 uintptr_t endaddr = addr + size;
1324 ASSERT(vaddr != NULL && size != 0 && endaddr > addr);
1325 ASSERT(alloc <= size && alloc != 0);
1326 ASSERT(((addr | size | alloc) & (vmp->vm_quantum - 1)) == 0);
1328 ASSERT(!vmem_contains(vmp, vaddr, size));
1330 (void) mutex_lock(&vmp->vm_lock);
1331 if (!vmem_populate(vmp, vmflag)) {
1332 (void) mutex_unlock(&vmp->vm_lock);
1336 * if there is a source, we can't mess with the spans
1338 if (vmp->vm_source_alloc != NULL)
1339 vsp = vmem_span_create(vmp, vaddr, size, 0);
1341 vsp = vmem_extend_unlocked(vmp, addr, endaddr);
1343 ASSERT(VS_SIZE(vsp) >= alloc);
1345 addr = vsp->vs_start;
1346 (void) vmem_seg_alloc(vmp, vsp, addr, alloc);
1347 vaddr = (void *)addr;
1349 (void) cond_broadcast(&vmp->vm_cv);
1350 (void) mutex_unlock(&vmp->vm_lock);
1356 * Walk the vmp arena, applying func to each segment matching typemask.
1357 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1358 * call to func(); otherwise, it is held for the duration of vmem_walk()
1359 * to ensure a consistent snapshot. Note that VMEM_REENTRANT callbacks
1360 * are *not* necessarily consistent, so they may only be used when a hint
1364 vmem_walk(vmem_t *vmp, int typemask,
1365 void (*func)(void *, void *, size_t), void *arg)
1368 vmem_seg_t *seg0 = &vmp->vm_seg0;
1371 if (typemask & VMEM_WALKER)
1374 bzero(&walker, sizeof (walker));
1375 walker.vs_type = VMEM_WALKER;
1377 (void) mutex_lock(&vmp->vm_lock);
1378 VMEM_INSERT(seg0, &walker, a);
1379 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1380 if (vsp->vs_type & typemask) {
1381 void *start = (void *)vsp->vs_start;
1382 size_t size = VS_SIZE(vsp);
1383 if (typemask & VMEM_REENTRANT) {
1384 vmem_advance(vmp, &walker, vsp);
1385 (void) mutex_unlock(&vmp->vm_lock);
1386 func(arg, start, size);
1387 (void) mutex_lock(&vmp->vm_lock);
1390 func(arg, start, size);
1394 vmem_advance(vmp, &walker, NULL);
1395 (void) mutex_unlock(&vmp->vm_lock);
1399 * Return the total amount of memory whose type matches typemask. Thus:
1401 * typemask VMEM_ALLOC yields total memory allocated (in use).
1402 * typemask VMEM_FREE yields total memory free (available).
1403 * typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1406 vmem_size(vmem_t *vmp, int typemask)
1410 if (typemask & VMEM_ALLOC)
1411 size += vmp->vm_kstat.vk_mem_inuse;
1412 if (typemask & VMEM_FREE)
1413 size += vmp->vm_kstat.vk_mem_total -
1414 vmp->vm_kstat.vk_mem_inuse;
1415 return ((size_t)size);
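
/*
 * For illustration, a hedged example of using vmem_size() to report arena
 * utilization (vmp is assumed to be a valid arena):
 *
 *        size_t inuse = vmem_size(vmp, VMEM_ALLOC);
 *        size_t total = vmem_size(vmp, VMEM_ALLOC | VMEM_FREE);
 */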
1419 * Create an arena called name whose initial span is [base, base + size).
1420 * The arena's natural unit of currency is quantum, so vmem_alloc()
1421 * guarantees quantum-aligned results. The arena may import new spans
1422 * by invoking afunc() on source, and may return those spans by invoking
1423 * ffunc() on source. To make small allocations fast and scalable,
1424 * the arena offers high-performance caching for each integer multiple
1425 * of quantum up to qcache_max.
1428 vmem_create(const char *name, void *base, size_t size, size_t quantum,
1429 vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1430 size_t qcache_max, int vmflag)
1434 vmem_t *vmp, *cur, **vmpp;
1436 vmem_freelist_t *vfp;
1437 uint32_t id = atomic_add_32_nv(&vmem_id, 1);
1439 if (vmem_vmem_arena != NULL) {
1440 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1441 vmflag & VM_UMFLAGS);
1443 ASSERT(id <= VMEM_INITIAL);
1444 vmp = &vmem0[id - 1];
1449 bzero(vmp, sizeof (vmem_t));
1451 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1452 (void) mutex_init(&vmp->vm_lock, USYNC_THREAD, NULL);
1453 (void) cond_init(&vmp->vm_cv, USYNC_THREAD, NULL);
1454 vmp->vm_cflags = vmflag;
1455 vmflag &= VM_UMFLAGS;
1457 vmp->vm_quantum = quantum;
1458 vmp->vm_qshift = highbit(quantum) - 1;
1459 nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1461 for (i = 0; i <= VMEM_FREELISTS; i++) {
1462 vfp = &vmp->vm_freelist[i];
1463 vfp->vs_end = 1UL << i;
1464 vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1465 vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1468 vmp->vm_freelist[0].vs_kprev = NULL;
1469 vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1470 vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1471 vmp->vm_hash_table = vmp->vm_hash0;
1472 vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1473 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1475 vsp = &vmp->vm_seg0;
1476 vsp->vs_anext = vsp;
1477 vsp->vs_aprev = vsp;
1478 vsp->vs_knext = vsp;
1479 vsp->vs_kprev = vsp;
1480 vsp->vs_type = VMEM_SPAN;
1482 vsp = &vmp->vm_rotor;
1483 vsp->vs_type = VMEM_ROTOR;
1484 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1488 vmp->vm_kstat.vk_source_id = source->vm_id;
1489 vmp->vm_source = source;
1490 vmp->vm_source_alloc = afunc;
1491 vmp->vm_source_free = ffunc;
1494 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1495 for (i = 0; i < nqcache; i++) {
1496 char buf[VMEM_NAMELEN + 21];
1497 (void) snprintf(buf, sizeof (buf), "%s_%lu",
1498 vmp->vm_name, (long)((i + 1) * quantum));
1499 vmp->vm_qcache[i] = umem_cache_create(buf,
1500 (i + 1) * quantum, quantum, NULL, NULL, NULL,
1501 NULL, vmp, UMC_QCACHE | UMC_NOTOUCH);
1502 if (vmp->vm_qcache[i] == NULL) {
1503 vmp->vm_qcache_max = i * quantum;
1509 (void) mutex_lock(&vmem_list_lock);
1511 while ((cur = *vmpp) != NULL)
1512 vmpp = &cur->vm_next;
1514 (void) mutex_unlock(&vmem_list_lock);
1516 if (vmp->vm_cflags & VMC_POPULATOR) {
1517 uint_t pop_id = atomic_add_32_nv(&vmem_populators, 1);
1518 ASSERT(pop_id <= VMEM_INITIAL);
1519 vmem_populator[pop_id - 1] = vmp;
1520 (void) mutex_lock(&vmp->vm_lock);
1521 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1522 (void) mutex_unlock(&vmp->vm_lock);
1525 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1534 * Destroy arena vmp.
1537 vmem_destroy(vmem_t *vmp)
1539 vmem_t *cur, **vmpp;
1540 vmem_seg_t *seg0 = &vmp->vm_seg0;
1545 (void) mutex_lock(&vmem_list_lock);
1547 while ((cur = *vmpp) != vmp)
1548 vmpp = &cur->vm_next;
1549 *vmpp = vmp->vm_next;
1550 (void) mutex_unlock(&vmem_list_lock);
1552 for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1553 if (vmp->vm_qcache[i])
1554 umem_cache_destroy(vmp->vm_qcache[i]);
1556 leaked = vmem_size(vmp, VMEM_ALLOC);
1558 umem_printf("vmem_destroy('%s'): leaked %lu bytes",
1559 vmp->vm_name, leaked);
1561 if (vmp->vm_hash_table != vmp->vm_hash0)
1562 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1563 (vmp->vm_hash_mask + 1) * sizeof (void *));
1566 * Give back the segment structures for anything that's left in the
1567 * arena, e.g. the primary spans and their free segments.
1569 VMEM_DELETE(&vmp->vm_rotor, a);
1570 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
1571 vmem_putseg_global(vsp);
1573 while (vmp->vm_nsegfree > 0)
1574 vmem_putseg_global(vmem_getseg(vmp));
1576 (void) mutex_destroy(&vmp->vm_lock);
1577 (void) cond_destroy(&vmp->vm_cv);
1578 vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1582 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1585 vmem_hash_rescale(vmem_t *vmp)
1587 vmem_seg_t **old_table, **new_table, *vsp;
1588 size_t old_size, new_size, h, nseg;
1590 nseg = (size_t)(vmp->vm_kstat.vk_alloc - vmp->vm_kstat.vk_free);
1592 new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1593 old_size = vmp->vm_hash_mask + 1;
1595 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
1598 new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1600 if (new_table == NULL)
1602 bzero(new_table, new_size * sizeof (void *));
1604 (void) mutex_lock(&vmp->vm_lock);
1606 old_size = vmp->vm_hash_mask + 1;
1607 old_table = vmp->vm_hash_table;
1609 vmp->vm_hash_mask = new_size - 1;
1610 vmp->vm_hash_table = new_table;
1611 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1613 for (h = 0; h < old_size; h++) {
1615 while (vsp != NULL) {
1616 uintptr_t addr = vsp->vs_start;
1617 vmem_seg_t *next_vsp = vsp->vs_knext;
1618 vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1619 vsp->vs_knext = *hash_bucket;
1625 (void) mutex_unlock(&vmp->vm_lock);
1627 if (old_table != vmp->vm_hash0)
1628 vmem_free(vmem_hash_arena, old_table,
1629 old_size * sizeof (void *));
1633 * Perform periodic maintenance on all vmem arenas.
1637 vmem_update(void *dummy)
1641 (void) mutex_lock(&vmem_list_lock);
1642 for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1644 * If threads are waiting for resources, wake them up
1645 * periodically so they can issue another vmem_reap()
1646 * to reclaim resources cached by the slab allocator.
1648 (void) cond_broadcast(&vmp->vm_cv);
1651 * Rescale the hash table to keep the hash chains short.
1653 vmem_hash_rescale(vmp);
1655 (void) mutex_unlock(&vmem_list_lock);
1659 * If vmem_init is called again, we need to be able to reset the world.
1660 * That includes resetting the statics back to their original values.
1665 #ifdef UMEM_STANDALONE
1667 vmem_populators = 0;
1668 vmem_segfree = NULL;
1670 vmem_internal_arena = NULL;
1671 vmem_seg_arena = NULL;
1672 vmem_hash_arena = NULL;
1673 vmem_vmem_arena = NULL;
1675 vmem_heap_alloc = NULL;
1676 vmem_heap_free = NULL;
1678 bzero(vmem0, sizeof (vmem0));
1679 bzero(vmem_populator, sizeof (vmem_populator));
1680 bzero(vmem_seg0, sizeof (vmem_seg0));
1685 * Prepare vmem for use.
1688 vmem_init(const char *parent_name, size_t parent_quantum,
1689 vmem_alloc_t *parent_alloc, vmem_free_t *parent_free,
1690 const char *heap_name, void *heap_start, size_t heap_size,
1691 size_t heap_quantum, vmem_alloc_t *heap_alloc, vmem_free_t *heap_free)
1694 int nseg = VMEM_SEG_INITIAL;
1695 vmem_t *parent, *heap;
1697 ASSERT(vmem_internal_arena == NULL);
1700 vmem_putseg_global(&vmem_seg0[nseg]);
1702 if (parent_name != NULL) {
1703 parent = vmem_create(parent_name,
1704 heap_start, heap_size, parent_quantum,
1705 NULL, NULL, NULL, 0,
1706 VM_SLEEP | VMC_POPULATOR);
1710 ASSERT(parent_alloc == NULL && parent_free == NULL);
1714 heap = vmem_create(heap_name,
1715 heap_start, heap_size, heap_quantum,
1716 parent_alloc, parent_free, parent, 0,
1717 VM_SLEEP | VMC_POPULATOR);
1720 vmem_heap_alloc = heap_alloc;
1721 vmem_heap_free = heap_free;
1723 vmem_internal_arena = vmem_create("vmem_internal",
1724 NULL, 0, heap_quantum,
1725 heap_alloc, heap_free, heap, 0,
1726 VM_SLEEP | VMC_POPULATOR);
1728 vmem_seg_arena = vmem_create("vmem_seg",
1729 NULL, 0, heap_quantum,
1730 vmem_alloc, vmem_free, vmem_internal_arena, 0,
1731 VM_SLEEP | VMC_POPULATOR);
1733 vmem_hash_arena = vmem_create("vmem_hash",
1735 vmem_alloc, vmem_free, vmem_internal_arena, 0,
1738 vmem_vmem_arena = vmem_create("vmem_vmem",
1739 vmem0, sizeof (vmem0), 1,
1740 vmem_alloc, vmem_free, vmem_internal_arena, 0,
1743 for (id = 0; id < vmem_id; id++)
1744 (void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1745 1, 0, 0, &vmem0[id], &vmem0[id + 1],
1746 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1755 * This size must be a multiple of the minimum required alignment,
1756 * since vmem_populate allocates them compactly.
1758 vmem_seg_size = P2ROUNDUP(offsetof(vmem_seg_t, vs_thread),
1763 * Lockup and release, for fork1(2) handling.
1770 (void) mutex_lock(&vmem_list_lock);
1771 (void) mutex_lock(&vmem_nosleep_lock.vmpl_mutex);
1774 * Lock up and broadcast all arenas.
1776 for (cur = vmem_list; cur != NULL; cur = cur->vm_next) {
1777 (void) mutex_lock(&cur->vm_lock);
1778 (void) cond_broadcast(&cur->vm_cv);
1781 (void) mutex_lock(&vmem_segfree_lock);
1789 (void) mutex_unlock(&vmem_nosleep_lock.vmpl_mutex);
1791 for (cur = vmem_list; cur != NULL; cur = cur->vm_next)
1792 (void) mutex_unlock(&cur->vm_lock);
1794 (void) mutex_unlock(&vmem_segfree_lock);
1795 (void) mutex_unlock(&vmem_list_lock);