/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * The 512-byte leaf is broken into 32 16-byte chunks.
 * Chunk number n means l_chunk[n], even though the header precedes it.
 * The names are stored null-terminated.
 */
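/*
 * For example, chunk n is addressed as ZAP_LEAF_CHUNK(l, n); an entry
 * chunk (struct zap_leaf_entry) heads each name/value pair and points
 * at chains of array chunks holding the name bytes and the value bytes.
 * (The current chunk size and per-block chunk count come from
 * ZAP_LEAF_NUMCHUNKS() and friends in zap_leaf.h, and scale with the
 * block size; the figures above are for the minimum block.)
 */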
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);

#define CHAIN_END 0xffff /* end of the chunk chain */

/* half the (current) minimum block size */
#define MAX_ARRAY_BYTES (8<<10)

#define LEAF_HASH(l, h) \
        ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
        ((h) >> (64 - ZAP_LEAF_HASH_SHIFT(l)-(l)->l_phys->l_hdr.lh_prefix_len)))

#define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)])
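/*
 * Worked example: with ZAP_LEAF_HASH_SHIFT(l) == 5 and lh_prefix_len
 * == 10, LEAF_HASH() shifts the 64-bit hash h right by 64 - 5 - 10 = 49
 * and masks with 31; i.e. it skips the 10 prefix bits already consumed
 * to select this leaf and uses the next 5 hash bits as the bucket
 * index.  (The values here are only illustrative.)
 */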
static void
zap_memset(void *a, int c, size_t n)
{
        char *cp = a;
        char *cpend = cp + n;

        while (cp < cpend)
                *cp++ = c;
}

static void
stv(int len, void *addr, uint64_t value)
{
        switch (len) {
        case 1:
                *(uint8_t *)addr = value;
                return;
        case 2:
                *(uint16_t *)addr = value;
                return;
        case 4:
                *(uint32_t *)addr = value;
                return;
        case 8:
                *(uint64_t *)addr = value;
                return;
        }
        ASSERT(!"bad int len");
}

static uint64_t
ldv(int len, const void *addr)
{
        switch (len) {
        case 1:
                return (*(uint8_t *)addr);
        case 2:
                return (*(uint16_t *)addr);
        case 4:
                return (*(uint32_t *)addr);
        case 8:
                return (*(uint64_t *)addr);
        }
        ASSERT(!"bad int len");
        return (0xFEEDFACEDEADBEEFULL);
}
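/*
 * stv()/ldv() store and load native-endian integers of a given byte
 * width.  A hypothetical round trip through them:
 *
 *	uint8_t b[8];
 *	stv(4, b, 0x11223344);		(stores a native-endian uint32_t)
 *	ASSERT3U(ldv(4, b), ==, 0x11223344);
 */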
void
zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
{
        int i;
        zap_leaf_t l;
        l.l_bs = highbit(size)-1;
        l.l_phys = buf;

        buf->l_hdr.lh_block_type = BSWAP_64(buf->l_hdr.lh_block_type);
        buf->l_hdr.lh_prefix = BSWAP_64(buf->l_hdr.lh_prefix);
        buf->l_hdr.lh_magic = BSWAP_32(buf->l_hdr.lh_magic);
        buf->l_hdr.lh_nfree = BSWAP_16(buf->l_hdr.lh_nfree);
        buf->l_hdr.lh_nentries = BSWAP_16(buf->l_hdr.lh_nentries);
        buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len);
        buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist);

        for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
                buf->l_hash[i] = BSWAP_16(buf->l_hash[i]);

        for (i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
                zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i);
                struct zap_leaf_entry *le;

                switch (lc->l_free.lf_type) {
                case ZAP_CHUNK_ENTRY:
                        le = &lc->l_entry;

                        le->le_type = BSWAP_8(le->le_type);
                        le->le_int_size = BSWAP_8(le->le_int_size);
                        le->le_next = BSWAP_16(le->le_next);
                        le->le_name_chunk = BSWAP_16(le->le_name_chunk);
                        le->le_name_length = BSWAP_16(le->le_name_length);
                        le->le_value_chunk = BSWAP_16(le->le_value_chunk);
                        le->le_value_length = BSWAP_16(le->le_value_length);
                        le->le_cd = BSWAP_32(le->le_cd);
                        le->le_hash = BSWAP_64(le->le_hash);
                        break;
                case ZAP_CHUNK_FREE:
                        lc->l_free.lf_type = BSWAP_8(lc->l_free.lf_type);
                        lc->l_free.lf_next = BSWAP_16(lc->l_free.lf_next);
                        break;
                case ZAP_CHUNK_ARRAY:
                        lc->l_array.la_type = BSWAP_8(lc->l_array.la_type);
                        lc->l_array.la_next = BSWAP_16(lc->l_array.la_next);
                        /* la_array doesn't need swapping */
                        break;
                default:
                        ASSERT(!"bad leaf type");
                }
        }
}
void
zap_leaf_init(zap_leaf_t *l, boolean_t sort)
{
        int i;

        l->l_bs = highbit(l->l_dbuf->db_size)-1;
        zap_memset(&l->l_phys->l_hdr, 0, sizeof (struct zap_leaf_header));
        zap_memset(l->l_phys->l_hash, CHAIN_END, 2*ZAP_LEAF_HASH_NUMENTRIES(l));
        for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
                ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE;
                ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1;
        }
        ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)-1).l_free.lf_next = CHAIN_END;
        l->l_phys->l_hdr.lh_block_type = ZBT_LEAF;
        l->l_phys->l_hdr.lh_magic = ZAP_LEAF_MAGIC;
        l->l_phys->l_hdr.lh_nfree = ZAP_LEAF_NUMCHUNKS(l);
        if (sort)
                l->l_phys->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;
}
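/*
 * After zap_leaf_init(), the chunks form a single free list threaded
 * through lf_next: 0 -> 1 -> ... -> ZAP_LEAF_NUMCHUNKS(l)-1 ->
 * CHAIN_END, with lh_freelist at 0 because the header was just zeroed.
 */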
/*
 * Routines which manipulate leaf chunks (l_chunk[]).
 */
static uint16_t
zap_leaf_chunk_alloc(zap_leaf_t *l)
{
        int chunk;

        ASSERT(l->l_phys->l_hdr.lh_nfree > 0);

        chunk = l->l_phys->l_hdr.lh_freelist;
        ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
        ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE);

        l->l_phys->l_hdr.lh_freelist = ZAP_LEAF_CHUNK(l, chunk).l_free.lf_next;

        l->l_phys->l_hdr.lh_nfree--;

        return (chunk);
}
static void
zap_leaf_chunk_free(zap_leaf_t *l, uint16_t chunk)
{
        struct zap_leaf_free *zlf = &ZAP_LEAF_CHUNK(l, chunk).l_free;
        ASSERT3U(l->l_phys->l_hdr.lh_nfree, <, ZAP_LEAF_NUMCHUNKS(l));
        ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
        ASSERT(zlf->lf_type != ZAP_CHUNK_FREE);

        zlf->lf_type = ZAP_CHUNK_FREE;
        zlf->lf_next = l->l_phys->l_hdr.lh_freelist;
        bzero(zlf->lf_pad, sizeof (zlf->lf_pad)); /* help it to compress */
        l->l_phys->l_hdr.lh_freelist = chunk;

        l->l_phys->l_hdr.lh_nfree++;
}
/*
 * Routines which manipulate leaf arrays (zap_leaf_array type chunks).
 */
static uint16_t
zap_leaf_array_create(zap_leaf_t *l, const char *buf,
    int integer_size, int num_integers)
{
        uint16_t chunk_head;
        uint16_t *chunkp = &chunk_head;
        int byten = 0;
        uint64_t value;
        int shift = (integer_size-1)*8;
        int len = num_integers;

        ASSERT3U(num_integers * integer_size, <, MAX_ARRAY_BYTES);

        while (len > 0) {
                uint16_t chunk = zap_leaf_chunk_alloc(l);
                struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
                int i;

                la->la_type = ZAP_CHUNK_ARRAY;
                for (i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
                        if (byten == 0)
                                value = ldv(integer_size, buf);
                        la->la_array[i] = value >> shift;
                        value <<= 8;
                        if (++byten == integer_size) {
                                byten = 0;
                                buf += integer_size;
                                if (--len == 0)
                                        break;
                        }
                }

                *chunkp = chunk;
                chunkp = &la->la_next;
        }
        *chunkp = CHAIN_END;

        return (chunk_head);
}
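/*
 * The loop above packs each integer big-endian, most significant byte
 * first.  E.g. storing the single 4-byte integer 0x11223344 emits the
 * bytes 0x11 0x22 0x33 0x44 into la_array[], so the on-disk encoding
 * is independent of host byte order.
 */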
static void
zap_leaf_array_free(zap_leaf_t *l, uint16_t *chunkp)
{
        uint16_t chunk = *chunkp;

        *chunkp = CHAIN_END;

        while (chunk != CHAIN_END) {
                int nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next;
                ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_array.la_type, ==,
                    ZAP_CHUNK_ARRAY);
                zap_leaf_chunk_free(l, chunk);
                chunk = nextchunk;
        }
}
/* array_len and buf_len are in integers, not bytes */
static void
zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
    int array_int_len, int array_len, int buf_int_len, uint64_t buf_len,
    char *buf)
{
        int len = MIN(array_len, buf_len);
        int byten = 0;
        uint64_t value = 0;

        ASSERT3U(array_int_len, <=, buf_int_len);

        /* Fast path for one 8-byte integer */
        if (array_int_len == 8 && buf_int_len == 8 && len == 1) {
                struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
                uint8_t *ip = la->la_array;
                uint64_t *buf64 = (uint64_t *)buf;

                *buf64 = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
                    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
                    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
                    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
                return;
        }

        /* Fast path for an array of 1-byte integers (eg. the entry name) */
        if (array_int_len == 1 && buf_int_len == 1 &&
            buf_len > array_len + ZAP_LEAF_ARRAY_BYTES) {
                while (chunk != CHAIN_END) {
                        struct zap_leaf_array *la =
                            &ZAP_LEAF_CHUNK(l, chunk).l_array;
                        bcopy(la->la_array, buf, ZAP_LEAF_ARRAY_BYTES);
                        buf += ZAP_LEAF_ARRAY_BYTES;
                        chunk = la->la_next;
                }
                return;
        }

        while (len > 0) {
                struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
                int i;

                ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
                for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
                        value = (value << 8) | la->la_array[i];
                        byten++;
                        if (byten == array_int_len) {
                                stv(buf_int_len, buf, value);
                                byten = 0;
                                len--;
                                if (len == 0)
                                        return;
                                buf += buf_int_len;
                        }
                }
                chunk = la->la_next;
        }
}
/*
 * Only to be used on 8-bit arrays.
 * array_len is actual len in bytes (not encoded le_value_length).
 * namenorm is null-terminated.
 */
static boolean_t
zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn, int chunk, int array_len)
{
        int bseen = 0;

        if (zn->zn_matchtype == MT_FIRST) {
                char *thisname = kmem_alloc(array_len, KM_SLEEP);
                boolean_t match;

                zap_leaf_array_read(l, chunk, 1, array_len, 1,
                    array_len, thisname);
                match = zap_match(zn, thisname);
                kmem_free(thisname, array_len);
                return (match);
        }

        /* Fast path for exact matching */
        while (bseen < array_len) {
                struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
                int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
                ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
                if (bcmp(la->la_array, zn->zn_name_orij + bseen, toread))
                        break;
                chunk = la->la_next;
                bseen += toread;
        }
        return (bseen == array_len);
}
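/*
 * E.g. a normalization-aware lookup arrives with MT_FIRST and takes
 * the zap_match() path above (comparing normalized name forms), while
 * a plain MT_EXACT lookup takes the chunk-by-chunk bcmp() fast path.
 */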
/*
 * Routines which manipulate leaf entries.
 */
int
zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh)
{
        uint16_t *chunkp;
        struct zap_leaf_entry *le;

        ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

again:
        for (chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash);
            *chunkp != CHAIN_END; chunkp = &le->le_next) {
                uint16_t chunk = *chunkp;
                le = ZAP_LEAF_ENTRY(l, chunk);

                ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
                ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

                if (le->le_hash != zn->zn_hash)
                        continue;

                /*
                 * NB: the entry chain is always sorted by cd on
                 * normalized zap objects, so this will find the
                 * lowest-cd match for MT_FIRST.
                 */
                ASSERT(zn->zn_matchtype == MT_EXACT ||
                    (l->l_phys->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED));
                if (zap_leaf_array_match(l, zn, le->le_name_chunk,
                    le->le_name_length)) {
                        zeh->zeh_num_integers = le->le_value_length;
                        zeh->zeh_integer_size = le->le_int_size;
                        zeh->zeh_cd = le->le_cd;
                        zeh->zeh_hash = le->le_hash;
                        zeh->zeh_chunkp = chunkp;
                        zeh->zeh_leaf = l;
                        return (0);
                }
        }

        /*
         * NB: we could of course do this in one pass, but that would be
         * a pain.  We'll see if MT_BEST is even used much.
         */
        if (zn->zn_matchtype == MT_BEST) {
                zn->zn_matchtype = MT_FIRST;
                goto again;
        }

        return (ENOENT);
}
/* Return (h1,cd1 >= h2,cd2) */
#define HCD_GTEQ(h1, cd1, h2, cd2) \
        ((h1 > h2) ? TRUE : ((h1 == h2 && cd1 >= cd2) ? TRUE : FALSE))
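/*
 * E.g. HCD_GTEQ(5, 0, 4, 7) is TRUE (the hashes differ, so only they
 * are compared), while HCD_GTEQ(5, 0, 5, 7) is FALSE (equal hashes
 * fall back to comparing the collision differentiators, and 0 < 7).
 */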
int
zap_leaf_lookup_closest(zap_leaf_t *l,
    uint64_t h, uint32_t cd, zap_entry_handle_t *zeh)
{
        uint16_t chunk;
        uint64_t besth = -1ULL;
        uint32_t bestcd = ZAP_MAXCD;
        uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1;
        uint16_t lh;
        struct zap_leaf_entry *le;

        ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

        for (lh = LEAF_HASH(l, h); lh <= bestlh; lh++) {
                for (chunk = l->l_phys->l_hash[lh];
                    chunk != CHAIN_END; chunk = le->le_next) {
                        le = ZAP_LEAF_ENTRY(l, chunk);

                        ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
                        ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

                        if (HCD_GTEQ(le->le_hash, le->le_cd, h, cd) &&
                            HCD_GTEQ(besth, bestcd, le->le_hash, le->le_cd)) {
                                ASSERT3U(bestlh, >=, lh);
                                bestlh = lh;
                                besth = le->le_hash;
                                bestcd = le->le_cd;

                                zeh->zeh_num_integers = le->le_value_length;
                                zeh->zeh_integer_size = le->le_int_size;
                                zeh->zeh_cd = le->le_cd;
                                zeh->zeh_hash = le->le_hash;
                                zeh->zeh_fakechunk = chunk;
                                zeh->zeh_chunkp = &zeh->zeh_fakechunk;
                                zeh->zeh_leaf = l;
                        }
                }
        }

        return (bestcd == ZAP_MAXCD ? ENOENT : 0);
}
int
zap_entry_read(const zap_entry_handle_t *zeh,
    uint8_t integer_size, uint64_t num_integers, void *buf)
{
        struct zap_leaf_entry *le =
            ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
        ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

        if (le->le_int_size > integer_size)
                return (EINVAL);

        zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk, le->le_int_size,
            le->le_value_length, integer_size, num_integers, buf);

        if (zeh->zeh_num_integers > num_integers)
                return (EOVERFLOW);
        return (0);
}
int
zap_entry_read_name(const zap_entry_handle_t *zeh, uint16_t buflen, char *buf)
{
        struct zap_leaf_entry *le =
            ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
        ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

        zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 1,
            le->le_name_length, 1, buflen, buf);
        if (le->le_name_length > buflen)
                return (EOVERFLOW);
        return (0);
}
int
zap_entry_update(zap_entry_handle_t *zeh,
    uint8_t integer_size, uint64_t num_integers, const void *buf)
{
        int delta_chunks;
        zap_leaf_t *l = zeh->zeh_leaf;
        struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp);

        delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) -
            ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_length * le->le_int_size);

        if ((int)l->l_phys->l_hdr.lh_nfree < delta_chunks)
                return (EAGAIN);

        /*
         * We should search other chained leaves (via
         * zap_entry_remove,create?) otherwise returning EAGAIN will
         * just send us into an infinite loop if we have to chain
         * another leaf block, rather than being able to split this
         * block.
         */

        zap_leaf_array_free(l, &le->le_value_chunk);
        le->le_value_chunk =
            zap_leaf_array_create(l, buf, integer_size, num_integers);
        le->le_value_length = num_integers;
        le->le_int_size = integer_size;
        return (0);
}
void
zap_entry_remove(zap_entry_handle_t *zeh)
{
        uint16_t entry_chunk;
        struct zap_leaf_entry *le;
        zap_leaf_t *l = zeh->zeh_leaf;

        ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk);

        entry_chunk = *zeh->zeh_chunkp;
        le = ZAP_LEAF_ENTRY(l, entry_chunk);
        ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

        zap_leaf_array_free(l, &le->le_name_chunk);
        zap_leaf_array_free(l, &le->le_value_chunk);

        *zeh->zeh_chunkp = le->le_next;
        zap_leaf_chunk_free(l, entry_chunk);

        l->l_phys->l_hdr.lh_nentries--;
}
int
zap_entry_create(zap_leaf_t *l, const char *name, uint64_t h, uint32_t cd,
    uint8_t integer_size, uint64_t num_integers, const void *buf,
    zap_entry_handle_t *zeh)
{
        uint16_t chunk;
        uint16_t *chunkp;
        struct zap_leaf_entry *le;
        uint64_t namelen, valuelen;
        int numchunks;

        valuelen = integer_size * num_integers;
        namelen = strlen(name) + 1;
        ASSERT(namelen >= 2);

        numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(namelen) +
            ZAP_LEAF_ARRAY_NCHUNKS(valuelen);
        if (numchunks > ZAP_LEAF_NUMCHUNKS(l))
                return (E2BIG);

        if (cd == ZAP_MAXCD) {
                /* find the lowest unused cd */
                if (l->l_phys->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED) {
                        cd = 0;

                        for (chunk = *LEAF_HASH_ENTPTR(l, h);
                            chunk != CHAIN_END; chunk = le->le_next) {
                                le = ZAP_LEAF_ENTRY(l, chunk);
                                if (le->le_cd > cd)
                                        break;
                                if (le->le_hash == h) {
                                        ASSERT3U(cd, ==, le->le_cd);
                                        cd++;
                                }
                        }
                } else {
                        /* old unsorted format; do it the O(n^2) way */
                        for (cd = 0; cd < ZAP_MAXCD; cd++) {
                                for (chunk = *LEAF_HASH_ENTPTR(l, h);
                                    chunk != CHAIN_END; chunk = le->le_next) {
                                        le = ZAP_LEAF_ENTRY(l, chunk);
                                        if (le->le_hash == h &&
                                            le->le_cd == cd) {
                                                break;
                                        }
                                }
                                /* If this cd is not in use, we are good. */
                                if (chunk == CHAIN_END)
                                        break;
                        }
                }
                /*
                 * we would run out of space in a block before we could
                 * have ZAP_MAXCD entries
                 */
                ASSERT3U(cd, <, ZAP_MAXCD);
        }
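        /*
         * E.g. if this hash chain already holds entries with the same
         * hash at cd 0 and cd 1, the sorted scan above stops with
         * cd == 2, the lowest collision differentiator not yet in use.
         */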
        if (l->l_phys->l_hdr.lh_nfree < numchunks)
                return (EAGAIN);

        /* make the entry */
        chunk = zap_leaf_chunk_alloc(l);
        le = ZAP_LEAF_ENTRY(l, chunk);
        le->le_type = ZAP_CHUNK_ENTRY;
        le->le_name_chunk = zap_leaf_array_create(l, name, 1, namelen);
        le->le_name_length = namelen;
        le->le_value_chunk =
            zap_leaf_array_create(l, buf, integer_size, num_integers);
        le->le_value_length = num_integers;
        le->le_int_size = integer_size;
        le->le_hash = h;
        le->le_cd = cd;

        /* link it into the hash chain */
        /* XXX if we did the search above, we could just use that */
        chunkp = zap_leaf_rehash_entry(l, chunk);

        l->l_phys->l_hdr.lh_nentries++;

        zeh->zeh_leaf = l;
        zeh->zeh_num_integers = num_integers;
        zeh->zeh_integer_size = le->le_int_size;
        zeh->zeh_cd = le->le_cd;
        zeh->zeh_hash = le->le_hash;
        zeh->zeh_chunkp = chunkp;

        return (0);
}
/*
 * Determine if there is another entry with the same normalized form.
 * For performance purposes, either zn or name must be provided (the
 * other can be NULL).  Note, there usually won't be any hash
 * conflicts, in which case we don't need the concatenated/normalized
 * form of the name.  But all callers have one of these on hand anyway,
 * so might as well take advantage.  A cleaner but slower interface
 * would accept neither argument, and compute the normalized name as
 * needed (using zap_name_alloc(zap_entry_read_name(zeh))).
 */
boolean_t
zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn,
    const char *name, zap_t *zap)
{
        uint16_t chunk;
        struct zap_leaf_entry *le;
        boolean_t allocdzn = B_FALSE;

        if (zap->zap_normflags == 0)
                return (B_FALSE);

        for (chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash);
            chunk != CHAIN_END; chunk = le->le_next) {
                le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk);
                if (le->le_hash != zeh->zeh_hash)
                        continue;
                if (le->le_cd == zeh->zeh_cd)
                        continue;

                if (zn == NULL) {
                        zn = zap_name_alloc(zap, name, MT_FIRST);
                        allocdzn = B_TRUE;
                }
                if (zap_leaf_array_match(zeh->zeh_leaf, zn,
                    le->le_name_chunk, le->le_name_length)) {
                        if (allocdzn)
                                zap_name_free(zn);
                        return (B_TRUE);
                }
        }
        if (allocdzn)
                zap_name_free(zn);
        return (B_FALSE);
}
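/*
 * E.g. on a normalized (say, case-insensitive) zap, an existing "FOO"
 * and a new "foo" share a hash but differ in cd; the normalized-name
 * match above reports the conflict.
 */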
/*
 * Routines for transferring entries between leafs.
 */
static uint16_t *
zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry)
{
        struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
        struct zap_leaf_entry *le2;
        uint16_t *chunkp;

        /*
         * keep the entry chain sorted by cd
         * NB: this will not cause problems for unsorted leafs, though
         * it is unnecessary there.
         */
        for (chunkp = LEAF_HASH_ENTPTR(l, le->le_hash);
            *chunkp != CHAIN_END; chunkp = &le2->le_next) {
                le2 = ZAP_LEAF_ENTRY(l, *chunkp);
                if (le2->le_cd > le->le_cd)
                        break;
        }

        le->le_next = *chunkp;
        *chunkp = entry;
        return (chunkp);
}
static uint16_t
zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
{
        uint16_t new_chunk;
        uint16_t *nchunkp = &new_chunk;

        while (chunk != CHAIN_END) {
                uint16_t nchunk = zap_leaf_chunk_alloc(nl);
                struct zap_leaf_array *nla =
                    &ZAP_LEAF_CHUNK(nl, nchunk).l_array;
                struct zap_leaf_array *la =
                    &ZAP_LEAF_CHUNK(l, chunk).l_array;
                int nextchunk = la->la_next;

                ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
                ASSERT3U(nchunk, <, ZAP_LEAF_NUMCHUNKS(l));

                *nla = *la; /* structure assignment */

                zap_leaf_chunk_free(l, chunk);
                chunk = nextchunk;
                *nchunkp = nchunk;
                nchunkp = &nla->la_next;
        }
        *nchunkp = CHAIN_END;
        return (new_chunk);
}
static void
zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
{
        struct zap_leaf_entry *le, *nle;
        uint16_t chunk;

        le = ZAP_LEAF_ENTRY(l, entry);
        ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);

        chunk = zap_leaf_chunk_alloc(nl);
        nle = ZAP_LEAF_ENTRY(nl, chunk);
        *nle = *le; /* structure assignment */

        (void) zap_leaf_rehash_entry(nl, chunk);

        nle->le_name_chunk = zap_leaf_transfer_array(l, le->le_name_chunk, nl);
        nle->le_value_chunk =
            zap_leaf_transfer_array(l, le->le_value_chunk, nl);

        zap_leaf_chunk_free(l, entry);

        l->l_phys->l_hdr.lh_nentries--;
        nl->l_phys->l_hdr.lh_nentries++;
}
/*
 * Transfer the entries whose hash prefix ends in 1 to the new leaf.
 */
void
zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
{
        int i;
        int bit = 64 - 1 - l->l_phys->l_hdr.lh_prefix_len;

        /* set new prefix and prefix_len */
        l->l_phys->l_hdr.lh_prefix <<= 1;
        l->l_phys->l_hdr.lh_prefix_len++;
        nl->l_phys->l_hdr.lh_prefix = l->l_phys->l_hdr.lh_prefix | 1;
        nl->l_phys->l_hdr.lh_prefix_len = l->l_phys->l_hdr.lh_prefix_len;
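        /*
         * E.g. a leaf with lh_prefix 0b10 and lh_prefix_len 2 splits
         * into 0b100 (this leaf) and 0b101 (nl), both of length 3;
         * 'bit' computed above is 61, the hash bit that now
         * distinguishes the two leaves.
         */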
        /* break existing hash chains */
        zap_memset(l->l_phys->l_hash, CHAIN_END, 2*ZAP_LEAF_HASH_NUMENTRIES(l));

        if (sort)
                l->l_phys->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;

        /*
         * Transfer entries whose hash bit 'bit' is set to nl; rehash
         * the remaining entries
         *
         * NB: We could find entries via the hashtable instead.  That
         * would be O(hashents+numents) rather than O(numblks+numents),
         * but this accesses memory more sequentially, and when we're
         * called, the block is usually pretty full.
         */
        for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
                struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i);
                if (le->le_type != ZAP_CHUNK_ENTRY)
                        continue;

                if (le->le_hash & (1ULL << bit))
                        zap_leaf_transfer_entry(l, i, nl);
                else
                        (void) zap_leaf_rehash_entry(l, i);
        }
}
void
zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
{
        int i, n;

        n = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift -
            l->l_phys->l_hdr.lh_prefix_len;
        n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
        zs->zs_leafs_with_2n_pointers[n]++;

        n = l->l_phys->l_hdr.lh_nentries/5;
        n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
        zs->zs_blocks_with_n5_entries[n]++;

        n = ((1<<FZAP_BLOCK_SHIFT(zap)) -
            l->l_phys->l_hdr.lh_nfree * (ZAP_LEAF_ARRAY_BYTES+1))*10 /
            (1<<FZAP_BLOCK_SHIFT(zap));
        n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
        zs->zs_blocks_n_tenths_full[n]++;

        for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
                int nentries = 0;
                int chunk = l->l_phys->l_hash[i];

                while (chunk != CHAIN_END) {
                        struct zap_leaf_entry *le =
                            ZAP_LEAF_ENTRY(l, chunk);

                        n = 1 + ZAP_LEAF_ARRAY_NCHUNKS(le->le_name_length) +
                            ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_length *
                            le->le_int_size);
                        n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
                        zs->zs_entries_using_n_chunks[n]++;

                        chunk = le->le_next;
                        nentries++;
                }

                n = nentries;
                n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
                zs->zs_buckets_with_n_entries[n]++;
        }
}