4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/zfs_context.h>
27 #include <sys/dmu_impl.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_dir.h>
32 #include <sys/dmu_tx.h>
35 #include <sys/dmu_zfetch.h>
37 #include <sys/sa_impl.h>
39 static void dbuf_destroy(dmu_buf_impl_t *db);
40 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
41 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
44 * Global data structures and functions for the dbuf cache.
46 static kmem_cache_t *dbuf_cache;
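/*
 * kmem cache constructor and destructor for dmu_buf_impl_t: initialize
 * (and tear down) the dbuf's mutex, condition variable, hold refcount,
 * and list link.
 */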
50 dbuf_cons(void *vdb, void *unused, int kmflag)
52 dmu_buf_impl_t *db = vdb;
53 bzero(db, sizeof (dmu_buf_impl_t));
55 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
56 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
57 refcount_create(&db->db_holds);
58 list_link_init(&db->db_link);
64 dbuf_dest(void *vdb, void *unused)
66 dmu_buf_impl_t *db = vdb;
67 mutex_destroy(&db->db_mtx);
68 cv_destroy(&db->db_changed);
69 refcount_destroy(&db->db_holds);
73 * dbuf hash table routines
75 static dbuf_hash_table_t dbuf_hash_table;
77 static uint64_t dbuf_hash_count;
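/*
 * Hash an (objset, object, level, blkid) tuple by folding the low-order
 * bytes of each component through the ZFS CRC64 table, then xor-ing in
 * the high-order bits that the byte-wide folding does not reach.
 */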
80 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
82 uintptr_t osv = (uintptr_t)os;
85 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
86 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
87 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
88 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
89 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
90 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
91 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
93 crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);
98 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
100 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
101 ((dbuf)->db.db_object == (obj) && \
102 (dbuf)->db_objset == (os) && \
103 (dbuf)->db_level == (level) && \
104 (dbuf)->db_blkid == (blkid))
107 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
109 dbuf_hash_table_t *h = &dbuf_hash_table;
110 objset_t *os = dn->dn_objset;
117 hv = DBUF_HASH(os, obj, level, blkid);
118 idx = hv & h->hash_table_mask;
120 mutex_enter(DBUF_HASH_MUTEX(h, idx));
121 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
122 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
123 mutex_enter(&db->db_mtx);
124 if (db->db_state != DB_EVICTING) {
125 mutex_exit(DBUF_HASH_MUTEX(h, idx));
128 mutex_exit(&db->db_mtx);
131 mutex_exit(DBUF_HASH_MUTEX(h, idx));
136 * Insert an entry into the hash table. If there is already an element
137 * equal to elem in the hash table, then the already existing element
138 * will be returned and the new element will not be inserted.
139 * Otherwise returns NULL.
141 static dmu_buf_impl_t *
142 dbuf_hash_insert(dmu_buf_impl_t *db)
144 dbuf_hash_table_t *h = &dbuf_hash_table;
145 objset_t *os = db->db_objset;
146 uint64_t obj = db->db.db_object;
147 int level = db->db_level;
148 uint64_t blkid, hv, idx;
151 blkid = db->db_blkid;
152 hv = DBUF_HASH(os, obj, level, blkid);
153 idx = hv & h->hash_table_mask;
155 mutex_enter(DBUF_HASH_MUTEX(h, idx));
156 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
157 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
158 mutex_enter(&dbf->db_mtx);
159 if (dbf->db_state != DB_EVICTING) {
160 mutex_exit(DBUF_HASH_MUTEX(h, idx));
163 mutex_exit(&dbf->db_mtx);
167 mutex_enter(&db->db_mtx);
168 db->db_hash_next = h->hash_table[idx];
169 h->hash_table[idx] = db;
170 mutex_exit(DBUF_HASH_MUTEX(h, idx));
171 atomic_add_64(&dbuf_hash_count, 1);
177 * Remove an entry from the hash table. This operation will
178 * fail if there are any existing holds on the db.
181 dbuf_hash_remove(dmu_buf_impl_t *db)
183 dbuf_hash_table_t *h = &dbuf_hash_table;
185 dmu_buf_impl_t *dbf, **dbp;
187 hv = DBUF_HASH(db->db_objset, db->db.db_object,
188 db->db_level, db->db_blkid);
189 idx = hv & h->hash_table_mask;
192 * We mustn't hold db_mtx to maintain lock ordering:
193 * DBUF_HASH_MUTEX > db_mtx.
195 ASSERT(refcount_is_zero(&db->db_holds));
196 ASSERT(db->db_state == DB_EVICTING);
197 ASSERT(!MUTEX_HELD(&db->db_mtx));
199 mutex_enter(DBUF_HASH_MUTEX(h, idx));
200 dbp = &h->hash_table[idx];
201 while ((dbf = *dbp) != db) {
202 dbp = &dbf->db_hash_next;
205 *dbp = db->db_hash_next;
206 db->db_hash_next = NULL;
207 mutex_exit(DBUF_HASH_MUTEX(h, idx));
208 atomic_add_64(&dbuf_hash_count, -1);
211 static arc_evict_func_t dbuf_do_evict;
214 dbuf_evict_user(dmu_buf_impl_t *db)
216 ASSERT(MUTEX_HELD(&db->db_mtx));
218 if (db->db_level != 0 || db->db_evict_func == NULL)
221 if (db->db_user_data_ptr_ptr)
222 *db->db_user_data_ptr_ptr = db->db.db_data;
223 db->db_evict_func(&db->db, db->db_user_ptr);
224 db->db_user_ptr = NULL;
225 db->db_user_data_ptr_ptr = NULL;
226 db->db_evict_func = NULL;
230 dbuf_is_metadata(dmu_buf_impl_t *db)
232 if (db->db_level > 0) {
235 boolean_t is_metadata;
238 is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata;
241 return (is_metadata);
246 dbuf_evict(dmu_buf_impl_t *db)
248 ASSERT(MUTEX_HELD(&db->db_mtx));
249 ASSERT(db->db_buf == NULL);
250 ASSERT(db->db_data_pending == NULL);
259 uint64_t hsize = 1ULL << 16;
260 dbuf_hash_table_t *h = &dbuf_hash_table;
264 * The hash table is big enough to fill all of physical memory
265 * with an average 4K block size. The table will take up
266 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
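 * For example, with 8GB of physical memory the loop below doubles hsize
 * from 2^16 until 2^21 * 4096 >= 8GB, i.e. 2^21 buckets, for a 16MB
 * array of 8-byte bucket pointers.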
268 while (hsize * 4096 < physmem * PAGESIZE)
272 h->hash_table_mask = hsize - 1;
273 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
274 if (h->hash_table == NULL) {
275 /* XXX - we should really return an error instead of assert */
276 ASSERT(hsize > (1ULL << 10));
281 dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
282 sizeof (dmu_buf_impl_t),
283 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
285 for (i = 0; i < DBUF_MUTEXES; i++)
286 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
292 dbuf_hash_table_t *h = &dbuf_hash_table;
295 for (i = 0; i < DBUF_MUTEXES; i++)
296 mutex_destroy(&h->hash_mutexes[i]);
297 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
298 kmem_cache_destroy(dbuf_cache);
307 dbuf_verify(dmu_buf_impl_t *db)
310 dbuf_dirty_record_t *dr;
312 ASSERT(MUTEX_HELD(&db->db_mtx));
314 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
317 ASSERT(db->db_objset != NULL);
321 ASSERT(db->db_parent == NULL);
322 ASSERT(db->db_blkptr == NULL);
324 ASSERT3U(db->db.db_object, ==, dn->dn_object);
325 ASSERT3P(db->db_objset, ==, dn->dn_objset);
326 ASSERT3U(db->db_level, <, dn->dn_nlevels);
327 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
328 db->db_blkid == DMU_SPILL_BLKID ||
329 !list_is_empty(&dn->dn_dbufs));
331 if (db->db_blkid == DMU_BONUS_BLKID) {
333 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
334 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
335 } else if (db->db_blkid == DMU_SPILL_BLKID) {
337 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
338 ASSERT3U(db->db.db_offset, ==, 0);
340 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
343 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
344 ASSERT(dr->dr_dbuf == db);
346 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
347 ASSERT(dr->dr_dbuf == db);
350 * We can't assert that db_size matches dn_datablksz because it
351 * can be momentarily different when another thread is doing dnode_set_blksz().
354 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
355 dr = db->db_data_pending;
357 * It should only be modified in syncing context, so
358 * make sure we only have one copy of the data.
360 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
363 /* verify db->db_blkptr */
365 if (db->db_parent == dn->dn_dbuf) {
366 /* db is pointed to by the dnode */
367 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
368 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
369 ASSERT(db->db_parent == NULL);
371 ASSERT(db->db_parent != NULL);
372 if (db->db_blkid != DMU_SPILL_BLKID)
373 ASSERT3P(db->db_blkptr, ==,
374 &dn->dn_phys->dn_blkptr[db->db_blkid]);
376 /* db is pointed to by an indirect block */
377 ASSERTV(int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT);
379 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
380 ASSERT3U(db->db_parent->db.db_object, ==, db->db.db_object);
383 * dnode_grow_indblksz() can make this fail if we don't
384 * have the struct_rwlock. XXX indblksz no longer
385 * grows. safe to do this now?
387 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
388 ASSERT3P(db->db_blkptr, ==,
389 ((blkptr_t *)db->db_parent->db.db_data +
390 db->db_blkid % epb));
394 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
395 (db->db_buf == NULL || db->db_buf->b_data) &&
396 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
397 db->db_state != DB_FILL && !dn->dn_free_txg) {
399 * If the blkptr isn't set but they have nonzero data,
400 * it had better be dirty, otherwise we'll lose that
401 * data when we evict this buffer.
403 if (db->db_dirtycnt == 0) {
404 ASSERTV(uint64_t *buf = db->db.db_data);
407 for (i = 0; i < db->db.db_size >> 3; i++) {
417 dbuf_update_data(dmu_buf_impl_t *db)
419 ASSERT(MUTEX_HELD(&db->db_mtx));
420 if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
421 ASSERT(!refcount_is_zero(&db->db_holds));
422 *db->db_user_data_ptr_ptr = db->db.db_data;
427 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
429 ASSERT(MUTEX_HELD(&db->db_mtx));
430 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
433 ASSERT(buf->b_data != NULL);
434 db->db.db_data = buf->b_data;
435 if (!arc_released(buf))
436 arc_set_callback(buf, dbuf_do_evict, db);
437 dbuf_update_data(db);
440 db->db.db_data = NULL;
441 if (db->db_state != DB_NOFILL)
442 db->db_state = DB_UNCACHED;
447 * Loan out an arc_buf for read. Return the loaned arc_buf.
450 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
454 mutex_enter(&db->db_mtx);
455 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
456 int blksz = db->db.db_size;
459 mutex_exit(&db->db_mtx);
460 DB_GET_SPA(&spa, db);
461 abuf = arc_loan_buf(spa, blksz);
462 bcopy(db->db.db_data, abuf->b_data, blksz);
465 arc_loan_inuse_buf(abuf, db);
466 dbuf_set_data(db, NULL);
467 mutex_exit(&db->db_mtx);
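/*
 * Translate an object byte offset into a block id.  For example, with a
 * 128K (2^17) data block size, offset 0x60000 falls in block 3; objects
 * small enough to fit in a single block always map to block 0.
 */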
473 dbuf_whichblock(dnode_t *dn, uint64_t offset)
475 if (dn->dn_datablkshift) {
476 return (offset >> dn->dn_datablkshift);
478 ASSERT3U(offset, <, dn->dn_datablksz);
484 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
486 dmu_buf_impl_t *db = vdb;
488 mutex_enter(&db->db_mtx);
489 ASSERT3U(db->db_state, ==, DB_READ);
491 * All reads are synchronous, so we must have a hold on the dbuf
493 ASSERT(refcount_count(&db->db_holds) > 0);
494 ASSERT(db->db_buf == NULL);
495 ASSERT(db->db.db_data == NULL);
496 if (db->db_level == 0 && db->db_freed_in_flight) {
497 /* we were freed in flight; disregard any error */
498 arc_release(buf, db);
499 bzero(buf->b_data, db->db.db_size);
501 db->db_freed_in_flight = FALSE;
502 dbuf_set_data(db, buf);
503 db->db_state = DB_CACHED;
504 } else if (zio == NULL || zio->io_error == 0) {
505 dbuf_set_data(db, buf);
506 db->db_state = DB_CACHED;
508 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
509 ASSERT3P(db->db_buf, ==, NULL);
510 VERIFY(arc_buf_remove_ref(buf, db) == 1);
511 db->db_state = DB_UNCACHED;
513 cv_broadcast(&db->db_changed);
514 dbuf_rele_and_unlock(db, NULL);
518 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
523 uint32_t aflags = ARC_NOWAIT;
528 ASSERT(!refcount_is_zero(&db->db_holds));
529 /* We need the struct_rwlock to prevent db_blkptr from changing. */
530 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
531 ASSERT(MUTEX_HELD(&db->db_mtx));
532 ASSERT(db->db_state == DB_UNCACHED);
533 ASSERT(db->db_buf == NULL);
535 if (db->db_blkid == DMU_BONUS_BLKID) {
536 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
538 ASSERT3U(bonuslen, <=, db->db.db_size);
539 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
540 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
541 if (bonuslen < DN_MAX_BONUSLEN)
542 bzero(db->db.db_data, DN_MAX_BONUSLEN);
544 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
546 dbuf_update_data(db);
547 db->db_state = DB_CACHED;
548 mutex_exit(&db->db_mtx);
553 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
554 * processes the delete record and clears the bp while we are waiting
555 * for the dn_mtx (resulting in a "no" from block_freed).
557 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
558 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
559 BP_IS_HOLE(db->db_blkptr)))) {
560 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
562 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
563 db->db.db_size, db, type));
565 bzero(db->db.db_data, db->db.db_size);
566 db->db_state = DB_CACHED;
567 *flags |= DB_RF_CACHED;
568 mutex_exit(&db->db_mtx);
572 spa = dn->dn_objset->os_spa;
575 db->db_state = DB_READ;
576 mutex_exit(&db->db_mtx);
578 if (DBUF_IS_L2CACHEABLE(db))
579 aflags |= ARC_L2CACHE;
581 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
582 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
583 db->db.db_object, db->db_level, db->db_blkid);
585 dbuf_add_ref(db, NULL);
586 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
589 pbuf = db->db_parent->db_buf;
591 pbuf = db->db_objset->os_phys_buf;
593 (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
594 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
595 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
597 if (aflags & ARC_CACHED)
598 *flags |= DB_RF_CACHED;
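/*
 * Bring the dbuf's contents into memory, honoring the DB_RF_* flags:
 * cached and bonus buffers are satisfied immediately, uncached buffers
 * are read in (through the optional parent zio), and nearby blocks may
 * be prefetched via dmu_zfetch().
 */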
602 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
605 int havepzio = (zio != NULL);
610 * We don't have to hold the mutex to check db_state because it
611 * can't be freed while we have a hold on the buffer.
613 ASSERT(!refcount_is_zero(&db->db_holds));
615 if (db->db_state == DB_NOFILL)
620 if ((flags & DB_RF_HAVESTRUCT) == 0)
621 rw_enter(&dn->dn_struct_rwlock, RW_READER);
623 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
624 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
625 DBUF_IS_CACHEABLE(db);
627 mutex_enter(&db->db_mtx);
628 if (db->db_state == DB_CACHED) {
629 mutex_exit(&db->db_mtx);
631 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
632 db->db.db_size, TRUE);
633 if ((flags & DB_RF_HAVESTRUCT) == 0)
634 rw_exit(&dn->dn_struct_rwlock);
636 } else if (db->db_state == DB_UNCACHED) {
637 spa_t *spa = dn->dn_objset->os_spa;
640 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
641 dbuf_read_impl(db, zio, &flags);
643 /* dbuf_read_impl has dropped db_mtx for us */
646 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
647 db->db.db_size, flags & DB_RF_CACHED);
649 if ((flags & DB_RF_HAVESTRUCT) == 0)
650 rw_exit(&dn->dn_struct_rwlock);
656 mutex_exit(&db->db_mtx);
658 dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
659 db->db.db_size, TRUE);
660 if ((flags & DB_RF_HAVESTRUCT) == 0)
661 rw_exit(&dn->dn_struct_rwlock);
664 mutex_enter(&db->db_mtx);
665 if ((flags & DB_RF_NEVERWAIT) == 0) {
666 while (db->db_state == DB_READ ||
667 db->db_state == DB_FILL) {
668 ASSERT(db->db_state == DB_READ ||
669 (flags & DB_RF_HAVESTRUCT) == 0);
670 cv_wait(&db->db_changed, &db->db_mtx);
672 if (db->db_state == DB_UNCACHED)
675 mutex_exit(&db->db_mtx);
678 ASSERT(err || havepzio || db->db_state == DB_CACHED);
683 dbuf_noread(dmu_buf_impl_t *db)
685 ASSERT(!refcount_is_zero(&db->db_holds));
686 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
687 mutex_enter(&db->db_mtx);
688 while (db->db_state == DB_READ || db->db_state == DB_FILL)
689 cv_wait(&db->db_changed, &db->db_mtx);
690 if (db->db_state == DB_UNCACHED) {
691 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
694 ASSERT(db->db_buf == NULL);
695 ASSERT(db->db.db_data == NULL);
696 DB_GET_SPA(&spa, db);
697 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
698 db->db_state = DB_FILL;
699 } else if (db->db_state == DB_NOFILL) {
700 dbuf_set_data(db, NULL);
702 ASSERT3U(db->db_state, ==, DB_CACHED);
704 mutex_exit(&db->db_mtx);
708 * This is our just-in-time copy function. It makes a copy of
709 * buffers that have been modified in a previous transaction
710 * group, before we modify them in the current active group.
712 * This function is used in two places: when we are dirtying a
713 * buffer for the first time in a txg, and when we are freeing
714 * a range in a dnode that includes this buffer.
716 * Note that when we are called from dbuf_free_range() we do
717 * not put a hold on the buffer, we just traverse the active
718 * dbuf list for the dnode.
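 *
 * For example, if a buffer dirtied in txg N has not yet synced when it
 * is dirtied again in txg N+1, the txg-N dirty record is re-pointed at
 * a private copy of the data so that the new modification cannot leak
 * into the version being written out.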
721 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
723 dbuf_dirty_record_t *dr = db->db_last_dirty;
725 ASSERT(MUTEX_HELD(&db->db_mtx));
726 ASSERT(db->db.db_data != NULL);
727 ASSERT(db->db_level == 0);
728 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
731 (dr->dt.dl.dr_data !=
732 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
736 * If the last dirty record for this dbuf has not yet synced
737 * and it's referencing the dbuf data, either:
738 * reset the reference to point to a new copy,
739 * or (if there are no active holders)
740 * just null out the current db_data pointer.
742 ASSERT(dr->dr_txg >= txg - 2);
743 if (db->db_blkid == DMU_BONUS_BLKID) {
744 /* Note that the data bufs here are zio_bufs */
745 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
746 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
747 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
748 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
749 int size = db->db.db_size;
750 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
753 DB_GET_SPA(&spa, db);
754 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
755 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
757 dbuf_set_data(db, NULL);
762 dbuf_unoverride(dbuf_dirty_record_t *dr)
764 dmu_buf_impl_t *db = dr->dr_dbuf;
765 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
766 uint64_t txg = dr->dr_txg;
768 ASSERT(MUTEX_HELD(&db->db_mtx));
769 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
770 ASSERT(db->db_level == 0);
772 if (db->db_blkid == DMU_BONUS_BLKID ||
773 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
776 ASSERT(db->db_data_pending != dr);
778 /* free this block */
779 if (!BP_IS_HOLE(bp)) {
782 DB_GET_SPA(&spa, db);
783 zio_free(spa, txg, bp);
785 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
787 * Release the already-written buffer, so we leave it in
788 * a consistent dirty state. Note that all callers are
789 * modifying the buffer, so they will immediately do
790 * another (redundant) arc_release(). Therefore, leave
791 * the buf thawed to save the effort of freezing &
792 * immediately re-thawing it.
794 arc_release(dr->dt.dl.dr_data, db);
798 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
799 * data blocks in the free range, so that any future readers will find
800 * empty blocks. Also, if we happen across any level-1 dbufs in the
801 * range that have not already been marked dirty, mark them dirty so
802 * they stay in memory.
805 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
807 dmu_buf_impl_t *db, *db_next;
808 uint64_t txg = tx->tx_txg;
809 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
810 uint64_t first_l1 = start >> epbs;
811 uint64_t last_l1 = end >> epbs;
813 if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
814 end = dn->dn_maxblkid;
815 last_l1 = end >> epbs;
817 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
818 mutex_enter(&dn->dn_dbufs_mtx);
819 for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
820 db_next = list_next(&dn->dn_dbufs, db);
821 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
823 if (db->db_level == 1 &&
824 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
825 mutex_enter(&db->db_mtx);
826 if (db->db_last_dirty &&
827 db->db_last_dirty->dr_txg < txg) {
828 dbuf_add_ref(db, FTAG);
829 mutex_exit(&db->db_mtx);
830 dbuf_will_dirty(db, tx);
833 mutex_exit(&db->db_mtx);
837 if (db->db_level != 0)
839 dprintf_dbuf(db, "found buf %s\n", "");
840 if (db->db_blkid < start || db->db_blkid > end)
843 /* found a level 0 buffer in the range */
844 if (dbuf_undirty(db, tx))
847 mutex_enter(&db->db_mtx);
848 if (db->db_state == DB_UNCACHED ||
849 db->db_state == DB_NOFILL ||
850 db->db_state == DB_EVICTING) {
851 ASSERT(db->db.db_data == NULL);
852 mutex_exit(&db->db_mtx);
855 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
856 /* will be handled in dbuf_read_done or dbuf_rele */
857 db->db_freed_in_flight = TRUE;
858 mutex_exit(&db->db_mtx);
861 if (refcount_count(&db->db_holds) == 0) {
866 /* The dbuf is referenced */
868 if (db->db_last_dirty != NULL) {
869 dbuf_dirty_record_t *dr = db->db_last_dirty;
871 if (dr->dr_txg == txg) {
873 * This buffer is "in-use", re-adjust the file
874 * size to reflect that this buffer may
875 * contain new data when we sync.
877 if (db->db_blkid != DMU_SPILL_BLKID &&
878 db->db_blkid > dn->dn_maxblkid)
879 dn->dn_maxblkid = db->db_blkid;
883 * This dbuf is not dirty in the open context.
884 * Either uncache it (if it's not referenced in
885 * the open context) or reset its contents to the identical committed state.
888 dbuf_fix_old_data(db, txg);
891 /* clear the contents if it's cached */
892 if (db->db_state == DB_CACHED) {
893 ASSERT(db->db.db_data != NULL);
894 arc_release(db->db_buf, db);
895 bzero(db->db.db_data, db->db.db_size);
896 arc_buf_freeze(db->db_buf);
899 mutex_exit(&db->db_mtx);
901 mutex_exit(&dn->dn_dbufs_mtx);
905 dbuf_block_freeable(dmu_buf_impl_t *db)
907 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
908 uint64_t birth_txg = 0;
911 * We don't need any locking to protect db_blkptr:
912 * If it's syncing, then db_last_dirty will be set
913 * so we'll ignore db_blkptr.
915 ASSERT(MUTEX_HELD(&db->db_mtx));
916 if (db->db_last_dirty)
917 birth_txg = db->db_last_dirty->dr_txg;
918 else if (db->db_blkptr)
919 birth_txg = db->db_blkptr->blk_birth;
922 * If we don't exist or are in a snapshot, we can't be freed.
923 * Don't pass the bp to dsl_dataset_block_freeable() since we
924 * are holding the db_mtx lock and might deadlock if we are
925 * prefetching a dedup-ed block.
928 return (ds == NULL ||
929 dsl_dataset_block_freeable(ds, NULL, birth_txg));
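/*
 * Resize a level-0 dbuf: dirty it, copy the old contents into a newly
 * allocated arc buf of the requested size, and zero-fill any growth.
 * The caller (e.g. dbuf_spill_set_blksz()) must hold dn_struct_rwlock
 * as writer.
 */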
935 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
937 arc_buf_t *buf, *obuf;
938 int osize = db->db.db_size;
939 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
942 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
947 /* XXX does *this* func really need the lock? */
948 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
951 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
952 * is OK, because there can be no other references to the db
953 * when we are changing its size, so no concurrent DB_FILL can be in progress.
957 * XXX we should be doing a dbuf_read, checking the return
958 * value and returning that up to our callers
960 dbuf_will_dirty(db, tx);
962 /* create the data buffer for the new block */
963 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
965 /* copy old block data to the new block */
967 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
968 /* zero the remainder */
970 bzero((uint8_t *)buf->b_data + osize, size - osize);
972 mutex_enter(&db->db_mtx);
973 dbuf_set_data(db, buf);
974 VERIFY(arc_buf_remove_ref(obuf, db) == 1);
975 db->db.db_size = size;
977 if (db->db_level == 0) {
978 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
979 db->db_last_dirty->dt.dl.dr_data = buf;
981 mutex_exit(&db->db_mtx);
983 dnode_willuse_space(dn, size-osize, tx);
988 dbuf_release_bp(dmu_buf_impl_t *db)
993 DB_GET_OBJSET(&os, db);
994 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
995 ASSERT(arc_released(os->os_phys_buf) ||
996 list_link_active(&os->os_dsl_dataset->ds_synced_link));
997 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
999 zb.zb_objset = os->os_dsl_dataset ?
1000 os->os_dsl_dataset->ds_object : 0;
1001 zb.zb_object = db->db.db_object;
1002 zb.zb_level = db->db_level;
1003 zb.zb_blkid = db->db_blkid;
1004 (void) arc_release_bp(db->db_buf, db,
1005 db->db_blkptr, os->os_spa, &zb);
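/*
 * Mark the dbuf dirty in the given transaction: create (or reuse) its
 * per-txg dirty record, snapshot the data if an older txg still
 * references it, and recursively dirty the parent chain up to the
 * dnode.
 */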
1008 dbuf_dirty_record_t *
1009 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1013 dbuf_dirty_record_t **drp, *dr;
1014 int drop_struct_lock = FALSE;
1015 boolean_t do_free_accounting = B_FALSE;
1016 int txgoff = tx->tx_txg & TXG_MASK;
1018 ASSERT(tx->tx_txg != 0);
1019 ASSERT(!refcount_is_zero(&db->db_holds));
1020 DMU_TX_DIRTY_BUF(tx, db);
1025 * Shouldn't dirty a regular buffer in syncing context. Private
1026 * objects may be dirtied in syncing context, but only if they
1027 * were already pre-dirtied in open context.
1029 ASSERT(!dmu_tx_is_syncing(tx) ||
1030 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1031 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1032 dn->dn_objset->os_dsl_dataset == NULL);
1034 * We make this assert for private objects as well, but after we
1035 * check if we're already dirty. They are allowed to re-dirty
1036 * in syncing context.
1038 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1039 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1040 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1042 mutex_enter(&db->db_mtx);
1044 * XXX make this true for indirects too? The problem is that
1045 * transactions created with dmu_tx_create_assigned() from
1046 * syncing context don't bother holding ahead.
1048 ASSERT(db->db_level != 0 ||
1049 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1050 db->db_state == DB_NOFILL);
1052 mutex_enter(&dn->dn_mtx);
1054 * Don't set dirtyctx to SYNC if we're just modifying this as we
1055 * initialize the objset.
1057 if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1058 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1060 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1061 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1062 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1064 mutex_exit(&dn->dn_mtx);
1066 if (db->db_blkid == DMU_SPILL_BLKID)
1067 dn->dn_have_spill = B_TRUE;
1070 * If this buffer is already dirty, we're done.
1072 drp = &db->db_last_dirty;
1073 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1074 db->db.db_object == DMU_META_DNODE_OBJECT);
1075 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1077 if (dr && dr->dr_txg == tx->tx_txg) {
1080 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1082 * If this buffer has already been written out,
1083 * we now need to reset its state.
1085 dbuf_unoverride(dr);
1086 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1087 db->db_state != DB_NOFILL)
1088 arc_buf_thaw(db->db_buf);
1090 mutex_exit(&db->db_mtx);
1095 * Only valid if not already dirty.
1097 ASSERT(dn->dn_object == 0 ||
1098 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1099 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1101 ASSERT3U(dn->dn_nlevels, >, db->db_level);
1102 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1103 dn->dn_phys->dn_nlevels > db->db_level ||
1104 dn->dn_next_nlevels[txgoff] > db->db_level ||
1105 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1106 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1109 * We should only be dirtying in syncing context if it's the
1110 * mos or we're initializing the os or it's a special object.
1111 * However, we are allowed to dirty in syncing context provided
1112 * we already dirtied it in open context. Hence we must make
1113 * this assertion only if we're not already dirty.
1116 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1117 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1118 ASSERT(db->db.db_size != 0);
1120 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1122 if (db->db_blkid != DMU_BONUS_BLKID) {
1124 * Update the accounting.
1125 * Note: we delay "free accounting" until after we drop
1126 * the db_mtx. This keeps us from grabbing other locks
1127 * (and possibly deadlocking) in bp_get_dsize() while
1128 * also holding the db_mtx.
1130 dnode_willuse_space(dn, db->db.db_size, tx);
1131 do_free_accounting = dbuf_block_freeable(db);
1135 * If this buffer is dirty in an old transaction group we need
1136 * to make a copy of it so that the changes we make in this
1137 * transaction group won't leak out when we sync the older txg.
1139 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1140 list_link_init(&dr->dr_dirty_node);
1141 if (db->db_level == 0) {
1142 void *data_old = db->db_buf;
1144 if (db->db_state != DB_NOFILL) {
1145 if (db->db_blkid == DMU_BONUS_BLKID) {
1146 dbuf_fix_old_data(db, tx->tx_txg);
1147 data_old = db->db.db_data;
1148 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1150 * Release the data buffer from the cache so
1151 * that we can modify it without impacting
1152 * possible other users of this cached data
1153 * block. Note that indirect blocks and
1154 * private objects are not released until the
1155 * syncing state (since they are only modified then).
1158 arc_release(db->db_buf, db);
1159 dbuf_fix_old_data(db, tx->tx_txg);
1160 data_old = db->db_buf;
1162 ASSERT(data_old != NULL);
1164 dr->dt.dl.dr_data = data_old;
1166 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1167 list_create(&dr->dt.di.dr_children,
1168 sizeof (dbuf_dirty_record_t),
1169 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1172 dr->dr_txg = tx->tx_txg;
1177 * We could have been freed_in_flight between the dbuf_noread
1178 * and dbuf_dirty. We win, as though the dbuf_noread() had
1179 * happened after the free.
1181 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1182 db->db_blkid != DMU_SPILL_BLKID) {
1183 mutex_enter(&dn->dn_mtx);
1184 dnode_clear_range(dn, db->db_blkid, 1, tx);
1185 mutex_exit(&dn->dn_mtx);
1186 db->db_freed_in_flight = FALSE;
1190 * This buffer is now part of this txg
1192 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1193 db->db_dirtycnt += 1;
1194 ASSERT3U(db->db_dirtycnt, <=, 3);
1196 mutex_exit(&db->db_mtx);
1198 if (db->db_blkid == DMU_BONUS_BLKID ||
1199 db->db_blkid == DMU_SPILL_BLKID) {
1200 mutex_enter(&dn->dn_mtx);
1201 ASSERT(!list_link_active(&dr->dr_dirty_node));
1202 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1203 mutex_exit(&dn->dn_mtx);
1204 dnode_setdirty(dn, tx);
1207 } else if (do_free_accounting) {
1208 blkptr_t *bp = db->db_blkptr;
1209 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1210 bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1212 * This is only a guess -- if the dbuf is dirty
1213 * in a previous txg, we don't know how much
1214 * space it will use on disk yet. We should
1215 * really have the struct_rwlock to access
1216 * db_blkptr, but since this is just a guess,
1217 * it's OK if we get an odd answer.
1219 ddt_prefetch(os->os_spa, bp);
1220 dnode_willuse_space(dn, -willfree, tx);
1223 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1224 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1225 drop_struct_lock = TRUE;
1228 if (db->db_level == 0) {
1229 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1230 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1233 if (db->db_level+1 < dn->dn_nlevels) {
1234 dmu_buf_impl_t *parent = db->db_parent;
1235 dbuf_dirty_record_t *di;
1236 int parent_held = FALSE;
1238 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1239 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1241 parent = dbuf_hold_level(dn, db->db_level+1,
1242 db->db_blkid >> epbs, FTAG);
1243 ASSERT(parent != NULL);
1246 if (drop_struct_lock)
1247 rw_exit(&dn->dn_struct_rwlock);
1248 ASSERT3U(db->db_level+1, ==, parent->db_level);
1249 di = dbuf_dirty(parent, tx);
1251 dbuf_rele(parent, FTAG);
1253 mutex_enter(&db->db_mtx);
1254 /* possible race with dbuf_undirty() */
1255 if (db->db_last_dirty == dr ||
1256 dn->dn_object == DMU_META_DNODE_OBJECT) {
1257 mutex_enter(&di->dt.di.dr_mtx);
1258 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1259 ASSERT(!list_link_active(&dr->dr_dirty_node));
1260 list_insert_tail(&di->dt.di.dr_children, dr);
1261 mutex_exit(&di->dt.di.dr_mtx);
1264 mutex_exit(&db->db_mtx);
1266 ASSERT(db->db_level+1 == dn->dn_nlevels);
1267 ASSERT(db->db_blkid < dn->dn_nblkptr);
1268 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1269 mutex_enter(&dn->dn_mtx);
1270 ASSERT(!list_link_active(&dr->dr_dirty_node));
1271 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1272 mutex_exit(&dn->dn_mtx);
1273 if (drop_struct_lock)
1274 rw_exit(&dn->dn_struct_rwlock);
1277 dnode_setdirty(dn, tx);
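/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Returns nonzero if dropping the dirty hold evicted
 * the dbuf.
 */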
1283 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1286 uint64_t txg = tx->tx_txg;
1287 dbuf_dirty_record_t *dr, **drp;
1290 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1292 mutex_enter(&db->db_mtx);
1294 * If this buffer is not dirty, we're done.
1296 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1297 if (dr->dr_txg <= txg)
1299 if (dr == NULL || dr->dr_txg < txg) {
1300 mutex_exit(&db->db_mtx);
1303 ASSERT(dr->dr_txg == txg);
1304 ASSERT(dr->dr_dbuf == db);
1310 * If this buffer is currently held, we cannot undirty
1311 * it, since one of the current holders may be in the
1312 * middle of an update. Note that users of dbuf_undirty()
1313 * should not place a hold on the dbuf before the call.
1315 if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1316 mutex_exit(&db->db_mtx);
1317 /* Make sure we don't toss this buffer at sync phase */
1318 mutex_enter(&dn->dn_mtx);
1319 dnode_clear_range(dn, db->db_blkid, 1, tx);
1320 mutex_exit(&dn->dn_mtx);
1325 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1327 ASSERT(db->db.db_size != 0);
1329 /* XXX would be nice to fix up dn_towrite_space[] */
1333 if (dr->dr_parent) {
1334 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1335 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1336 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1337 } else if (db->db_level+1 == dn->dn_nlevels) {
1338 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1339 mutex_enter(&dn->dn_mtx);
1340 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1341 mutex_exit(&dn->dn_mtx);
1345 if (db->db_level == 0) {
1346 if (db->db_state != DB_NOFILL) {
1347 dbuf_unoverride(dr);
1349 ASSERT(db->db_buf != NULL);
1350 ASSERT(dr->dt.dl.dr_data != NULL);
1351 if (dr->dt.dl.dr_data != db->db_buf)
1352 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
1356 ASSERT(db->db_buf != NULL);
1357 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1358 mutex_destroy(&dr->dt.di.dr_mtx);
1359 list_destroy(&dr->dt.di.dr_children);
1361 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1363 ASSERT(db->db_dirtycnt > 0);
1364 db->db_dirtycnt -= 1;
1366 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1367 arc_buf_t *buf = db->db_buf;
1369 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1370 dbuf_set_data(db, NULL);
1371 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1376 mutex_exit(&db->db_mtx);
1380 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1382 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1384 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1386 ASSERT(tx->tx_txg != 0);
1387 ASSERT(!refcount_is_zero(&db->db_holds));
1390 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1391 rf |= DB_RF_HAVESTRUCT;
1393 (void) dbuf_read(db, NULL, rf);
1394 (void) dbuf_dirty(db, tx);
1398 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1400 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1402 db->db_state = DB_NOFILL;
1404 dmu_buf_will_fill(db_fake, tx);
1408 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1410 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1412 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1413 ASSERT(tx->tx_txg != 0);
1414 ASSERT(db->db_level == 0);
1415 ASSERT(!refcount_is_zero(&db->db_holds));
1417 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1418 dmu_tx_private_ok(tx));
1421 (void) dbuf_dirty(db, tx);
1424 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1427 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1429 mutex_enter(&db->db_mtx);
1432 if (db->db_state == DB_FILL) {
1433 if (db->db_level == 0 && db->db_freed_in_flight) {
1434 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1435 /* we were freed while filling */
1436 /* XXX dbuf_undirty? */
1437 bzero(db->db.db_data, db->db.db_size);
1438 db->db_freed_in_flight = FALSE;
1440 db->db_state = DB_CACHED;
1441 cv_broadcast(&db->db_changed);
1443 mutex_exit(&db->db_mtx);
1447 * Directly assign a provided arc buf to a given dbuf if it's not referenced
1448 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1451 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1453 ASSERT(!refcount_is_zero(&db->db_holds));
1454 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1455 ASSERT(db->db_level == 0);
1456 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1457 ASSERT(buf != NULL);
1458 ASSERT(arc_buf_size(buf) == db->db.db_size);
1459 ASSERT(tx->tx_txg != 0);
1461 arc_return_buf(buf, db);
1462 ASSERT(arc_released(buf));
1464 mutex_enter(&db->db_mtx);
1466 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1467 cv_wait(&db->db_changed, &db->db_mtx);
1469 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1471 if (db->db_state == DB_CACHED &&
1472 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1473 mutex_exit(&db->db_mtx);
1474 (void) dbuf_dirty(db, tx);
1475 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1476 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1477 xuio_stat_wbuf_copied();
1481 xuio_stat_wbuf_nocopy();
1482 if (db->db_state == DB_CACHED) {
1483 dbuf_dirty_record_t *dr = db->db_last_dirty;
1485 ASSERT(db->db_buf != NULL);
1486 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1487 ASSERT(dr->dt.dl.dr_data == db->db_buf);
1488 if (!arc_released(db->db_buf)) {
1489 ASSERT(dr->dt.dl.dr_override_state ==
1491 arc_release(db->db_buf, db);
1493 dr->dt.dl.dr_data = buf;
1494 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1495 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1496 arc_release(db->db_buf, db);
1497 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1501 ASSERT(db->db_buf == NULL);
1502 dbuf_set_data(db, buf);
1503 db->db_state = DB_FILL;
1504 mutex_exit(&db->db_mtx);
1505 (void) dbuf_dirty(db, tx);
1506 dbuf_fill_done(db, tx);
1510 * "Clear" the contents of this dbuf. This will mark the dbuf
1511 * EVICTING and clear *most* of its references. Unfortunately,
1512 * when we are not holding the dn_dbufs_mtx, we can't clear the
1513 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1514 * in this case. For callers from the DMU we will usually see:
1515 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1516 * For the arc callback, we will usually see:
1517 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1518 * Sometimes, though, we will get a mix of these two:
1519 * DMU: dbuf_clear()->arc_buf_evict()
1520 * ARC: dbuf_do_evict()->dbuf_destroy()
1523 dbuf_clear(dmu_buf_impl_t *db)
1526 dmu_buf_impl_t *parent = db->db_parent;
1527 dmu_buf_impl_t *dndb;
1528 int dbuf_gone = FALSE;
1530 ASSERT(MUTEX_HELD(&db->db_mtx));
1531 ASSERT(refcount_is_zero(&db->db_holds));
1533 dbuf_evict_user(db);
1535 if (db->db_state == DB_CACHED) {
1536 ASSERT(db->db.db_data != NULL);
1537 if (db->db_blkid == DMU_BONUS_BLKID) {
1538 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1539 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1541 db->db.db_data = NULL;
1542 db->db_state = DB_UNCACHED;
1545 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1546 ASSERT(db->db_data_pending == NULL);
1548 db->db_state = DB_EVICTING;
1549 db->db_blkptr = NULL;
1554 if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1555 list_remove(&dn->dn_dbufs, db);
1556 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1560 * Decrementing the dbuf count means that the hold corresponding
1561 * to the removed dbuf is no longer discounted in dnode_move(),
1562 * so the dnode cannot be moved until after we release the hold.
1563 * The membar_producer() ensures visibility of the decremented
1564 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1568 db->db_dnode_handle = NULL;
1574 dbuf_gone = arc_buf_evict(db->db_buf);
1577 mutex_exit(&db->db_mtx);
1580 * If this dbuf is referenced from an indirect dbuf,
1581 * decrement the ref count on the indirect dbuf.
1583 if (parent && parent != dndb)
1584 dbuf_rele(parent, db);
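/*
 * Locate the parent dbuf and the block pointer within it that refers to
 * (level, blkid): the dnode's spill pointer, a slot in dn_blkptr, or an
 * entry in an indirect block that is read in here as needed.  Returns
 * ENOENT if the buffer has no parent yet, or an error from reading the
 * parent.
 */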
1588 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1589 dmu_buf_impl_t **parentp, blkptr_t **bpp)
1596 ASSERT(blkid != DMU_BONUS_BLKID);
1598 if (blkid == DMU_SPILL_BLKID) {
1599 mutex_enter(&dn->dn_mtx);
1600 if (dn->dn_have_spill &&
1601 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1602 *bpp = &dn->dn_phys->dn_spill;
1605 dbuf_add_ref(dn->dn_dbuf, NULL);
1606 *parentp = dn->dn_dbuf;
1607 mutex_exit(&dn->dn_mtx);
1611 if (dn->dn_phys->dn_nlevels == 0)
1614 nlevels = dn->dn_phys->dn_nlevels;
1616 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1618 ASSERT3U(level * epbs, <, 64);
1619 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1620 if (level >= nlevels ||
1621 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1622 /* the buffer has no parent yet */
1624 } else if (level < nlevels-1) {
1625 /* this block is referenced from an indirect block */
1626 int err = dbuf_hold_impl(dn, level+1,
1627 blkid >> epbs, fail_sparse, NULL, parentp);
1630 err = dbuf_read(*parentp, NULL,
1631 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1633 dbuf_rele(*parentp, NULL);
1637 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1638 (blkid & ((1ULL << epbs) - 1));
1641 /* the block is referenced from the dnode */
1642 ASSERT3U(level, ==, nlevels-1);
1643 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1644 blkid < dn->dn_phys->dn_nblkptr);
1646 dbuf_add_ref(dn->dn_dbuf, NULL);
1647 *parentp = dn->dn_dbuf;
1649 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1654 static dmu_buf_impl_t *
1655 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1656 dmu_buf_impl_t *parent, blkptr_t *blkptr)
1658 objset_t *os = dn->dn_objset;
1659 dmu_buf_impl_t *db, *odb;
1661 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1662 ASSERT(dn->dn_type != DMU_OT_NONE);
1664 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1667 db->db.db_object = dn->dn_object;
1668 db->db_level = level;
1669 db->db_blkid = blkid;
1670 db->db_last_dirty = NULL;
1671 db->db_dirtycnt = 0;
1672 db->db_dnode_handle = dn->dn_handle;
1673 db->db_parent = parent;
1674 db->db_blkptr = blkptr;
1676 db->db_user_ptr = NULL;
1677 db->db_user_data_ptr_ptr = NULL;
1678 db->db_evict_func = NULL;
1679 db->db_immediate_evict = 0;
1680 db->db_freed_in_flight = 0;
1682 if (blkid == DMU_BONUS_BLKID) {
1683 ASSERT3P(parent, ==, dn->dn_dbuf);
1684 db->db.db_size = DN_MAX_BONUSLEN -
1685 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1686 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1687 db->db.db_offset = DMU_BONUS_BLKID;
1688 db->db_state = DB_UNCACHED;
1689 /* the bonus dbuf is not placed in the hash table */
1690 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1692 } else if (blkid == DMU_SPILL_BLKID) {
1693 db->db.db_size = (blkptr != NULL) ?
1694 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1695 db->db.db_offset = 0;
1698 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1699 db->db.db_size = blocksize;
1700 db->db.db_offset = db->db_blkid * blocksize;
1704 * Hold the dn_dbufs_mtx while we get the new dbuf
1705 * in the hash table *and* added to the dbufs list.
1706 * This prevents a possible deadlock with someone
1707 * trying to look up this dbuf before it's added to the dn_dbufs list.
1710 mutex_enter(&dn->dn_dbufs_mtx);
1711 db->db_state = DB_EVICTING;
1712 if ((odb = dbuf_hash_insert(db)) != NULL) {
1713 /* someone else inserted it first */
1714 kmem_cache_free(dbuf_cache, db);
1715 mutex_exit(&dn->dn_dbufs_mtx);
1718 list_insert_head(&dn->dn_dbufs, db);
1719 db->db_state = DB_UNCACHED;
1720 mutex_exit(&dn->dn_dbufs_mtx);
1721 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1723 if (parent && parent != dn->dn_dbuf)
1724 dbuf_add_ref(parent, db);
1726 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1727 refcount_count(&dn->dn_holds) > 0);
1728 (void) refcount_add(&dn->dn_holds, db);
1729 (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1731 dprintf_dbuf(db, "db=%p\n", db);
1737 dbuf_do_evict(void *private)
1739 arc_buf_t *buf = private;
1740 dmu_buf_impl_t *db = buf->b_private;
1742 if (!MUTEX_HELD(&db->db_mtx))
1743 mutex_enter(&db->db_mtx);
1745 ASSERT(refcount_is_zero(&db->db_holds));
1747 if (db->db_state != DB_EVICTING) {
1748 ASSERT(db->db_state == DB_CACHED);
1753 mutex_exit(&db->db_mtx);
1760 dbuf_destroy(dmu_buf_impl_t *db)
1762 ASSERT(refcount_is_zero(&db->db_holds));
1764 if (db->db_blkid != DMU_BONUS_BLKID) {
1766 * If this dbuf is still on the dn_dbufs list,
1767 * remove it from that list.
1769 if (db->db_dnode_handle != NULL) {
1774 mutex_enter(&dn->dn_dbufs_mtx);
1775 list_remove(&dn->dn_dbufs, db);
1776 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1777 mutex_exit(&dn->dn_dbufs_mtx);
1780 * Decrementing the dbuf count means that the hold
1781 * corresponding to the removed dbuf is no longer
1782 * discounted in dnode_move(), so the dnode cannot be
1783 * moved until after we release the hold.
1786 db->db_dnode_handle = NULL;
1788 dbuf_hash_remove(db);
1790 db->db_parent = NULL;
1793 ASSERT(!list_link_active(&db->db_link));
1794 ASSERT(db->db.db_data == NULL);
1795 ASSERT(db->db_hash_next == NULL);
1796 ASSERT(db->db_blkptr == NULL);
1797 ASSERT(db->db_data_pending == NULL);
1799 kmem_cache_free(dbuf_cache, db);
1800 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
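/*
 * Issue an asynchronous, speculative read of level-0 block blkid unless
 * the block has been freed or a dbuf for it already exists.
 */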
1804 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1806 dmu_buf_impl_t *db = NULL;
1807 blkptr_t *bp = NULL;
1809 ASSERT(blkid != DMU_BONUS_BLKID);
1810 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1812 if (dnode_block_freed(dn, blkid))
1815 /* dbuf_find() returns with db_mtx held */
1816 if ((db = dbuf_find(dn, 0, blkid))) {
1818 * This dbuf is already in the cache. We assume that
1819 * it is already CACHED, or else about to be either read or filled.
1822 mutex_exit(&db->db_mtx);
1826 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1827 if (bp && !BP_IS_HOLE(bp)) {
1828 int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1829 ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1831 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1832 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1835 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1836 dn->dn_object, 0, blkid);
1841 pbuf = dn->dn_objset->os_phys_buf;
1843 (void) dsl_read(NULL, dn->dn_objset->os_spa,
1844 bp, pbuf, NULL, NULL, priority,
1845 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1849 dbuf_rele(db, NULL);
1854 * Returns with db_holds incremented, and db_mtx not held.
1855 * Note: dn_struct_rwlock must be held.
1858 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1859 void *tag, dmu_buf_impl_t **dbp)
1861 dmu_buf_impl_t *db, *parent = NULL;
1863 ASSERT(blkid != DMU_BONUS_BLKID);
1864 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1865 ASSERT3U(dn->dn_nlevels, >, level);
1869 /* dbuf_find() returns with db_mtx held */
1870 db = dbuf_find(dn, level, blkid);
1873 blkptr_t *bp = NULL;
1876 ASSERT3P(parent, ==, NULL);
1877 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1879 if (err == 0 && bp && BP_IS_HOLE(bp))
1883 dbuf_rele(parent, NULL);
1887 if (err && err != ENOENT)
1889 db = dbuf_create(dn, level, blkid, parent, bp);
1892 if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1893 arc_buf_add_ref(db->db_buf, db);
1894 if (db->db_buf->b_data == NULL) {
1897 dbuf_rele(parent, NULL);
1902 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1905 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1908 * If this buffer is currently syncing out, and we are
1909 * still referencing it from db_data, we need to make a copy
1910 * of it in case we decide we want to dirty it again in this txg.
1912 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1913 dn->dn_object != DMU_META_DNODE_OBJECT &&
1914 db->db_state == DB_CACHED && db->db_data_pending) {
1915 dbuf_dirty_record_t *dr = db->db_data_pending;
1917 if (dr->dt.dl.dr_data == db->db_buf) {
1918 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1921 arc_buf_alloc(dn->dn_objset->os_spa,
1922 db->db.db_size, db, type));
1923 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1928 (void) refcount_add(&db->db_holds, tag);
1929 dbuf_update_data(db);
1931 mutex_exit(&db->db_mtx);
1933 /* NOTE: we can't rele the parent until after we drop the db_mtx */
1935 dbuf_rele(parent, NULL);
1937 ASSERT3P(DB_DNODE(db), ==, dn);
1938 ASSERT3U(db->db_blkid, ==, blkid);
1939 ASSERT3U(db->db_level, ==, level);
1946 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1949 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1950 return (err ? NULL : db);
1954 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1957 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1958 return (err ? NULL : db);
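/*
 * Illustrative only: a typical hold/read/release sequence, assuming the
 * caller already holds dn_struct_rwlock:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		(void) dbuf_read(db, NULL,
 *		    DB_RF_MUST_SUCCEED | DB_RF_HAVESTRUCT);
 *		dbuf_rele(db, FTAG);
 *	}
 */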
1962 dbuf_create_bonus(dnode_t *dn)
1964 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1966 ASSERT(dn->dn_bonus == NULL);
1967 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1971 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
1973 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1976 if (db->db_blkid != DMU_SPILL_BLKID)
1979 blksz = SPA_MINBLOCKSIZE;
1980 if (blksz > SPA_MAXBLOCKSIZE)
1981 blksz = SPA_MAXBLOCKSIZE;
1983 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
1987 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1988 dbuf_new_size(db, blksz, tx);
1989 rw_exit(&dn->dn_struct_rwlock);
1996 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
1998 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2001 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2003 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2005 VERIFY(refcount_add(&db->db_holds, tag) > 1);
2009 * If you call dbuf_rele() you had better not be referencing the dnode handle
2010 * unless you have some other direct or indirect hold on the dnode. (An indirect
2011 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2012 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2013 * dnode's parent dbuf evicting its dnode handles.
2015 #pragma weak dmu_buf_rele = dbuf_rele
2017 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2019 mutex_enter(&db->db_mtx);
2020 dbuf_rele_and_unlock(db, tag);
2024 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
2025 * db_dirtycnt and db_holds to be updated atomically.
2028 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2032 ASSERT(MUTEX_HELD(&db->db_mtx));
2036 * Remove the reference to the dbuf before removing its hold on the
2037 * dnode so we can guarantee in dnode_move() that a referenced bonus
2038 * buffer has a corresponding dnode hold.
2040 holds = refcount_remove(&db->db_holds, tag);
2044 * We can't freeze indirects if there is a possibility that they
2045 * may be modified in the current syncing context.
2047 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2048 arc_buf_freeze(db->db_buf);
2050 if (holds == db->db_dirtycnt &&
2051 db->db_level == 0 && db->db_immediate_evict)
2052 dbuf_evict_user(db);
2055 if (db->db_blkid == DMU_BONUS_BLKID) {
2056 mutex_exit(&db->db_mtx);
2059 * If the dnode moves here, we cannot cross this barrier
2060 * until the move completes.
2063 (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2066 * The bonus buffer's dnode hold is no longer discounted
2067 * in dnode_move(). The dnode cannot move until after
2070 dnode_rele(DB_DNODE(db), db);
2071 } else if (db->db_buf == NULL) {
2073 * This is a special case: we never associated this
2074 * dbuf with any data allocated from the ARC.
2076 ASSERT(db->db_state == DB_UNCACHED ||
2077 db->db_state == DB_NOFILL);
2079 } else if (arc_released(db->db_buf)) {
2080 arc_buf_t *buf = db->db_buf;
2082 * This dbuf has anonymous data associated with it.
2084 dbuf_set_data(db, NULL);
2085 VERIFY(arc_buf_remove_ref(buf, db) == 1);
2088 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
2089 if (!DBUF_IS_CACHEABLE(db))
2092 mutex_exit(&db->db_mtx);
2095 mutex_exit(&db->db_mtx);
2099 #pragma weak dmu_buf_refcount = dbuf_refcount
2101 dbuf_refcount(dmu_buf_impl_t *db)
2103 return (refcount_count(&db->db_holds));
2107 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2108 dmu_buf_evict_func_t *evict_func)
2110 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2111 user_data_ptr_ptr, evict_func));
2115 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2116 dmu_buf_evict_func_t *evict_func)
2118 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2120 db->db_immediate_evict = TRUE;
2121 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2122 user_data_ptr_ptr, evict_func));
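/*
 * Compare-and-swap update of the dbuf's user state: the new user_ptr,
 * user_data_ptr_ptr, and evict_func are installed only if db_user_ptr
 * still equals old_user_ptr.  Returns old_user_ptr on success, or the
 * current (conflicting) user pointer on failure.
 */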
2126 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2127 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2129 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2130 ASSERT(db->db_level == 0);
2132 ASSERT((user_ptr == NULL) == (evict_func == NULL));
2134 mutex_enter(&db->db_mtx);
2136 if (db->db_user_ptr == old_user_ptr) {
2137 db->db_user_ptr = user_ptr;
2138 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2139 db->db_evict_func = evict_func;
2141 dbuf_update_data(db);
2143 old_user_ptr = db->db_user_ptr;
2146 mutex_exit(&db->db_mtx);
2147 return (old_user_ptr);
2151 dmu_buf_get_user(dmu_buf_t *db_fake)
2153 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2154 ASSERT(!refcount_is_zero(&db->db_holds));
2156 return (db->db_user_ptr);
2160 dmu_buf_freeable(dmu_buf_t *dbuf)
2162 boolean_t res = B_FALSE;
2163 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2166 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2167 db->db_blkptr, db->db_blkptr->blk_birth);
2173 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2175 /* ASSERT(dmu_tx_is_syncing(tx)) */
2176 ASSERT(MUTEX_HELD(&db->db_mtx));
2178 if (db->db_blkptr != NULL)
2181 if (db->db_blkid == DMU_SPILL_BLKID) {
2182 db->db_blkptr = &dn->dn_phys->dn_spill;
2183 BP_ZERO(db->db_blkptr);
2186 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2188 * This buffer was allocated at a time when there were
2189 * no blkptrs available from the dnode, or it was
2190 * inappropriate to hook it in (i.e., nlevels mismatch).
2192 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2193 ASSERT(db->db_parent == NULL);
2194 db->db_parent = dn->dn_dbuf;
2195 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2198 dmu_buf_impl_t *parent = db->db_parent;
2199 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2201 ASSERT(dn->dn_phys->dn_nlevels > 1);
2202 if (parent == NULL) {
2203 mutex_exit(&db->db_mtx);
2204 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2205 (void) dbuf_hold_impl(dn, db->db_level+1,
2206 db->db_blkid >> epbs, FALSE, db, &parent);
2207 rw_exit(&dn->dn_struct_rwlock);
2208 mutex_enter(&db->db_mtx);
2209 db->db_parent = parent;
2211 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2212 (db->db_blkid & ((1ULL << epbs) - 1));
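/*
 * Write out a dirty indirect block in syncing context: make sure it is
 * read in and its blkptr is hooked up, start the write, then sync any
 * dirty children.
 */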
2218 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2220 dmu_buf_impl_t *db = dr->dr_dbuf;
2224 ASSERT(dmu_tx_is_syncing(tx));
2226 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2228 mutex_enter(&db->db_mtx);
2230 ASSERT(db->db_level > 0);
2233 if (db->db_buf == NULL) {
2234 mutex_exit(&db->db_mtx);
2235 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2236 mutex_enter(&db->db_mtx);
2238 ASSERT3U(db->db_state, ==, DB_CACHED);
2239 ASSERT(db->db_buf != NULL);
2243 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2244 dbuf_check_blkptr(dn, db);
2247 db->db_data_pending = dr;
2249 mutex_exit(&db->db_mtx);
2250 dbuf_write(dr, db->db_buf, tx);
2253 mutex_enter(&dr->dt.di.dr_mtx);
2254 dbuf_sync_list(&dr->dt.di.dr_children, tx);
2255 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2256 mutex_exit(&dr->dt.di.dr_mtx);
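/*
 * Write out a dirty level-0 block in syncing context: bonus buffers are
 * copied directly into the dnode, while ordinary blocks may first be
 * copied (if still in use) and are then written via dbuf_write().
 */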

/* Sync a dirty level-0 (leaf) buffer: write its data for this txg. */
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write the IO could
		 * complete before zio_nowait() even returns, so we must
		 * DB_DNODE_EXIT() first in case zio_nowait() invalidates
		 * the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}
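
/*
 * Illustrative scenario for the copy made above (not in the original
 * file): a process appends to a file in the open txg while the
 * previous txg is still syncing.  The dbuf then has more than one hold
 * and *datap still points at db_buf, so without the arc_buf_alloc()/
 * bcopy() snapshot the in-flight write could pick up the open-txg
 * modifications, violating the transactional boundary between txgs.
 */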

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}
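
/*
 * Note (illustrative, not in the original file): when this list belongs
 * to the meta-dnode, each dbuf_sync_leaf() call re-inserts its dirty
 * record at the tail with dr_zio initialized (see the
 * DMU_META_DNODE_OBJECT case in dbuf_sync_leaf()).  The loop above
 * therefore stops at the first record with a non-NULL dr_zio, and the
 * caller can later zio_wait() on those IOs once everything has been
 * issued.
 */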

/*
 * The zio "ready" callback: the block pointer for this write is now
 * final, so update the dnode's space accounting and the fill count
 * stored in the block pointer.
 */
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}
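
/*
 * Worked example for the fill count above (illustrative, not in the
 * original file): a 16K level-0 block of the meta-dnode holds
 * 16384 >> DNODE_SHIFT (9) = 32 dnodes, so its blk_fill is the number
 * of those 32 slots whose dn_type is not DMU_OT_NONE.  A 16K indirect
 * block holds 16384 >> SPA_BLKPTRSHIFT (7) = 128 block pointers, and
 * its blk_fill is the sum of its children's blk_fill, so each
 * non-hole leaf is counted exactly once at every level above it.
 */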

/*
 * The zio "done" callback: the write has completed, so unlink and free
 * the dirty record and drop the hold taken when the buffer was dirtied.
 */
/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT3U(zio->io_error, ==, 0);
	ASSERT(db->db_blkptr == bp);

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT);
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}
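
/*
 * Note (illustrative, not in the original file): dr_overridden_by
 * records where dmu_sync() already wrote this data, and
 * zio_write_override() (see dbuf_write()) asks the zio to adopt that
 * block pointer.  If the final io_bp does not match it, the earlier
 * copy is unlinked with dsl_free() above and the ARC buffer is
 * released so it can be rewritten normally.
 */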

/*
 * Create the zio that writes this dirty buffer's data, chained to its
 * parent's zio so that an indirect block completes only after its
 * children.
 */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, dbuf_write_override_done, dr,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
		    dbuf_write_ready, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
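
/*
 * Summary of the three write paths above (illustrative, not in the
 * original file):
 *
 * 1. DR_OVERRIDDEN level-0 buffers: the data was already written by
 *    dmu_sync(), so the zio exists only to adopt dr_overridden_by via
 *    zio_write_override().
 *
 * 2. DB_NOFILL buffers: no data is supplied (ZIO_FLAG_NODATA); the
 *    write exists to update block-pointer bookkeeping, with
 *    checksumming disabled.
 *
 * 3. Everything else: a normal arc_write() of the ARC buffer, with
 *    dbuf_write_ready()/dbuf_write_done() maintaining the accounting.
 */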