4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "@(#)dbuf.c 1.32 08/03/20 SMI"
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_impl.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h>
34 #include <sys/dsl_dir.h>
35 #include <sys/dmu_tx.h>
38 #include <sys/dmu_zfetch.h>
40 static void dbuf_destroy(dmu_buf_impl_t *db);
41 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
42 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, int checksum,
43 int compress, dmu_tx_t *tx);
44 static arc_done_func_t dbuf_write_ready;
45 static arc_done_func_t dbuf_write_done;
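/*
 * Tunable: when nonzero, indirect (metadata) blocks are written with
 * ZIO_COMPRESS_EMPTY instead of ZIO_COMPRESS_LZJB; consumed by
 * dbuf_sync_indirect() below.
 */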
47 int zfs_mdcomp_disable = 0;
50 * Global data structures and functions for the dbuf cache.
52 static kmem_cache_t *dbuf_cache;
56 dbuf_cons(void *vdb, void *unused, int kmflag)
58 dmu_buf_impl_t *db = vdb;
59 bzero(db, sizeof (dmu_buf_impl_t));
61 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
62 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
63 refcount_create(&db->db_holds);
69 dbuf_dest(void *vdb, void *unused)
71 dmu_buf_impl_t *db = vdb;
72 mutex_destroy(&db->db_mtx);
73 cv_destroy(&db->db_changed);
74 refcount_destroy(&db->db_holds);
78 * dbuf hash table routines
80 static dbuf_hash_table_t dbuf_hash_table;
82 static uint64_t dbuf_hash_count;
85 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
87 uintptr_t osv = (uintptr_t)os;
90 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
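/*
 * Mix the level byte, the objset pointer, and the low two bytes each of
 * the object and block numbers through the CRC-64 table; the remaining
 * high-order bits are folded in by the final XOR below.
 */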
91 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
92 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
93 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
94 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
95 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
96 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
98 crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);
103 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
105 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
106 ((dbuf)->db.db_object == (obj) && \
107 (dbuf)->db_objset == (os) && \
108 (dbuf)->db_level == (level) && \
109 (dbuf)->db_blkid == (blkid))
112 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
114 dbuf_hash_table_t *h = &dbuf_hash_table;
115 objset_impl_t *os = dn->dn_objset;
116 uint64_t obj = dn->dn_object;
117 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
118 uint64_t idx = hv & h->hash_table_mask;
121 mutex_enter(DBUF_HASH_MUTEX(h, idx));
122 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
123 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
124 mutex_enter(&db->db_mtx);
125 if (db->db_state != DB_EVICTING) {
126 mutex_exit(DBUF_HASH_MUTEX(h, idx));
129 mutex_exit(&db->db_mtx);
132 mutex_exit(DBUF_HASH_MUTEX(h, idx));
137 * Insert an entry into the hash table. If there is already an element
138 * equal to the new one in the hash table, then the existing element
139 * will be returned and the new element will not be inserted.
140 * Otherwise returns NULL.
142 static dmu_buf_impl_t *
143 dbuf_hash_insert(dmu_buf_impl_t *db)
145 dbuf_hash_table_t *h = &dbuf_hash_table;
146 objset_impl_t *os = db->db_objset;
147 uint64_t obj = db->db.db_object;
148 int level = db->db_level;
149 uint64_t blkid = db->db_blkid;
150 uint64_t hv = DBUF_HASH(os, obj, level, blkid);
151 uint64_t idx = hv & h->hash_table_mask;
154 mutex_enter(DBUF_HASH_MUTEX(h, idx));
155 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
156 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
157 mutex_enter(&dbf->db_mtx);
158 if (dbf->db_state != DB_EVICTING) {
159 mutex_exit(DBUF_HASH_MUTEX(h, idx));
162 mutex_exit(&dbf->db_mtx);
166 mutex_enter(&db->db_mtx);
167 db->db_hash_next = h->hash_table[idx];
168 h->hash_table[idx] = db;
169 mutex_exit(DBUF_HASH_MUTEX(h, idx));
170 atomic_add_64(&dbuf_hash_count, 1);
176 * Remove an entry from the hash table. This operation will
177 * fail if there are any existing holds on the db.
180 dbuf_hash_remove(dmu_buf_impl_t *db)
182 dbuf_hash_table_t *h = &dbuf_hash_table;
183 uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
184 db->db_level, db->db_blkid);
185 uint64_t idx = hv & h->hash_table_mask;
186 dmu_buf_impl_t *dbf, **dbp;
189 * We mustn't hold db_mtx here, to maintain the lock ordering
190 * DBUF_HASH_MUTEX > db_mtx.
192 ASSERT(refcount_is_zero(&db->db_holds));
193 ASSERT(db->db_state == DB_EVICTING);
194 ASSERT(!MUTEX_HELD(&db->db_mtx));
196 mutex_enter(DBUF_HASH_MUTEX(h, idx));
197 dbp = &h->hash_table[idx];
198 while ((dbf = *dbp) != db) {
199 dbp = &dbf->db_hash_next;
202 *dbp = db->db_hash_next;
203 db->db_hash_next = NULL;
204 mutex_exit(DBUF_HASH_MUTEX(h, idx));
205 atomic_add_64(&dbuf_hash_count, -1);
208 static arc_evict_func_t dbuf_do_evict;
211 dbuf_evict_user(dmu_buf_impl_t *db)
213 ASSERT(MUTEX_HELD(&db->db_mtx));
215 if (db->db_level != 0 || db->db_evict_func == NULL)
218 if (db->db_user_data_ptr_ptr)
219 *db->db_user_data_ptr_ptr = db->db.db_data;
220 db->db_evict_func(&db->db, db->db_user_ptr);
221 db->db_user_ptr = NULL;
222 db->db_user_data_ptr_ptr = NULL;
223 db->db_evict_func = NULL;
227 dbuf_evict(dmu_buf_impl_t *db)
229 ASSERT(MUTEX_HELD(&db->db_mtx));
230 ASSERT(db->db_buf == NULL);
231 ASSERT(db->db_data_pending == NULL);
240 uint64_t hsize = 1ULL << 16;
241 dbuf_hash_table_t *h = &dbuf_hash_table;
245 * The hash table is big enough to fill all of physical memory
246 * with an average 4K block size. The table will take up
247 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
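 * For example, with 8GB of physical memory the loop below stops at
 * hsize = 2^21 buckets (2^21 * 4K == 8GB), i.e. a 16MB array of
 * 8-byte bucket pointers.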
249 while (hsize * 4096 < physmem * PAGESIZE)
253 h->hash_table_mask = hsize - 1;
254 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
255 if (h->hash_table == NULL) {
256 /* XXX - we should really return an error instead of assert */
257 ASSERT(hsize > (1ULL << 10));
262 dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
263 sizeof (dmu_buf_impl_t),
264 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
266 for (i = 0; i < DBUF_MUTEXES; i++)
267 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
273 dbuf_hash_table_t *h = &dbuf_hash_table;
276 for (i = 0; i < DBUF_MUTEXES; i++)
277 mutex_destroy(&h->hash_mutexes[i]);
278 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
279 kmem_cache_destroy(dbuf_cache);
288 dbuf_verify(dmu_buf_impl_t *db)
290 dnode_t *dn = db->db_dnode;
292 ASSERT(MUTEX_HELD(&db->db_mtx));
294 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
297 ASSERT(db->db_objset != NULL);
299 ASSERT(db->db_parent == NULL);
300 ASSERT(db->db_blkptr == NULL);
302 ASSERT3U(db->db.db_object, ==, dn->dn_object);
303 ASSERT3P(db->db_objset, ==, dn->dn_objset);
304 ASSERT3U(db->db_level, <, dn->dn_nlevels);
305 ASSERT(db->db_blkid == DB_BONUS_BLKID ||
306 list_head(&dn->dn_dbufs));
308 if (db->db_blkid == DB_BONUS_BLKID) {
310 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
311 ASSERT3U(db->db.db_offset, ==, DB_BONUS_BLKID);
313 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
316 if (db->db_level == 0) {
317 /* we can be momentarily larger in dnode_set_blksz() */
318 if (db->db_blkid != DB_BONUS_BLKID && dn) {
319 ASSERT3U(db->db.db_size, >=, dn->dn_datablksz);
321 if (db->db.db_object == DMU_META_DNODE_OBJECT) {
322 dbuf_dirty_record_t *dr = db->db_data_pending;
324 * it should only be modified in syncing
325 * context, so make sure we only have
326 * one copy of the data.
328 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
332 /* verify db->db_blkptr */
334 if (db->db_parent == dn->dn_dbuf) {
335 /* db is pointed to by the dnode */
336 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
337 if (db->db.db_object == DMU_META_DNODE_OBJECT)
338 ASSERT(db->db_parent == NULL);
340 ASSERT(db->db_parent != NULL);
341 ASSERT3P(db->db_blkptr, ==,
342 &dn->dn_phys->dn_blkptr[db->db_blkid]);
344 /* db is pointed to by an indirect block */
345 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
346 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
347 ASSERT3U(db->db_parent->db.db_object, ==,
350 * dnode_grow_indblksz() can make this fail if we don't
351 * have the struct_rwlock. XXX indblksz no longer
352 * grows. Safe to do this now?
354 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) {
355 ASSERT3P(db->db_blkptr, ==,
356 ((blkptr_t *)db->db_parent->db.db_data +
357 db->db_blkid % epb));
361 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
362 db->db.db_data && db->db_blkid != DB_BONUS_BLKID &&
363 db->db_state != DB_FILL && !dn->dn_free_txg) {
365 * If the blkptr isn't set but they have nonzero data,
366 * it had better be dirty, otherwise we'll lose that
367 * data when we evict this buffer.
369 if (db->db_dirtycnt == 0) {
370 uint64_t *buf = db->db.db_data;
373 for (i = 0; i < db->db.db_size >> 3; i++) {
382 dbuf_update_data(dmu_buf_impl_t *db)
384 ASSERT(MUTEX_HELD(&db->db_mtx));
385 if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
386 ASSERT(!refcount_is_zero(&db->db_holds));
387 *db->db_user_data_ptr_ptr = db->db.db_data;
392 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
394 ASSERT(MUTEX_HELD(&db->db_mtx));
395 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
398 ASSERT(buf->b_data != NULL);
399 db->db.db_data = buf->b_data;
400 if (!arc_released(buf))
401 arc_set_callback(buf, dbuf_do_evict, db);
402 dbuf_update_data(db);
405 db->db.db_data = NULL;
406 db->db_state = DB_UNCACHED;
411 dbuf_whichblock(dnode_t *dn, uint64_t offset)
413 if (dn->dn_datablkshift) {
414 return (offset >> dn->dn_datablkshift);
416 ASSERT3U(offset, <, dn->dn_datablksz);
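/*
 * Editor's note: with power-of-two blocks the shift does all the work,
 * e.g. 128K blocks (dn_datablkshift == 17) put offset 300000 in block 2;
 * otherwise the object fits in a single (possibly odd-sized) block and
 * the answer is block 0.
 */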
422 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
424 dmu_buf_impl_t *db = vdb;
426 mutex_enter(&db->db_mtx);
427 ASSERT3U(db->db_state, ==, DB_READ);
429 * All reads are synchronous, so we must have a hold on the dbuf
431 ASSERT(refcount_count(&db->db_holds) > 0);
432 ASSERT(db->db_buf == NULL);
433 ASSERT(db->db.db_data == NULL);
434 if (db->db_level == 0 && db->db_freed_in_flight) {
435 /* we were freed in flight; disregard any error */
436 arc_release(buf, db);
437 bzero(buf->b_data, db->db.db_size);
439 db->db_freed_in_flight = FALSE;
440 dbuf_set_data(db, buf);
441 db->db_state = DB_CACHED;
442 } else if (zio == NULL || zio->io_error == 0) {
443 dbuf_set_data(db, buf);
444 db->db_state = DB_CACHED;
446 ASSERT(db->db_blkid != DB_BONUS_BLKID);
447 ASSERT3P(db->db_buf, ==, NULL);
448 VERIFY(arc_buf_remove_ref(buf, db) == 1);
449 db->db_state = DB_UNCACHED;
451 cv_broadcast(&db->db_changed);
452 mutex_exit(&db->db_mtx);
457 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
461 uint32_t aflags = ARC_NOWAIT;
463 ASSERT(!refcount_is_zero(&db->db_holds));
464 /* We need the struct_rwlock to prevent db_blkptr from changing. */
465 ASSERT(RW_LOCK_HELD(&db->db_dnode->dn_struct_rwlock));
466 ASSERT(MUTEX_HELD(&db->db_mtx));
467 ASSERT(db->db_state == DB_UNCACHED);
468 ASSERT(db->db_buf == NULL);
470 if (db->db_blkid == DB_BONUS_BLKID) {
471 int bonuslen = db->db_dnode->dn_bonuslen;
473 ASSERT3U(bonuslen, <=, db->db.db_size);
474 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
475 arc_space_consume(DN_MAX_BONUSLEN);
476 if (bonuslen < DN_MAX_BONUSLEN)
477 bzero(db->db.db_data, DN_MAX_BONUSLEN);
478 bcopy(DN_BONUS(db->db_dnode->dn_phys), db->db.db_data,
480 dbuf_update_data(db);
481 db->db_state = DB_CACHED;
482 mutex_exit(&db->db_mtx);
486 if (db->db_level == 0 && dnode_block_freed(db->db_dnode, db->db_blkid))
492 dprintf_dbuf(db, "blkptr: %s\n", "NULL");
494 dprintf_dbuf_bp(db, bp, "%s", "blkptr:");
496 if (bp == NULL || BP_IS_HOLE(bp)) {
497 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
499 ASSERT(bp == NULL || BP_IS_HOLE(bp));
500 dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa,
501 db->db.db_size, db, type));
502 bzero(db->db.db_data, db->db.db_size);
503 db->db_state = DB_CACHED;
504 *flags |= DB_RF_CACHED;
505 mutex_exit(&db->db_mtx);
509 db->db_state = DB_READ;
510 mutex_exit(&db->db_mtx);
512 zb.zb_objset = db->db_objset->os_dsl_dataset ?
513 db->db_objset->os_dsl_dataset->ds_object : 0;
514 zb.zb_object = db->db.db_object;
515 zb.zb_level = db->db_level;
516 zb.zb_blkid = db->db_blkid;
518 dbuf_add_ref(db, NULL);
519 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
520 ASSERT3U(db->db_dnode->dn_type, <, DMU_OT_NUMTYPES);
521 (void) arc_read(zio, db->db_dnode->dn_objset->os_spa, bp,
522 db->db_level > 0 ? byteswap_uint64_array :
523 dmu_ot[db->db_dnode->dn_type].ot_byteswap,
524 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
525 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
527 if (aflags & ARC_CACHED)
528 *flags |= DB_RF_CACHED;
532 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
535 int havepzio = (zio != NULL);
539 * We don't have to hold the mutex to check db_state because it
540 * can't be freed while we have a hold on the buffer.
542 ASSERT(!refcount_is_zero(&db->db_holds));
544 if ((flags & DB_RF_HAVESTRUCT) == 0)
545 rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
547 prefetch = db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID &&
548 (flags & DB_RF_NOPREFETCH) == 0 && db->db_dnode != NULL;
550 mutex_enter(&db->db_mtx);
551 if (db->db_state == DB_CACHED) {
552 mutex_exit(&db->db_mtx);
554 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
555 db->db.db_size, TRUE);
556 if ((flags & DB_RF_HAVESTRUCT) == 0)
557 rw_exit(&db->db_dnode->dn_struct_rwlock);
558 } else if (db->db_state == DB_UNCACHED) {
560 zio = zio_root(db->db_dnode->dn_objset->os_spa,
561 NULL, NULL, ZIO_FLAG_CANFAIL);
563 dbuf_read_impl(db, zio, &flags);
565 /* dbuf_read_impl has dropped db_mtx for us */
568 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
569 db->db.db_size, flags & DB_RF_CACHED);
571 if ((flags & DB_RF_HAVESTRUCT) == 0)
572 rw_exit(&db->db_dnode->dn_struct_rwlock);
577 mutex_exit(&db->db_mtx);
579 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
580 db->db.db_size, TRUE);
581 if ((flags & DB_RF_HAVESTRUCT) == 0)
582 rw_exit(&db->db_dnode->dn_struct_rwlock);
584 mutex_enter(&db->db_mtx);
585 if ((flags & DB_RF_NEVERWAIT) == 0) {
586 while (db->db_state == DB_READ ||
587 db->db_state == DB_FILL) {
588 ASSERT(db->db_state == DB_READ ||
589 (flags & DB_RF_HAVESTRUCT) == 0);
590 cv_wait(&db->db_changed, &db->db_mtx);
592 if (db->db_state == DB_UNCACHED)
595 mutex_exit(&db->db_mtx);
598 ASSERT(err || havepzio || db->db_state == DB_CACHED);
603 dbuf_noread(dmu_buf_impl_t *db)
605 ASSERT(!refcount_is_zero(&db->db_holds));
606 ASSERT(db->db_blkid != DB_BONUS_BLKID);
607 mutex_enter(&db->db_mtx);
608 while (db->db_state == DB_READ || db->db_state == DB_FILL)
609 cv_wait(&db->db_changed, &db->db_mtx);
610 if (db->db_state == DB_UNCACHED) {
611 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
613 ASSERT(db->db_buf == NULL);
614 ASSERT(db->db.db_data == NULL);
615 dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa,
616 db->db.db_size, db, type));
617 db->db_state = DB_FILL;
619 ASSERT3U(db->db_state, ==, DB_CACHED);
621 mutex_exit(&db->db_mtx);
625 * This is our just-in-time copy function. It makes a copy of
626 * buffers that have been modified in a previous transaction
627 * group before we modify them in the current active group.
629 * This function is used in two places: when we are dirtying a
630 * buffer for the first time in a txg, and when we are freeing
631 * a range in a dnode that includes this buffer.
633 * Note that when we are called from dbuf_free_range() we do
634 * not put a hold on the buffer, we just traverse the active
635 * dbuf list for the dnode.
638 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
640 dbuf_dirty_record_t *dr = db->db_last_dirty;
642 ASSERT(MUTEX_HELD(&db->db_mtx));
643 ASSERT(db->db.db_data != NULL);
644 ASSERT(db->db_level == 0);
645 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
648 (dr->dt.dl.dr_data !=
649 ((db->db_blkid == DB_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
653 * If the last dirty record for this dbuf has not yet synced
654 * and it's referencing the dbuf data, either:
655 * reset the reference to point to a new copy,
656 * or (if there are no active holders)
657 * just null out the current db_data pointer.
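 * For example, a dbuf with three holds and one dirty record gets a
 * private copy of the old data; when holds == dirtycnt, no open-context
 * caller can still see the buffer, so db_data is simply cleared.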
659 ASSERT(dr->dr_txg >= txg - 2);
660 if (db->db_blkid == DB_BONUS_BLKID) {
661 /* Note that the data bufs here are zio_bufs */
662 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
663 arc_space_consume(DN_MAX_BONUSLEN);
664 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
665 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
666 int size = db->db.db_size;
667 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
668 dr->dt.dl.dr_data = arc_buf_alloc(
669 db->db_dnode->dn_objset->os_spa, size, db, type);
670 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
672 dbuf_set_data(db, NULL);
677 dbuf_unoverride(dbuf_dirty_record_t *dr)
679 dmu_buf_impl_t *db = dr->dr_dbuf;
680 uint64_t txg = dr->dr_txg;
682 ASSERT(MUTEX_HELD(&db->db_mtx));
683 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
684 ASSERT(db->db_level == 0);
686 if (db->db_blkid == DB_BONUS_BLKID ||
687 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
690 /* free this block */
691 if (!BP_IS_HOLE(&dr->dt.dl.dr_overridden_by)) {
692 /* XXX can get silent EIO here */
693 (void) arc_free(NULL, db->db_dnode->dn_objset->os_spa,
694 txg, &dr->dt.dl.dr_overridden_by, NULL, NULL, ARC_WAIT);
696 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
698 * Release the already-written buffer, so we leave it in
699 * a consistent dirty state. Note that all callers are
700 * modifying the buffer, so they will immediately do
701 * another (redundant) arc_release(). Therefore, leave
702 * the buf thawed to save the effort of freezing &
703 * immediately re-thawing it.
705 arc_release(dr->dt.dl.dr_data, db);
709 dbuf_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
711 dmu_buf_impl_t *db, *db_next;
712 uint64_t txg = tx->tx_txg;
714 dprintf_dnode(dn, "blkid=%llu nblks=%llu\n", blkid, nblks);
715 mutex_enter(&dn->dn_dbufs_mtx);
716 for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
717 db_next = list_next(&dn->dn_dbufs, db);
718 ASSERT(db->db_blkid != DB_BONUS_BLKID);
719 if (db->db_level != 0)
721 dprintf_dbuf(db, "found buf %s\n", "");
722 if (db->db_blkid < blkid ||
723 db->db_blkid >= blkid+nblks)
726 /* found a level 0 buffer in the range */
727 if (dbuf_undirty(db, tx))
730 mutex_enter(&db->db_mtx);
731 if (db->db_state == DB_UNCACHED ||
732 db->db_state == DB_EVICTING) {
733 ASSERT(db->db.db_data == NULL);
734 mutex_exit(&db->db_mtx);
737 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
738 /* will be handled in dbuf_read_done or dbuf_rele */
739 db->db_freed_in_flight = TRUE;
740 mutex_exit(&db->db_mtx);
743 if (refcount_count(&db->db_holds) == 0) {
748 /* The dbuf is referenced */
750 if (db->db_last_dirty != NULL) {
751 dbuf_dirty_record_t *dr = db->db_last_dirty;
753 if (dr->dr_txg == txg) {
755 * This buffer is "in-use", re-adjust the file
756 * size to reflect that this buffer may
757 * contain new data when we sync.
759 if (db->db_blkid > dn->dn_maxblkid)
760 dn->dn_maxblkid = db->db_blkid;
764 * This dbuf is not dirty in the open context.
765 * Either uncache it (if it's not referenced in
766 * the open context) or reset its contents to
769 dbuf_fix_old_data(db, txg);
772 /* clear the contents if it's cached */
773 if (db->db_state == DB_CACHED) {
774 ASSERT(db->db.db_data != NULL);
775 arc_release(db->db_buf, db);
776 bzero(db->db.db_data, db->db.db_size);
777 arc_buf_freeze(db->db_buf);
780 mutex_exit(&db->db_mtx);
782 mutex_exit(&dn->dn_dbufs_mtx);
786 dbuf_block_freeable(dmu_buf_impl_t *db)
788 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
789 uint64_t birth_txg = 0;
792 * We don't need any locking to protect db_blkptr:
793 * If it's syncing, then db_last_dirty will be set
794 * so we'll ignore db_blkptr.
796 ASSERT(MUTEX_HELD(&db->db_mtx));
797 if (db->db_last_dirty)
798 birth_txg = db->db_last_dirty->dr_txg;
799 else if (db->db_blkptr)
800 birth_txg = db->db_blkptr->blk_birth;
802 /* If we don't exist or are in a snapshot, we can't be freed */
804 return (ds == NULL ||
805 dsl_dataset_block_freeable(ds, birth_txg));
811 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
813 arc_buf_t *buf, *obuf;
814 int osize = db->db.db_size;
815 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
817 ASSERT(db->db_blkid != DB_BONUS_BLKID);
819 /* XXX does *this* func really need the lock? */
820 ASSERT(RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock));
823 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
824 * is OK, because there can be no other references to the db
825 * when we are changing its size, so no concurrent DB_FILL can
829 * XXX we should be doing a dbuf_read, checking the return
830 * value and returning that up to our callers
832 dbuf_will_dirty(db, tx);
834 /* create the data buffer for the new block */
835 buf = arc_buf_alloc(db->db_dnode->dn_objset->os_spa, size, db, type);
837 /* copy old block data to the new block */
839 bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
840 /* zero the remainder */
842 bzero((uint8_t *)buf->b_data + osize, size - osize);
844 mutex_enter(&db->db_mtx);
845 dbuf_set_data(db, buf);
846 VERIFY(arc_buf_remove_ref(obuf, db) == 1);
847 db->db.db_size = size;
849 if (db->db_level == 0) {
850 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
851 db->db_last_dirty->dt.dl.dr_data = buf;
853 mutex_exit(&db->db_mtx);
855 dnode_willuse_space(db->db_dnode, size-osize, tx);
858 dbuf_dirty_record_t *
859 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
861 dnode_t *dn = db->db_dnode;
862 objset_impl_t *os = dn->dn_objset;
863 dbuf_dirty_record_t **drp, *dr;
864 int drop_struct_lock = FALSE;
865 int txgoff = tx->tx_txg & TXG_MASK;
867 ASSERT(tx->tx_txg != 0);
868 ASSERT(!refcount_is_zero(&db->db_holds));
869 DMU_TX_DIRTY_BUF(tx, db);
872 * Shouldn't dirty a regular buffer in syncing context. Private
873 * objects may be dirtied in syncing context, but only if they
874 * were already pre-dirtied in open context.
875 * XXX We may want to prohibit dirtying in syncing context even
876 * if they did pre-dirty.
878 ASSERT(!dmu_tx_is_syncing(tx) ||
879 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
880 dn->dn_object == DMU_META_DNODE_OBJECT ||
881 dn->dn_objset->os_dsl_dataset == NULL ||
882 dsl_dir_is_private(dn->dn_objset->os_dsl_dataset->ds_dir));
885 * We make this assert for private objects as well, but after we
886 * check if we're already dirty. They are allowed to re-dirty
887 * in syncing context.
889 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
890 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
891 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
893 mutex_enter(&db->db_mtx);
895 * XXX make this true for indirects too? The problem is that
896 * transactions created with dmu_tx_create_assigned() from
897 * syncing context don't bother holding ahead.
899 ASSERT(db->db_level != 0 ||
900 db->db_state == DB_CACHED || db->db_state == DB_FILL);
902 mutex_enter(&dn->dn_mtx);
904 * Don't set dirtyctx to SYNC if we're just modifying this as we
905 * initialize the objset.
907 if (dn->dn_dirtyctx == DN_UNDIRTIED &&
908 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
910 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
911 ASSERT(dn->dn_dirtyctx_firstset == NULL);
912 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
914 mutex_exit(&dn->dn_mtx);
917 * If this buffer is already dirty, we're done.
919 drp = &db->db_last_dirty;
920 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
921 db->db.db_object == DMU_META_DNODE_OBJECT);
922 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
924 if (dr && dr->dr_txg == tx->tx_txg) {
925 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) {
927 * If this buffer has already been written out,
928 * we now need to reset its state.
931 if (db->db.db_object != DMU_META_DNODE_OBJECT)
932 arc_buf_thaw(db->db_buf);
934 mutex_exit(&db->db_mtx);
939 * Only valid if not already dirty.
941 ASSERT(dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
942 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
944 ASSERT3U(dn->dn_nlevels, >, db->db_level);
945 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
946 dn->dn_phys->dn_nlevels > db->db_level ||
947 dn->dn_next_nlevels[txgoff] > db->db_level ||
948 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
949 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
952 * We should only be dirtying in syncing context if it's the
953 * mos, a spa os, or we're initializing the os. However, we are
954 * allowed to dirty in syncing context provided we already
955 * dirtied it in open context. Hence we must make this
956 * assertion only if we're not already dirty.
958 ASSERT(!dmu_tx_is_syncing(tx) ||
959 os->os_dsl_dataset == NULL ||
960 !dsl_dir_is_private(os->os_dsl_dataset->ds_dir) ||
961 !BP_IS_HOLE(os->os_rootbp));
962 ASSERT(db->db.db_size != 0);
964 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
966 if (db->db_blkid != DB_BONUS_BLKID) {
968 * Update the accounting.
970 if (dbuf_block_freeable(db)) {
971 blkptr_t *bp = db->db_blkptr;
972 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
973 bp_get_dasize(os->os_spa, bp) : db->db.db_size;
975 * This is only a guess -- if the dbuf is dirty
976 * in a previous txg, we don't know how much
977 * space it will use on disk yet. We should
978 * really have the struct_rwlock to access
979 * db_blkptr, but since this is just a guess,
980 * it's OK if we get an odd answer.
982 dnode_willuse_space(dn, -willfree, tx);
984 dnode_willuse_space(dn, db->db.db_size, tx);
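/*
 * When the old block is freeable, the two calls above net out:
 * e.g. rewriting a 128K block whose old copy occupies 128K on disk
 * charges -128K + 128K == 0 additional space against the dnode.
 */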
988 * If this buffer is dirty in an old transaction group we need
989 * to make a copy of it so that the changes we make in this
990 * transaction group won't leak out when we sync the older txg.
992 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
993 if (db->db_level == 0) {
994 void *data_old = db->db_buf;
996 if (db->db_blkid == DB_BONUS_BLKID) {
997 dbuf_fix_old_data(db, tx->tx_txg);
998 data_old = db->db.db_data;
999 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1001 * Release the data buffer from the cache so that we
1002 * can modify it without impacting possible other users
1003 * of this cached data block. Note that indirect
1004 * blocks and private objects are not released until the
1005 * syncing state (since they are only modified then).
1007 arc_release(db->db_buf, db);
1008 dbuf_fix_old_data(db, tx->tx_txg);
1009 data_old = db->db_buf;
1011 ASSERT(data_old != NULL);
1012 dr->dt.dl.dr_data = data_old;
1014 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1015 list_create(&dr->dt.di.dr_children,
1016 sizeof (dbuf_dirty_record_t),
1017 offsetof(dbuf_dirty_record_t, dr_dirty_node));
1020 dr->dr_txg = tx->tx_txg;
1025 * We could have been freed_in_flight between the dbuf_noread
1026 * and dbuf_dirty. We win, as though the dbuf_noread() had
1027 * happened after the free.
1029 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) {
1030 mutex_enter(&dn->dn_mtx);
1031 dnode_clear_range(dn, db->db_blkid, 1, tx);
1032 mutex_exit(&dn->dn_mtx);
1033 db->db_freed_in_flight = FALSE;
1037 * This buffer is now part of this txg
1039 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1040 db->db_dirtycnt += 1;
1041 ASSERT3U(db->db_dirtycnt, <=, 3);
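/*
 * The bound of 3 reflects the txg pipeline: a dbuf can be dirty in at
 * most the open, quiescing, and syncing txgs at once.
 */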
1043 mutex_exit(&db->db_mtx);
1045 if (db->db_blkid == DB_BONUS_BLKID) {
1046 mutex_enter(&dn->dn_mtx);
1047 ASSERT(!list_link_active(&dr->dr_dirty_node));
1048 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1049 mutex_exit(&dn->dn_mtx);
1050 dnode_setdirty(dn, tx);
1054 if (db->db_level == 0) {
1055 dnode_new_blkid(dn, db->db_blkid, tx);
1056 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1059 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1060 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1061 drop_struct_lock = TRUE;
1064 if (db->db_level+1 < dn->dn_nlevels) {
1065 dmu_buf_impl_t *parent = db->db_parent;
1066 dbuf_dirty_record_t *di;
1067 int parent_held = FALSE;
1069 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1070 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1072 parent = dbuf_hold_level(dn, db->db_level+1,
1073 db->db_blkid >> epbs, FTAG);
1076 if (drop_struct_lock)
1077 rw_exit(&dn->dn_struct_rwlock);
1078 ASSERT3U(db->db_level+1, ==, parent->db_level);
1079 di = dbuf_dirty(parent, tx);
1081 dbuf_rele(parent, FTAG);
1083 mutex_enter(&db->db_mtx);
1084 /* possible race with dbuf_undirty() */
1085 if (db->db_last_dirty == dr ||
1086 dn->dn_object == DMU_META_DNODE_OBJECT) {
1087 mutex_enter(&di->dt.di.dr_mtx);
1088 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1089 ASSERT(!list_link_active(&dr->dr_dirty_node));
1090 list_insert_tail(&di->dt.di.dr_children, dr);
1091 mutex_exit(&di->dt.di.dr_mtx);
1094 mutex_exit(&db->db_mtx);
1096 ASSERT(db->db_level+1 == dn->dn_nlevels);
1097 ASSERT(db->db_blkid < dn->dn_nblkptr);
1098 ASSERT(db->db_parent == NULL ||
1099 db->db_parent == db->db_dnode->dn_dbuf);
1100 mutex_enter(&dn->dn_mtx);
1101 ASSERT(!list_link_active(&dr->dr_dirty_node));
1102 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1103 mutex_exit(&dn->dn_mtx);
1104 if (drop_struct_lock)
1105 rw_exit(&dn->dn_struct_rwlock);
1108 dnode_setdirty(dn, tx);
1113 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1115 dnode_t *dn = db->db_dnode;
1116 uint64_t txg = tx->tx_txg;
1117 dbuf_dirty_record_t *dr, **drp;
1120 ASSERT(db->db_blkid != DB_BONUS_BLKID);
1122 mutex_enter(&db->db_mtx);
1125 * If this buffer is not dirty, we're done.
1127 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1128 if (dr->dr_txg <= txg)
1130 if (dr == NULL || dr->dr_txg < txg) {
1131 mutex_exit(&db->db_mtx);
1134 ASSERT(dr->dr_txg == txg);
1137 * If this buffer is currently held, we cannot undirty
1138 * it, since one of the current holders may be in the
1139 * middle of an update. Note that users of dbuf_undirty()
1140 * should not place a hold on the dbuf before the call.
1142 if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1143 mutex_exit(&db->db_mtx);
1144 /* Make sure we don't toss this buffer at sync phase */
1145 mutex_enter(&dn->dn_mtx);
1146 dnode_clear_range(dn, db->db_blkid, 1, tx);
1147 mutex_exit(&dn->dn_mtx);
1151 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1153 ASSERT(db->db.db_size != 0);
1155 /* XXX would be nice to fix up dn_towrite_space[] */
1159 if (dr->dr_parent) {
1160 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1161 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1162 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1163 } else if (db->db_level+1 == dn->dn_nlevels) {
1164 ASSERT3P(db->db_parent, ==, dn->dn_dbuf);
1165 mutex_enter(&dn->dn_mtx);
1166 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1167 mutex_exit(&dn->dn_mtx);
1170 if (db->db_level == 0) {
1171 dbuf_unoverride(dr);
1173 ASSERT(db->db_buf != NULL);
1174 ASSERT(dr->dt.dl.dr_data != NULL);
1175 if (dr->dt.dl.dr_data != db->db_buf)
1176 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db) == 1);
1178 ASSERT(db->db_buf != NULL);
1179 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1180 mutex_destroy(&dr->dt.di.dr_mtx);
1181 list_destroy(&dr->dt.di.dr_children);
1183 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1185 ASSERT(db->db_dirtycnt > 0);
1186 db->db_dirtycnt -= 1;
1188 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1189 arc_buf_t *buf = db->db_buf;
1191 ASSERT(arc_released(buf));
1192 dbuf_set_data(db, NULL);
1193 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1198 mutex_exit(&db->db_mtx);
1202 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1204 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1206 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1208 ASSERT(tx->tx_txg != 0);
1209 ASSERT(!refcount_is_zero(&db->db_holds));
1211 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock))
1212 rf |= DB_RF_HAVESTRUCT;
1213 (void) dbuf_read(db, NULL, rf);
1214 (void) dbuf_dirty(db, tx);
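/*
 * Editor's note: a minimal sketch of the usual open-context write path,
 * assuming the caller already holds the dnode and has an assigned tx
 * (src stands in for the caller's data):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	dbuf_will_dirty(db, tx);
 *	bcopy(src, db->db.db_data, db->db.db_size);
 *	dbuf_rele(db, FTAG);
 */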
1218 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1220 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1222 ASSERT(db->db_blkid != DB_BONUS_BLKID);
1223 ASSERT(tx->tx_txg != 0);
1224 ASSERT(db->db_level == 0);
1225 ASSERT(!refcount_is_zero(&db->db_holds));
1227 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1228 dmu_tx_private_ok(tx));
1231 (void) dbuf_dirty(db, tx);
1234 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1237 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1239 mutex_enter(&db->db_mtx);
1242 if (db->db_state == DB_FILL) {
1243 if (db->db_level == 0 && db->db_freed_in_flight) {
1244 ASSERT(db->db_blkid != DB_BONUS_BLKID);
1245 /* we were freed while filling */
1246 /* XXX dbuf_undirty? */
1247 bzero(db->db.db_data, db->db.db_size);
1248 db->db_freed_in_flight = FALSE;
1250 db->db_state = DB_CACHED;
1251 cv_broadcast(&db->db_changed);
1253 mutex_exit(&db->db_mtx);
1257 * "Clear" the contents of this dbuf. This will mark the dbuf
1258 * EVICTING and clear *most* of its references. Unfortunately,
1259 * when we are not holding the dn_dbufs_mtx, we can't clear the
1260 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1261 * in this case. For callers from the DMU we will usually see:
1262 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1263 * For the arc callback, we will usually see:
1264 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1265 * Sometimes, though, we will get a mix of these two:
1266 * DMU: dbuf_clear()->arc_buf_evict()
1267 * ARC: dbuf_do_evict()->dbuf_destroy()
1270 dbuf_clear(dmu_buf_impl_t *db)
1272 dnode_t *dn = db->db_dnode;
1273 dmu_buf_impl_t *parent = db->db_parent;
1274 dmu_buf_impl_t *dndb = dn->dn_dbuf;
1275 int dbuf_gone = FALSE;
1277 ASSERT(MUTEX_HELD(&db->db_mtx));
1278 ASSERT(refcount_is_zero(&db->db_holds));
1280 dbuf_evict_user(db);
1282 if (db->db_state == DB_CACHED) {
1283 ASSERT(db->db.db_data != NULL);
1284 if (db->db_blkid == DB_BONUS_BLKID) {
1285 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1286 arc_space_return(DN_MAX_BONUSLEN);
1288 db->db.db_data = NULL;
1289 db->db_state = DB_UNCACHED;
1292 ASSERT3U(db->db_state, ==, DB_UNCACHED);
1293 ASSERT(db->db_data_pending == NULL);
1295 db->db_state = DB_EVICTING;
1296 db->db_blkptr = NULL;
1298 if (db->db_blkid != DB_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1299 list_remove(&dn->dn_dbufs, db);
1301 db->db_dnode = NULL;
1305 dbuf_gone = arc_buf_evict(db->db_buf);
1308 mutex_exit(&db->db_mtx);
1311 * If this dbuf is referenced from an indirect dbuf,
1312 * decrement the ref count on the indirect dbuf.
1314 if (parent && parent != dndb)
1315 dbuf_rele(parent, db);
1319 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1320 dmu_buf_impl_t **parentp, blkptr_t **bpp)
1327 ASSERT(blkid != DB_BONUS_BLKID);
1329 if (dn->dn_phys->dn_nlevels == 0)
1332 nlevels = dn->dn_phys->dn_nlevels;
1334 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
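/*
 * Editor's note: with 16K indirect blocks (dn_indblkshift == 14) and
 * 128-byte block pointers (SPA_BLKPTRSHIFT == 7), epbs is 7, i.e. 128
 * blkptrs per indirect block, so the parent of level-0 block 1000 is
 * level-1 block 1000 >> 7 == 7.
 */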
1336 ASSERT3U(level * epbs, <, 64);
1337 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1338 if (level >= nlevels ||
1339 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1340 /* the buffer has no parent yet */
1342 } else if (level < nlevels-1) {
1343 /* this block is referenced from an indirect block */
1344 int err = dbuf_hold_impl(dn, level+1,
1345 blkid >> epbs, fail_sparse, NULL, parentp);
1348 err = dbuf_read(*parentp, NULL,
1349 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1351 dbuf_rele(*parentp, NULL);
1355 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1356 (blkid & ((1ULL << epbs) - 1));
1359 /* the block is referenced from the dnode */
1360 ASSERT3U(level, ==, nlevels-1);
1361 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1362 blkid < dn->dn_phys->dn_nblkptr);
1364 dbuf_add_ref(dn->dn_dbuf, NULL);
1365 *parentp = dn->dn_dbuf;
1367 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1372 static dmu_buf_impl_t *
1373 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1374 dmu_buf_impl_t *parent, blkptr_t *blkptr)
1376 objset_impl_t *os = dn->dn_objset;
1377 dmu_buf_impl_t *db, *odb;
1379 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1380 ASSERT(dn->dn_type != DMU_OT_NONE);
1382 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1385 db->db.db_object = dn->dn_object;
1386 db->db_level = level;
1387 db->db_blkid = blkid;
1388 db->db_last_dirty = NULL;
1389 db->db_dirtycnt = 0;
1391 db->db_parent = parent;
1392 db->db_blkptr = blkptr;
1394 db->db_user_ptr = NULL;
1395 db->db_user_data_ptr_ptr = NULL;
1396 db->db_evict_func = NULL;
1397 db->db_immediate_evict = 0;
1398 db->db_freed_in_flight = 0;
1400 if (blkid == DB_BONUS_BLKID) {
1401 ASSERT3P(parent, ==, dn->dn_dbuf);
1402 db->db.db_size = DN_MAX_BONUSLEN -
1403 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1404 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1405 db->db.db_offset = DB_BONUS_BLKID;
1406 db->db_state = DB_UNCACHED;
1407 /* the bonus dbuf is not placed in the hash table */
1408 arc_space_consume(sizeof (dmu_buf_impl_t));
1412 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1413 db->db.db_size = blocksize;
1414 db->db.db_offset = db->db_blkid * blocksize;
1418 * Hold the dn_dbufs_mtx while the new dbuf is put
1419 * in the hash table *and* added to the dbufs list.
1420 * This prevents a possible deadlock with someone
1421 * trying to look up this dbuf before it's added to the
1424 mutex_enter(&dn->dn_dbufs_mtx);
1425 db->db_state = DB_EVICTING;
1426 if ((odb = dbuf_hash_insert(db)) != NULL) {
1427 /* someone else inserted it first */
1428 kmem_cache_free(dbuf_cache, db);
1429 mutex_exit(&dn->dn_dbufs_mtx);
1432 list_insert_head(&dn->dn_dbufs, db);
1433 db->db_state = DB_UNCACHED;
1434 mutex_exit(&dn->dn_dbufs_mtx);
1435 arc_space_consume(sizeof (dmu_buf_impl_t));
1437 if (parent && parent != dn->dn_dbuf)
1438 dbuf_add_ref(parent, db);
1440 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1441 refcount_count(&dn->dn_holds) > 0);
1442 (void) refcount_add(&dn->dn_holds, db);
1444 dprintf_dbuf(db, "db=%p\n", db);
1450 dbuf_do_evict(void *private)
1452 arc_buf_t *buf = private;
1453 dmu_buf_impl_t *db = buf->b_private;
1455 if (!MUTEX_HELD(&db->db_mtx))
1456 mutex_enter(&db->db_mtx);
1458 ASSERT(refcount_is_zero(&db->db_holds));
1460 if (db->db_state != DB_EVICTING) {
1461 ASSERT(db->db_state == DB_CACHED);
1466 mutex_exit(&db->db_mtx);
1473 dbuf_destroy(dmu_buf_impl_t *db)
1475 ASSERT(refcount_is_zero(&db->db_holds));
1477 if (db->db_blkid != DB_BONUS_BLKID) {
1479 * If this dbuf is still on the dn_dbufs list,
1480 * remove it from that list.
1483 dnode_t *dn = db->db_dnode;
1485 mutex_enter(&dn->dn_dbufs_mtx);
1486 list_remove(&dn->dn_dbufs, db);
1487 mutex_exit(&dn->dn_dbufs_mtx);
1490 db->db_dnode = NULL;
1492 dbuf_hash_remove(db);
1494 db->db_parent = NULL;
1497 ASSERT(!list_link_active(&db->db_link));
1498 ASSERT(db->db.db_data == NULL);
1499 ASSERT(db->db_hash_next == NULL);
1500 ASSERT(db->db_blkptr == NULL);
1501 ASSERT(db->db_data_pending == NULL);
1503 kmem_cache_free(dbuf_cache, db);
1504 arc_space_return(sizeof (dmu_buf_impl_t));
1508 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1510 dmu_buf_impl_t *db = NULL;
1511 blkptr_t *bp = NULL;
1513 ASSERT(blkid != DB_BONUS_BLKID);
1514 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1516 if (dnode_block_freed(dn, blkid))
1519 /* dbuf_find() returns with db_mtx held */
1520 if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1521 if (refcount_count(&db->db_holds) > 0) {
1523 * This dbuf is active. We assume that it is
1524 * already CACHED, or else about to be either
1527 mutex_exit(&db->db_mtx);
1530 mutex_exit(&db->db_mtx);
1534 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1535 if (bp && !BP_IS_HOLE(bp)) {
1536 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1538 zb.zb_objset = dn->dn_objset->os_dsl_dataset ?
1539 dn->dn_objset->os_dsl_dataset->ds_object : 0;
1540 zb.zb_object = dn->dn_object;
1542 zb.zb_blkid = blkid;
1544 (void) arc_read(NULL, dn->dn_objset->os_spa, bp,
1545 dmu_ot[dn->dn_type].ot_byteswap,
1546 NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
1547 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1551 dbuf_rele(db, NULL);
1556 * Returns with db_holds incremented, and db_mtx not held.
1557 * Note: dn_struct_rwlock must be held.
1560 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1561 void *tag, dmu_buf_impl_t **dbp)
1563 dmu_buf_impl_t *db, *parent = NULL;
1565 ASSERT(blkid != DB_BONUS_BLKID);
1566 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1567 ASSERT3U(dn->dn_nlevels, >, level);
1571 /* dbuf_find() returns with db_mtx held */
1572 db = dbuf_find(dn, level, blkid);
1575 blkptr_t *bp = NULL;
1578 ASSERT3P(parent, ==, NULL);
1579 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1581 if (err == 0 && bp && BP_IS_HOLE(bp))
1585 dbuf_rele(parent, NULL);
1589 if (err && err != ENOENT)
1591 db = dbuf_create(dn, level, blkid, parent, bp);
1594 if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1595 arc_buf_add_ref(db->db_buf, db);
1596 if (db->db_buf->b_data == NULL) {
1599 dbuf_rele(parent, NULL);
1604 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1607 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1610 * If this buffer is currently syncing out, and we are
1611 * still referencing it from db_data, we need to make a copy
1612 * of it in case we decide we want to dirty it again in this txg.
1614 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID &&
1615 dn->dn_object != DMU_META_DNODE_OBJECT &&
1616 db->db_state == DB_CACHED && db->db_data_pending) {
1617 dbuf_dirty_record_t *dr = db->db_data_pending;
1619 if (dr->dt.dl.dr_data == db->db_buf) {
1620 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1623 arc_buf_alloc(db->db_dnode->dn_objset->os_spa,
1624 db->db.db_size, db, type));
1625 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1630 (void) refcount_add(&db->db_holds, tag);
1631 dbuf_update_data(db);
1633 mutex_exit(&db->db_mtx);
1635 /* NOTE: we can't rele the parent until after we drop the db_mtx */
1637 dbuf_rele(parent, NULL);
1639 ASSERT3P(db->db_dnode, ==, dn);
1640 ASSERT3U(db->db_blkid, ==, blkid);
1641 ASSERT3U(db->db_level, ==, level);
1648 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1651 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1652 return (err ? NULL : db);
1656 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1659 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1660 return (err ? NULL : db);
1664 dbuf_create_bonus(dnode_t *dn)
1666 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1668 ASSERT(dn->dn_bonus == NULL);
1669 dn->dn_bonus = dbuf_create(dn, 0, DB_BONUS_BLKID, dn->dn_dbuf, NULL);
1672 #pragma weak dmu_buf_add_ref = dbuf_add_ref
1674 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
1676 int64_t holds = refcount_add(&db->db_holds, tag);
1680 #pragma weak dmu_buf_rele = dbuf_rele
1682 dbuf_rele(dmu_buf_impl_t *db, void *tag)
1686 mutex_enter(&db->db_mtx);
1689 holds = refcount_remove(&db->db_holds, tag);
1693 * We can't freeze indirects if there is a possibility that they
1694 * may be modified in the current syncing context.
1696 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
1697 arc_buf_freeze(db->db_buf);
1699 if (holds == db->db_dirtycnt &&
1700 db->db_level == 0 && db->db_immediate_evict)
1701 dbuf_evict_user(db);
1704 if (db->db_blkid == DB_BONUS_BLKID) {
1705 mutex_exit(&db->db_mtx);
1706 dnode_rele(db->db_dnode, db);
1707 } else if (db->db_buf == NULL) {
1709 * This is a special case: we never associated this
1710 * dbuf with any data allocated from the ARC.
1712 ASSERT3U(db->db_state, ==, DB_UNCACHED);
1714 } else if (arc_released(db->db_buf)) {
1715 arc_buf_t *buf = db->db_buf;
1717 * This dbuf has anonymous data associated with it.
1719 dbuf_set_data(db, NULL);
1720 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1723 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
1724 mutex_exit(&db->db_mtx);
1727 mutex_exit(&db->db_mtx);
1731 #pragma weak dmu_buf_refcount = dbuf_refcount
1733 dbuf_refcount(dmu_buf_impl_t *db)
1735 return (refcount_count(&db->db_holds));
1739 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
1740 dmu_buf_evict_func_t *evict_func)
1742 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
1743 user_data_ptr_ptr, evict_func));
1747 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
1748 dmu_buf_evict_func_t *evict_func)
1750 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1752 db->db_immediate_evict = TRUE;
1753 return (dmu_buf_update_user(db_fake, NULL, user_ptr,
1754 user_data_ptr_ptr, evict_func));
1758 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
1759 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
1761 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1762 ASSERT(db->db_level == 0);
1764 ASSERT((user_ptr == NULL) == (evict_func == NULL));
1766 mutex_enter(&db->db_mtx);
1768 if (db->db_user_ptr == old_user_ptr) {
1769 db->db_user_ptr = user_ptr;
1770 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
1771 db->db_evict_func = evict_func;
1773 dbuf_update_data(db);
1775 old_user_ptr = db->db_user_ptr;
1778 mutex_exit(&db->db_mtx);
1779 return (old_user_ptr);
1783 dmu_buf_get_user(dmu_buf_t *db_fake)
1785 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1786 ASSERT(!refcount_is_zero(&db->db_holds));
1788 return (db->db_user_ptr);
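/*
 * Editor's note: dmu_buf_set_user() installs user state only when none
 * is registered yet, returning NULL on success and the incumbent pointer
 * otherwise.  A hypothetical consumer (my_state_t and my_evict are not
 * part of this file) handles the race by freeing its losing copy:
 *
 *	my_state_t *st = kmem_zalloc(sizeof (*st), KM_SLEEP);
 *	if (dmu_buf_set_user(dbuf, st, &st->data, my_evict) != NULL) {
 *		kmem_free(st, sizeof (*st));
 *		st = dmu_buf_get_user(dbuf);
 *	}
 */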
1792 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
1794 /* ASSERT(dmu_tx_is_syncing(tx)) */
1795 ASSERT(MUTEX_HELD(&db->db_mtx));
1797 if (db->db_blkptr != NULL)
1800 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
1802 * This buffer was allocated at a time when there were
1803 * no blkptrs available from the dnode, or it was
1804 * inappropriate to hook it in (i.e., nlevels mismatch).
1806 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
1807 ASSERT(db->db_parent == NULL);
1808 db->db_parent = dn->dn_dbuf;
1809 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
1812 dmu_buf_impl_t *parent = db->db_parent;
1813 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
1815 ASSERT(dn->dn_phys->dn_nlevels > 1);
1816 if (parent == NULL) {
1817 mutex_exit(&db->db_mtx);
1818 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1819 (void) dbuf_hold_impl(dn, db->db_level+1,
1820 db->db_blkid >> epbs, FALSE, db, &parent);
1821 rw_exit(&dn->dn_struct_rwlock);
1822 mutex_enter(&db->db_mtx);
1823 db->db_parent = parent;
1825 db->db_blkptr = (blkptr_t *)parent->db.db_data +
1826 (db->db_blkid & ((1ULL << epbs) - 1));
1832 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
1834 dmu_buf_impl_t *db = dr->dr_dbuf;
1835 dnode_t *dn = db->db_dnode;
1838 ASSERT(dmu_tx_is_syncing(tx));
1840 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
1842 mutex_enter(&db->db_mtx);
1844 ASSERT(db->db_level > 0);
1847 if (db->db_buf == NULL) {
1848 mutex_exit(&db->db_mtx);
1849 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
1850 mutex_enter(&db->db_mtx);
1852 ASSERT3U(db->db_state, ==, DB_CACHED);
1853 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
1854 ASSERT(db->db_buf != NULL);
1856 dbuf_check_blkptr(dn, db);
1858 db->db_data_pending = dr;
1860 arc_release(db->db_buf, db);
1861 mutex_exit(&db->db_mtx);
1864 * XXX -- we should design a compression algorithm
1865 * that specializes in arrays of bps.
1867 dbuf_write(dr, db->db_buf, ZIO_CHECKSUM_FLETCHER_4,
1868 zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : ZIO_COMPRESS_LZJB, tx);
1871 mutex_enter(&dr->dt.di.dr_mtx);
1872 dbuf_sync_list(&dr->dt.di.dr_children, tx);
1873 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1874 mutex_exit(&dr->dt.di.dr_mtx);
1879 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
1881 arc_buf_t **datap = &dr->dt.dl.dr_data;
1882 dmu_buf_impl_t *db = dr->dr_dbuf;
1883 dnode_t *dn = db->db_dnode;
1884 objset_impl_t *os = dn->dn_objset;
1885 uint64_t txg = tx->tx_txg;
1886 int checksum, compress;
1889 ASSERT(dmu_tx_is_syncing(tx));
1891 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
1893 mutex_enter(&db->db_mtx);
1895 * To be synced, we must be dirtied. But we
1896 * might have been freed after the dirty.
1898 if (db->db_state == DB_UNCACHED) {
1899 /* This buffer has been freed since it was dirtied */
1900 ASSERT(db->db.db_data == NULL);
1901 } else if (db->db_state == DB_FILL) {
1902 /* This buffer was freed and is now being re-filled */
1903 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
1905 ASSERT3U(db->db_state, ==, DB_CACHED);
1910 * If this is a bonus buffer, simply copy the bonus data into the
1911 * dnode. It will be written out when the dnode is synced (and it
1912 * will be synced, since it must have been dirty for dbuf_sync to
1915 if (db->db_blkid == DB_BONUS_BLKID) {
1916 dbuf_dirty_record_t **drp;
1918 ASSERT(*datap != NULL);
1919 ASSERT3U(db->db_level, ==, 0);
1920 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
1921 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
1922 if (*datap != db->db.db_data) {
1923 zio_buf_free(*datap, DN_MAX_BONUSLEN);
1924 arc_space_return(DN_MAX_BONUSLEN);
1926 db->db_data_pending = NULL;
1927 drp = &db->db_last_dirty;
1929 drp = &(*drp)->dr_next;
1930 ASSERT(dr->dr_next == NULL);
1932 kmem_free(dr, sizeof (dbuf_dirty_record_t));
1933 ASSERT(db->db_dirtycnt > 0);
1934 db->db_dirtycnt -= 1;
1935 mutex_exit(&db->db_mtx);
1936 dbuf_rele(db, (void *)(uintptr_t)txg);
1941 * This function may have dropped the db_mtx lock allowing a dmu_sync
1942 * operation to sneak in. As a result, we need to ensure that we
1943 * don't check the dr_override_state until we have returned from
1944 * dbuf_check_blkptr.
1946 dbuf_check_blkptr(dn, db);
1949 * If this buffer is in the middle of an immediate write,
1950 * wait for the synchronous IO to complete.
1952 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
1953 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
1954 cv_wait(&db->db_changed, &db->db_mtx);
1955 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
1959 * If this dbuf has already been written out via an immediate write,
1960 * just complete the write by copying over the new block pointer and
1961 * updating the accounting via the write-completion functions.
1963 if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1966 zio_fake.io_private = &db;
1967 zio_fake.io_error = 0;
1968 zio_fake.io_bp = db->db_blkptr;
1969 zio_fake.io_bp_orig = *db->db_blkptr;
1970 zio_fake.io_txg = txg;
1972 *db->db_blkptr = dr->dt.dl.dr_overridden_by;
1973 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1974 db->db_data_pending = dr;
1975 dr->dr_zio = &zio_fake;
1976 mutex_exit(&db->db_mtx);
1978 if (BP_IS_OLDER(&zio_fake.io_bp_orig, txg))
1979 dsl_dataset_block_kill(os->os_dsl_dataset,
1980 &zio_fake.io_bp_orig, dn->dn_zio, tx);
1982 dbuf_write_ready(&zio_fake, db->db_buf, db);
1983 dbuf_write_done(&zio_fake, db->db_buf, db);
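/*
 * Editor's note: zio_fake above populates only the fields the two
 * completion callbacks consume (io_private, io_error, io_bp,
 * io_bp_orig, io_txg); invoking them by hand performs the same
 * space accounting a real arc_write() completion would.
 */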
1988 blksz = arc_buf_size(*datap);
1990 if (dn->dn_object != DMU_META_DNODE_OBJECT) {
1992 * If this buffer is currently "in use" (i.e., there are
1993 * active holds and db_data still references it), then make
1994 * a copy before we start the write so that any modifications
1995 * from the open txg will not leak into this write.
1997 * NOTE: this copy does not need to be made for objects only
1998 * modified in the syncing context (e.g. meta-dnode blocks).
2000 if (refcount_count(&db->db_holds) > 1 && *datap == db->db_buf) {
2001 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2002 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2003 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2007 * Private object buffers are released here rather
2008 * than in dbuf_dirty() since they are only modified
2009 * in the syncing context and we don't want the
2010 * overhead of making multiple copies of the data.
2012 arc_release(db->db_buf, db);
2015 ASSERT(*datap != NULL);
2016 db->db_data_pending = dr;
2018 mutex_exit(&db->db_mtx);
2021 * Allow dnode settings to override objset settings,
2022 * except for metadata checksums.
2024 if (dmu_ot[dn->dn_type].ot_metadata) {
2025 checksum = os->os_md_checksum;
2026 compress = zio_compress_select(dn->dn_compress,
2027 os->os_md_compress);
2029 checksum = zio_checksum_select(dn->dn_checksum,
2031 compress = zio_compress_select(dn->dn_compress,
2035 dbuf_write(dr, *datap, checksum, compress, tx);
2037 ASSERT(!list_link_active(&dr->dr_dirty_node));
2038 if (dn->dn_object == DMU_META_DNODE_OBJECT)
2039 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2041 zio_nowait(dr->dr_zio);
2045 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2047 dbuf_dirty_record_t *dr;
2049 while ((dr = list_head(list)) != NULL) {
2050 if (dr->dr_zio != NULL) {
2052 * If we find an already initialized zio then we
2053 * are processing the meta-dnode, and we have finished.
2054 * The dbufs for all dnodes are put back on the list
2055 * during processing, so that we can zio_wait()
2056 * these IOs after initiating all child IOs.
2058 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2059 DMU_META_DNODE_OBJECT);
2062 list_remove(list, dr);
2063 if (dr->dr_dbuf->db_level > 0)
2064 dbuf_sync_indirect(dr, tx);
2066 dbuf_sync_leaf(dr, tx);
2071 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, int checksum,
2072 int compress, dmu_tx_t *tx)
2074 dmu_buf_impl_t *db = dr->dr_dbuf;
2075 dnode_t *dn = db->db_dnode;
2076 objset_impl_t *os = dn->dn_objset;
2077 dmu_buf_impl_t *parent = db->db_parent;
2078 uint64_t txg = tx->tx_txg;
2083 if (parent != dn->dn_dbuf) {
2084 ASSERT(parent && parent->db_data_pending);
2085 ASSERT(db->db_level == parent->db_level-1);
2086 ASSERT(arc_released(parent->db_buf));
2087 zio = parent->db_data_pending->dr_zio;
2089 ASSERT(db->db_level == dn->dn_phys->dn_nlevels-1);
2090 ASSERT3P(db->db_blkptr, ==,
2091 &dn->dn_phys->dn_blkptr[db->db_blkid]);
2095 ASSERT(db->db_level == 0 || data == db->db_buf);
2096 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2099 zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
2100 zb.zb_object = db->db.db_object;
2101 zb.zb_level = db->db_level;
2102 zb.zb_blkid = db->db_blkid;
2104 zio_flags = ZIO_FLAG_MUSTSUCCEED;
2105 if (dmu_ot[dn->dn_type].ot_metadata || zb.zb_level != 0)
2106 zio_flags |= ZIO_FLAG_METADATA;
2107 if (BP_IS_OLDER(db->db_blkptr, txg))
2108 dsl_dataset_block_kill(
2109 os->os_dsl_dataset, db->db_blkptr, zio, tx);
2111 dr->dr_zio = arc_write(zio, os->os_spa, checksum, compress,
2112 dmu_get_replication_level(os, &zb, dn->dn_type), txg,
2113 db->db_blkptr, data, dbuf_write_ready, dbuf_write_done, db,
2114 ZIO_PRIORITY_ASYNC_WRITE, zio_flags, &zb);
2119 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2121 dmu_buf_impl_t *db = vdb;
2122 dnode_t *dn = db->db_dnode;
2123 objset_impl_t *os = dn->dn_objset;
2124 blkptr_t *bp_orig = &zio->io_bp_orig;
2126 int old_size, new_size, i;
2128 dprintf_dbuf_bp(db, bp_orig, "bp_orig: %s", "");
2130 old_size = bp_get_dasize(os->os_spa, bp_orig);
2131 new_size = bp_get_dasize(os->os_spa, zio->io_bp);
2133 dnode_diduse_space(dn, new_size-old_size);
2135 if (BP_IS_HOLE(zio->io_bp)) {
2136 dsl_dataset_t *ds = os->os_dsl_dataset;
2137 dmu_tx_t *tx = os->os_synctx;
2139 if (bp_orig->blk_birth == tx->tx_txg)
2140 dsl_dataset_block_kill(ds, bp_orig, NULL, tx);
2141 ASSERT3U(db->db_blkptr->blk_fill, ==, 0);
2145 mutex_enter(&db->db_mtx);
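/*
 * Recompute blk_fill for this bp: a level-0 DMU_OT_DNODE block counts
 * its allocated dnodes, an indirect block sums the blk_fill of every
 * non-hole child bp, and any other level-0 block counts as 1.
 */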
2147 if (db->db_level == 0) {
2148 mutex_enter(&dn->dn_mtx);
2149 if (db->db_blkid > dn->dn_phys->dn_maxblkid)
2150 dn->dn_phys->dn_maxblkid = db->db_blkid;
2151 mutex_exit(&dn->dn_mtx);
2153 if (dn->dn_type == DMU_OT_DNODE) {
2154 dnode_phys_t *dnp = db->db.db_data;
2155 for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2157 if (dnp->dn_type != DMU_OT_NONE)
2164 blkptr_t *bp = db->db.db_data;
2165 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2166 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, bp++) {
2169 ASSERT3U(BP_GET_LSIZE(bp), ==,
2170 db->db_level == 1 ? dn->dn_datablksz :
2171 (1<<dn->dn_phys->dn_indblkshift));
2172 fill += bp->blk_fill;
2176 db->db_blkptr->blk_fill = fill;
2177 BP_SET_TYPE(db->db_blkptr, dn->dn_type);
2178 BP_SET_LEVEL(db->db_blkptr, db->db_level);
2180 mutex_exit(&db->db_mtx);
2182 /* We must do this after we've set the bp's type and level */
2183 if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp), BP_IDENTITY(bp_orig))) {
2184 dsl_dataset_t *ds = os->os_dsl_dataset;
2185 dmu_tx_t *tx = os->os_synctx;
2187 if (bp_orig->blk_birth == tx->tx_txg)
2188 dsl_dataset_block_kill(ds, bp_orig, NULL, tx);
2189 dsl_dataset_block_born(ds, zio->io_bp, tx);
2195 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2197 dmu_buf_impl_t *db = vdb;
2198 uint64_t txg = zio->io_txg;
2199 dbuf_dirty_record_t **drp, *dr;
2201 ASSERT3U(zio->io_error, ==, 0);
2203 mutex_enter(&db->db_mtx);
2205 drp = &db->db_last_dirty;
2206 while ((dr = *drp) != db->db_data_pending)
2208 ASSERT(!list_link_active(&dr->dr_dirty_node));
2209 ASSERT(dr->dr_txg == txg);
2210 ASSERT(dr->dr_next == NULL);
2213 if (db->db_level == 0) {
2214 ASSERT(db->db_blkid != DB_BONUS_BLKID);
2215 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2217 if (dr->dt.dl.dr_data != db->db_buf)
2218 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db) == 1);
2219 else if (!BP_IS_HOLE(db->db_blkptr))
2220 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2222 ASSERT(arc_released(db->db_buf));
2224 dnode_t *dn = db->db_dnode;
2226 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2227 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2228 if (!BP_IS_HOLE(db->db_blkptr)) {
2230 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2231 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2233 ASSERT3U(dn->dn_phys->dn_maxblkid
2234 >> (db->db_level * epbs), >=, db->db_blkid);
2235 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2237 mutex_destroy(&dr->dt.di.dr_mtx);
2238 list_destroy(&dr->dt.di.dr_children);
2240 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2242 cv_broadcast(&db->db_changed);
2243 ASSERT(db->db_dirtycnt > 0);
2244 db->db_dirtycnt -= 1;
2245 db->db_data_pending = NULL;
2246 mutex_exit(&db->db_mtx);
2248 dprintf_dbuf_bp(db, zio->io_bp, "bp: %s", "");
2250 dbuf_rele(db, (void *)(uintptr_t)txg);