Fix stack noinline
[zfs.git] / module / zfs / dbuf.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24
25 #include <sys/zfs_context.h>
26 #include <sys/dmu.h>
27 #include <sys/dmu_impl.h>
28 #include <sys/dbuf.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_dir.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/spa.h>
34 #include <sys/zio.h>
35 #include <sys/dmu_zfetch.h>
36 #include <sys/sa.h>
37 #include <sys/sa_impl.h>
38
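/*
 * Note: __dbuf_hold_impl() recurses to hold parent indirect blocks.
 * Carrying its arguments and local variables in this heap-allocated
 * structure (one instance per recursion level, tracked by dh_depth)
 * rather than in each stack frame keeps the kernel stack footprint of
 * that recursion small.  (This description is inferred from the
 * structure layout and the stack-reduction intent of this change.)
 */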
39 struct dbuf_hold_impl_data {
40         /* Function arguments */
41         dnode_t *dh_dn;
42         uint8_t dh_level;
43         uint64_t dh_blkid;
44         int dh_fail_sparse;
45         void *dh_tag;
46         dmu_buf_impl_t **dh_dbp;
47         /* Local variables */
48         dmu_buf_impl_t *dh_db;
49         dmu_buf_impl_t *dh_parent;
50         blkptr_t *dh_bp;
51         int dh_err;
52         dbuf_dirty_record_t *dh_dr;
53         arc_buf_contents_t dh_type;
54         int dh_depth;
55 };
56
57 static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
58     dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
59     void *tag, dmu_buf_impl_t **dbp, int depth);
60 static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
61
62 static void dbuf_destroy(dmu_buf_impl_t *db);
63 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
64 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
65
66 /*
67  * Global data structures and functions for the dbuf cache.
68  */
69 static kmem_cache_t *dbuf_cache;
70
71 /* ARGSUSED */
72 static int
73 dbuf_cons(void *vdb, void *unused, int kmflag)
74 {
75         dmu_buf_impl_t *db = vdb;
76         bzero(db, sizeof (dmu_buf_impl_t));
77
78         mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
79         cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
80         refcount_create(&db->db_holds);
81         list_link_init(&db->db_link);
82         return (0);
83 }
84
85 /* ARGSUSED */
86 static void
87 dbuf_dest(void *vdb, void *unused)
88 {
89         dmu_buf_impl_t *db = vdb;
90         mutex_destroy(&db->db_mtx);
91         cv_destroy(&db->db_changed);
92         refcount_destroy(&db->db_holds);
93 }
94
95 /*
96  * dbuf hash table routines
97  */
98 static dbuf_hash_table_t dbuf_hash_table;
99
100 static uint64_t dbuf_hash_count;
101
102 static uint64_t
103 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
104 {
105         uintptr_t osv = (uintptr_t)os;
106         uint64_t crc = -1ULL;
107
108         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
109         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
110         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
111         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
112         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
113         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
114         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
115
116         crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
117
118         return (crc);
119 }
120
121 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
122
123 #define DBUF_EQUAL(dbuf, os, obj, level, blkid)         \
124         ((dbuf)->db.db_object == (obj) &&               \
125         (dbuf)->db_objset == (os) &&                    \
126         (dbuf)->db_level == (level) &&                  \
127         (dbuf)->db_blkid == (blkid))
128
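/*
 * Look up a dbuf in the hash table.  Note that on success the dbuf is
 * returned with db_mtx held; the caller is responsible for dropping it.
 */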
129 dmu_buf_impl_t *
130 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
131 {
132         dbuf_hash_table_t *h = &dbuf_hash_table;
133         objset_t *os = dn->dn_objset;
134         uint64_t obj;
135         uint64_t hv;
136         uint64_t idx;
137         dmu_buf_impl_t *db;
138
139         obj = dn->dn_object;
140         hv = DBUF_HASH(os, obj, level, blkid);
141         idx = hv & h->hash_table_mask;
142
143         mutex_enter(DBUF_HASH_MUTEX(h, idx));
144         for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
145                 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
146                         mutex_enter(&db->db_mtx);
147                         if (db->db_state != DB_EVICTING) {
148                                 mutex_exit(DBUF_HASH_MUTEX(h, idx));
149                                 return (db);
150                         }
151                         mutex_exit(&db->db_mtx);
152                 }
153         }
154         mutex_exit(DBUF_HASH_MUTEX(h, idx));
155         return (NULL);
156 }
157
158 /*
159  * Insert an entry into the hash table.  If there is already an element
160  * equal to db in the hash table, then the already existing element
161  * will be returned and the new element will not be inserted.
162  * Otherwise returns NULL.
163  */
164 static dmu_buf_impl_t *
165 dbuf_hash_insert(dmu_buf_impl_t *db)
166 {
167         dbuf_hash_table_t *h = &dbuf_hash_table;
168         objset_t *os = db->db_objset;
169         uint64_t obj = db->db.db_object;
170         int level = db->db_level;
171         uint64_t blkid, hv, idx;
172         dmu_buf_impl_t *dbf;
173
174         blkid = db->db_blkid;
175         hv = DBUF_HASH(os, obj, level, blkid);
176         idx = hv & h->hash_table_mask;
177
178         mutex_enter(DBUF_HASH_MUTEX(h, idx));
179         for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
180                 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
181                         mutex_enter(&dbf->db_mtx);
182                         if (dbf->db_state != DB_EVICTING) {
183                                 mutex_exit(DBUF_HASH_MUTEX(h, idx));
184                                 return (dbf);
185                         }
186                         mutex_exit(&dbf->db_mtx);
187                 }
188         }
189
190         mutex_enter(&db->db_mtx);
191         db->db_hash_next = h->hash_table[idx];
192         h->hash_table[idx] = db;
193         mutex_exit(DBUF_HASH_MUTEX(h, idx));
194         atomic_add_64(&dbuf_hash_count, 1);
195
196         return (NULL);
197 }
198
199 /*
200  * Remove an entry from the hash table.  This operation will
201  * fail if there are any existing holds on the db.
202  */
203 static void
204 dbuf_hash_remove(dmu_buf_impl_t *db)
205 {
206         dbuf_hash_table_t *h = &dbuf_hash_table;
207         uint64_t hv, idx;
208         dmu_buf_impl_t *dbf, **dbp;
209
210         hv = DBUF_HASH(db->db_objset, db->db.db_object,
211             db->db_level, db->db_blkid);
212         idx = hv & h->hash_table_mask;
213
214         /*
215          * We mustn't hold db_mtx, to maintain lock ordering:
216          * DBUF_HASH_MUTEX > db_mtx.
217          */
218         ASSERT(refcount_is_zero(&db->db_holds));
219         ASSERT(db->db_state == DB_EVICTING);
220         ASSERT(!MUTEX_HELD(&db->db_mtx));
221
222         mutex_enter(DBUF_HASH_MUTEX(h, idx));
223         dbp = &h->hash_table[idx];
224         while ((dbf = *dbp) != db) {
225                 dbp = &dbf->db_hash_next;
226                 ASSERT(dbf != NULL);
227         }
228         *dbp = db->db_hash_next;
229         db->db_hash_next = NULL;
230         mutex_exit(DBUF_HASH_MUTEX(h, idx));
231         atomic_add_64(&dbuf_hash_count, -1);
232 }
233
234 static arc_evict_func_t dbuf_do_evict;
235
236 static void
237 dbuf_evict_user(dmu_buf_impl_t *db)
238 {
239         ASSERT(MUTEX_HELD(&db->db_mtx));
240
241         if (db->db_level != 0 || db->db_evict_func == NULL)
242                 return;
243
244         if (db->db_user_data_ptr_ptr)
245                 *db->db_user_data_ptr_ptr = db->db.db_data;
246         db->db_evict_func(&db->db, db->db_user_ptr);
247         db->db_user_ptr = NULL;
248         db->db_user_data_ptr_ptr = NULL;
249         db->db_evict_func = NULL;
250 }
251
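/*
 * Indirect (level > 0) buffers are always metadata; for level-0 buffers
 * the object type decides (dmu_ot[type].ot_metadata), e.g. dnode blocks
 * count as metadata while plain file contents count as data.
 */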
252 boolean_t
253 dbuf_is_metadata(dmu_buf_impl_t *db)
254 {
255         if (db->db_level > 0) {
256                 return (B_TRUE);
257         } else {
258                 boolean_t is_metadata;
259
260                 DB_DNODE_ENTER(db);
261                 is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata;
262                 DB_DNODE_EXIT(db);
263
264                 return (is_metadata);
265         }
266 }
267
268 void
269 dbuf_evict(dmu_buf_impl_t *db)
270 {
271         ASSERT(MUTEX_HELD(&db->db_mtx));
272         ASSERT(db->db_buf == NULL);
273         ASSERT(db->db_data_pending == NULL);
274
275         dbuf_clear(db);
276         dbuf_destroy(db);
277 }
278
279 void
280 dbuf_init(void)
281 {
282         uint64_t hsize = 1ULL << 16;
283         dbuf_hash_table_t *h = &dbuf_hash_table;
284         int i;
285
286         /*
287          * The hash table is big enough to fill all of physical memory
288          * with an average 4K block size.  The table will take up
289          * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
290          */
291         while (hsize * 4096 < physmem * PAGESIZE)
292                 hsize <<= 1;
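        /*
         * Worked example (assuming 8-byte pointers): with 4 GB of
         * physical memory the loop above stops at hsize = 2^20 buckets
         * (2^20 * 4096 == 4 GB), so the bucket array itself occupies
         * 2^20 * 8 bytes == 8 MB -- the 2 MB per GB noted above.
         */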
293
294 retry:
295         h->hash_table_mask = hsize - 1;
296         h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
297         if (h->hash_table == NULL) {
298                 /* XXX - we should really return an error instead of assert */
299                 ASSERT(hsize > (1ULL << 10));
300                 hsize >>= 1;
301                 goto retry;
302         }
303
304         dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
305             sizeof (dmu_buf_impl_t),
306             0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
307
308         for (i = 0; i < DBUF_MUTEXES; i++)
309                 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
310 }
311
312 void
313 dbuf_fini(void)
314 {
315         dbuf_hash_table_t *h = &dbuf_hash_table;
316         int i;
317
318         for (i = 0; i < DBUF_MUTEXES; i++)
319                 mutex_destroy(&h->hash_mutexes[i]);
320         kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
321         kmem_cache_destroy(dbuf_cache);
322 }
323
324 /*
325  * Other stuff.
326  */
327
328 #ifdef ZFS_DEBUG
329 static void
330 dbuf_verify(dmu_buf_impl_t *db)
331 {
332         dnode_t *dn;
333         dbuf_dirty_record_t *dr;
334
335         ASSERT(MUTEX_HELD(&db->db_mtx));
336
337         if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
338                 return;
339
340         ASSERT(db->db_objset != NULL);
341         DB_DNODE_ENTER(db);
342         dn = DB_DNODE(db);
343         if (dn == NULL) {
344                 ASSERT(db->db_parent == NULL);
345                 ASSERT(db->db_blkptr == NULL);
346         } else {
347                 ASSERT3U(db->db.db_object, ==, dn->dn_object);
348                 ASSERT3P(db->db_objset, ==, dn->dn_objset);
349                 ASSERT3U(db->db_level, <, dn->dn_nlevels);
350                 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
351                     db->db_blkid == DMU_SPILL_BLKID ||
352                     !list_is_empty(&dn->dn_dbufs));
353         }
354         if (db->db_blkid == DMU_BONUS_BLKID) {
355                 ASSERT(dn != NULL);
356                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
357                 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
358         } else if (db->db_blkid == DMU_SPILL_BLKID) {
359                 ASSERT(dn != NULL);
360                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
361                 ASSERT3U(db->db.db_offset, ==, 0);
362         } else {
363                 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
364         }
365
366         for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
367                 ASSERT(dr->dr_dbuf == db);
368
369         for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
370                 ASSERT(dr->dr_dbuf == db);
371
372         /*
373          * We can't assert that db_size matches dn_datablksz because it
374          * can be momentarily different when another thread is doing
375          * dnode_set_blksz().
376          */
377         if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
378                 dr = db->db_data_pending;
379                 /*
380                  * It should only be modified in syncing context, so
381                  * make sure we only have one copy of the data.
382                  */
383                 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
384         }
385
386         /* verify db->db_blkptr */
387         if (db->db_blkptr) {
388                 if (db->db_parent == dn->dn_dbuf) {
389                         /* db is pointed to by the dnode */
390                         /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
391                         if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
392                                 ASSERT(db->db_parent == NULL);
393                         else
394                                 ASSERT(db->db_parent != NULL);
395                         if (db->db_blkid != DMU_SPILL_BLKID)
396                                 ASSERT3P(db->db_blkptr, ==,
397                                     &dn->dn_phys->dn_blkptr[db->db_blkid]);
398                 } else {
399                         /* db is pointed to by an indirect block */
400                         ASSERTV(int epb = db->db_parent->db.db_size >>
401                                 SPA_BLKPTRSHIFT);
402                         ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
403                         ASSERT3U(db->db_parent->db.db_object, ==,
404                             db->db.db_object);
405                         /*
406                          * dnode_grow_indblksz() can make this fail if we don't
407                          * have the struct_rwlock.  XXX indblksz no longer
408                          * grows.  safe to do this now?
409                          */
410                         if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
411                                 ASSERT3P(db->db_blkptr, ==,
412                                     ((blkptr_t *)db->db_parent->db.db_data +
413                                     db->db_blkid % epb));
414                         }
415                 }
416         }
417         if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
418             (db->db_buf == NULL || db->db_buf->b_data) &&
419             db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
420             db->db_state != DB_FILL && !dn->dn_free_txg) {
421                 /*
422                  * If the blkptr isn't set but the buffer has nonzero data,
423                  * it had better be dirty; otherwise we'll lose that
424                  * data when we evict this buffer.
425                  */
426                 if (db->db_dirtycnt == 0) {
427                         ASSERTV(uint64_t *buf = db->db.db_data);
428                         int i;
429
430                         for (i = 0; i < db->db.db_size >> 3; i++) {
431                                 ASSERT(buf[i] == 0);
432                         }
433                 }
434         }
435         DB_DNODE_EXIT(db);
436 }
437 #endif
438
439 static void
440 dbuf_update_data(dmu_buf_impl_t *db)
441 {
442         ASSERT(MUTEX_HELD(&db->db_mtx));
443         if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
444                 ASSERT(!refcount_is_zero(&db->db_holds));
445                 *db->db_user_data_ptr_ptr = db->db.db_data;
446         }
447 }
448
449 static void
450 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
451 {
452         ASSERT(MUTEX_HELD(&db->db_mtx));
453         ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
454         db->db_buf = buf;
455         if (buf != NULL) {
456                 ASSERT(buf->b_data != NULL);
457                 db->db.db_data = buf->b_data;
458                 if (!arc_released(buf))
459                         arc_set_callback(buf, dbuf_do_evict, db);
460                 dbuf_update_data(db);
461         } else {
462                 dbuf_evict_user(db);
463                 db->db.db_data = NULL;
464                 if (db->db_state != DB_NOFILL)
465                         db->db_state = DB_UNCACHED;
466         }
467 }
468
469 /*
470  * Loan out an arc_buf for read.  Return the loaned arc_buf.
471  */
472 arc_buf_t *
473 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
474 {
475         arc_buf_t *abuf;
476
477         mutex_enter(&db->db_mtx);
478         if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
479                 int blksz = db->db.db_size;
480                 spa_t *spa;
481
482                 mutex_exit(&db->db_mtx);
483                 DB_GET_SPA(&spa, db);
484                 abuf = arc_loan_buf(spa, blksz);
485                 bcopy(db->db.db_data, abuf->b_data, blksz);
486         } else {
487                 abuf = db->db_buf;
488                 arc_loan_inuse_buf(abuf, db);
489                 dbuf_set_data(db, NULL);
490                 mutex_exit(&db->db_mtx);
491         }
492         return (abuf);
493 }
494
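/*
 * Map a byte offset within an object to its logical block number; e.g.
 * with 128 KB data blocks (dn_datablkshift == 17) an offset of 300000
 * falls in block 300000 >> 17 == 2.  If the block size is not a power
 * of two (dn_datablkshift == 0, only possible for single-block
 * objects), everything maps to block 0.
 */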
495 uint64_t
496 dbuf_whichblock(dnode_t *dn, uint64_t offset)
497 {
498         if (dn->dn_datablkshift) {
499                 return (offset >> dn->dn_datablkshift);
500         } else {
501                 ASSERT3U(offset, <, dn->dn_datablksz);
502                 return (0);
503         }
504 }
505
506 static void
507 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
508 {
509         dmu_buf_impl_t *db = vdb;
510
511         mutex_enter(&db->db_mtx);
512         ASSERT3U(db->db_state, ==, DB_READ);
513         /*
514          * All reads are synchronous, so we must have a hold on the dbuf
515          */
516         ASSERT(refcount_count(&db->db_holds) > 0);
517         ASSERT(db->db_buf == NULL);
518         ASSERT(db->db.db_data == NULL);
519         if (db->db_level == 0 && db->db_freed_in_flight) {
520                 /* we were freed in flight; disregard any error */
521                 arc_release(buf, db);
522                 bzero(buf->b_data, db->db.db_size);
523                 arc_buf_freeze(buf);
524                 db->db_freed_in_flight = FALSE;
525                 dbuf_set_data(db, buf);
526                 db->db_state = DB_CACHED;
527         } else if (zio == NULL || zio->io_error == 0) {
528                 dbuf_set_data(db, buf);
529                 db->db_state = DB_CACHED;
530         } else {
531                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
532                 ASSERT3P(db->db_buf, ==, NULL);
533                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
534                 db->db_state = DB_UNCACHED;
535         }
536         cv_broadcast(&db->db_changed);
537         dbuf_rele_and_unlock(db, NULL);
538 }
539
540 static void
541 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
542 {
543         dnode_t *dn;
544         spa_t *spa;
545         zbookmark_t zb;
546         uint32_t aflags = ARC_NOWAIT;
547         arc_buf_t *pbuf;
548
549         DB_DNODE_ENTER(db);
550         dn = DB_DNODE(db);
551         ASSERT(!refcount_is_zero(&db->db_holds));
552         /* We need the struct_rwlock to prevent db_blkptr from changing. */
553         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
554         ASSERT(MUTEX_HELD(&db->db_mtx));
555         ASSERT(db->db_state == DB_UNCACHED);
556         ASSERT(db->db_buf == NULL);
557
558         if (db->db_blkid == DMU_BONUS_BLKID) {
559                 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
560
561                 ASSERT3U(bonuslen, <=, db->db.db_size);
562                 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
563                 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
564                 if (bonuslen < DN_MAX_BONUSLEN)
565                         bzero(db->db.db_data, DN_MAX_BONUSLEN);
566                 if (bonuslen)
567                         bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
568                 DB_DNODE_EXIT(db);
569                 dbuf_update_data(db);
570                 db->db_state = DB_CACHED;
571                 mutex_exit(&db->db_mtx);
572                 return;
573         }
574
575         /*
576          * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
577          * processes the delete record and clears the bp while we are waiting
578          * for the dn_mtx (resulting in a "no" from block_freed).
579          */
580         if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
581             (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
582             BP_IS_HOLE(db->db_blkptr)))) {
583                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
584
585                 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
586                     db->db.db_size, db, type));
587                 DB_DNODE_EXIT(db);
588                 bzero(db->db.db_data, db->db.db_size);
589                 db->db_state = DB_CACHED;
590                 *flags |= DB_RF_CACHED;
591                 mutex_exit(&db->db_mtx);
592                 return;
593         }
594
595         spa = dn->dn_objset->os_spa;
596         DB_DNODE_EXIT(db);
597
598         db->db_state = DB_READ;
599         mutex_exit(&db->db_mtx);
600
601         if (DBUF_IS_L2CACHEABLE(db))
602                 aflags |= ARC_L2CACHE;
603
604         SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
605             db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
606             db->db.db_object, db->db_level, db->db_blkid);
607
608         dbuf_add_ref(db, NULL);
609         /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
610
611         if (db->db_parent)
612                 pbuf = db->db_parent->db_buf;
613         else
614                 pbuf = db->db_objset->os_phys_buf;
615
616         (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
617             dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
618             (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
619             &aflags, &zb);
620         if (aflags & ARC_CACHED)
621                 *flags |= DB_RF_CACHED;
622 }
623
624 int
625 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
626 {
627         int err = 0;
628         int havepzio = (zio != NULL);
629         int prefetch;
630         dnode_t *dn;
631
632         /*
633          * We don't have to hold the mutex to check db_state because it
634          * can't be freed while we have a hold on the buffer.
635          */
636         ASSERT(!refcount_is_zero(&db->db_holds));
637
638         if (db->db_state == DB_NOFILL)
639                 return (EIO);
640
641         DB_DNODE_ENTER(db);
642         dn = DB_DNODE(db);
643         if ((flags & DB_RF_HAVESTRUCT) == 0)
644                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
645
646         prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
647             (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
648             DBUF_IS_CACHEABLE(db);
649
650         mutex_enter(&db->db_mtx);
651         if (db->db_state == DB_CACHED) {
652                 mutex_exit(&db->db_mtx);
653                 if (prefetch)
654                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
655                             db->db.db_size, TRUE);
656                 if ((flags & DB_RF_HAVESTRUCT) == 0)
657                         rw_exit(&dn->dn_struct_rwlock);
658                 DB_DNODE_EXIT(db);
659         } else if (db->db_state == DB_UNCACHED) {
660                 spa_t *spa = dn->dn_objset->os_spa;
661
662                 if (zio == NULL)
663                         zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
664                 dbuf_read_impl(db, zio, &flags);
665
666                 /* dbuf_read_impl has dropped db_mtx for us */
667
668                 if (prefetch)
669                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
670                             db->db.db_size, flags & DB_RF_CACHED);
671
672                 if ((flags & DB_RF_HAVESTRUCT) == 0)
673                         rw_exit(&dn->dn_struct_rwlock);
674                 DB_DNODE_EXIT(db);
675
676                 if (!havepzio)
677                         err = zio_wait(zio);
678         } else {
679                 mutex_exit(&db->db_mtx);
680                 if (prefetch)
681                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
682                             db->db.db_size, TRUE);
683                 if ((flags & DB_RF_HAVESTRUCT) == 0)
684                         rw_exit(&dn->dn_struct_rwlock);
685                 DB_DNODE_EXIT(db);
686
687                 mutex_enter(&db->db_mtx);
688                 if ((flags & DB_RF_NEVERWAIT) == 0) {
689                         while (db->db_state == DB_READ ||
690                             db->db_state == DB_FILL) {
691                                 ASSERT(db->db_state == DB_READ ||
692                                     (flags & DB_RF_HAVESTRUCT) == 0);
693                                 cv_wait(&db->db_changed, &db->db_mtx);
694                         }
695                         if (db->db_state == DB_UNCACHED)
696                                 err = EIO;
697                 }
698                 mutex_exit(&db->db_mtx);
699         }
700
701         ASSERT(err || havepzio || db->db_state == DB_CACHED);
702         return (err);
703 }
704
705 static void
706 dbuf_noread(dmu_buf_impl_t *db)
707 {
708         ASSERT(!refcount_is_zero(&db->db_holds));
709         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
710         mutex_enter(&db->db_mtx);
711         while (db->db_state == DB_READ || db->db_state == DB_FILL)
712                 cv_wait(&db->db_changed, &db->db_mtx);
713         if (db->db_state == DB_UNCACHED) {
714                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
715                 spa_t *spa;
716
717                 ASSERT(db->db_buf == NULL);
718                 ASSERT(db->db.db_data == NULL);
719                 DB_GET_SPA(&spa, db);
720                 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
721                 db->db_state = DB_FILL;
722         } else if (db->db_state == DB_NOFILL) {
723                 dbuf_set_data(db, NULL);
724         } else {
725                 ASSERT3U(db->db_state, ==, DB_CACHED);
726         }
727         mutex_exit(&db->db_mtx);
728 }
729
730 /*
731  * This is our just-in-time copy function.  It makes a copy of
732  * buffers that have been modified in a previous transaction
733  * group, before we modify them in the current active group.
734  *
735  * This function is used in two places: when we are dirtying a
736  * buffer for the first time in a txg, and when we are freeing
737  * a range in a dnode that includes this buffer.
738  *
739  * Note that when we are called from dbuf_free_range() we do
740  * not put a hold on the buffer, we just traverse the active
741  * dbuf list for the dnode.
742  */
743 static void
744 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
745 {
746         dbuf_dirty_record_t *dr = db->db_last_dirty;
747
748         ASSERT(MUTEX_HELD(&db->db_mtx));
749         ASSERT(db->db.db_data != NULL);
750         ASSERT(db->db_level == 0);
751         ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
752
753         if (dr == NULL ||
754             (dr->dt.dl.dr_data !=
755             ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
756                 return;
757
758         /*
759          * If the last dirty record for this dbuf has not yet synced
760          * and it's referencing the dbuf data, either:
761          *      reset the reference to point to a new copy,
762          * or (if there are no active holders)
763          *      just null out the current db_data pointer.
764          */
765         ASSERT(dr->dr_txg >= txg - 2);
766         if (db->db_blkid == DMU_BONUS_BLKID) {
767                 /* Note that the data bufs here are zio_bufs */
768                 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
769                 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
770                 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
771         } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
772                 int size = db->db.db_size;
773                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
774                 spa_t *spa;
775
776                 DB_GET_SPA(&spa, db);
777                 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
778                 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
779         } else {
780                 dbuf_set_data(db, NULL);
781         }
782 }
783
784 void
785 dbuf_unoverride(dbuf_dirty_record_t *dr)
786 {
787         dmu_buf_impl_t *db = dr->dr_dbuf;
788         blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
789         uint64_t txg = dr->dr_txg;
790
791         ASSERT(MUTEX_HELD(&db->db_mtx));
792         ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
793         ASSERT(db->db_level == 0);
794
795         if (db->db_blkid == DMU_BONUS_BLKID ||
796             dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
797                 return;
798
799         ASSERT(db->db_data_pending != dr);
800
801         /* free this block */
802         if (!BP_IS_HOLE(bp)) {
803                 spa_t *spa;
804
805                 DB_GET_SPA(&spa, db);
806                 zio_free(spa, txg, bp);
807         }
808         dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
809         /*
810          * Release the already-written buffer, so we leave it in
811          * a consistent dirty state.  Note that all callers are
812          * modifying the buffer, so they will immediately do
813          * another (redundant) arc_release().  Therefore, leave
814          * the buf thawed to save the effort of freezing &
815          * immediately re-thawing it.
816          */
817         arc_release(dr->dt.dl.dr_data, db);
818 }
819
820 /*
821  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
822  * data blocks in the free range, so that any future readers will find
823  * empty blocks.  Also, if we happen across any level-1 dbufs in the
824  * range that have not already been marked dirty, mark them dirty so
825  * they stay in memory.
826  */
827 void
828 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
829 {
830         dmu_buf_impl_t *db, *db_next;
831         uint64_t txg = tx->tx_txg;
832         int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
833         uint64_t first_l1 = start >> epbs;
834         uint64_t last_l1 = end >> epbs;
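        /*
         * epbs is log2(block pointers per indirect block): e.g. with
         * 128 KB indirect blocks (dn_indblkshift == 17) and 128-byte
         * block pointers (SPA_BLKPTRSHIFT == 7) each L1 block covers
         * 2^10 == 1024 L0 blkids, so first_l1 == start >> 10.
         */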
835
836         if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
837                 end = dn->dn_maxblkid;
838                 last_l1 = end >> epbs;
839         }
840         dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
841         mutex_enter(&dn->dn_dbufs_mtx);
842         for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
843                 db_next = list_next(&dn->dn_dbufs, db);
844                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
845
846                 if (db->db_level == 1 &&
847                     db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
848                         mutex_enter(&db->db_mtx);
849                         if (db->db_last_dirty &&
850                             db->db_last_dirty->dr_txg < txg) {
851                                 dbuf_add_ref(db, FTAG);
852                                 mutex_exit(&db->db_mtx);
853                                 dbuf_will_dirty(db, tx);
854                                 dbuf_rele(db, FTAG);
855                         } else {
856                                 mutex_exit(&db->db_mtx);
857                         }
858                 }
859
860                 if (db->db_level != 0)
861                         continue;
862                 dprintf_dbuf(db, "found buf %s\n", "");
863                 if (db->db_blkid < start || db->db_blkid > end)
864                         continue;
865
866                 /* found a level 0 buffer in the range */
867                 if (dbuf_undirty(db, tx))
868                         continue;
869
870                 mutex_enter(&db->db_mtx);
871                 if (db->db_state == DB_UNCACHED ||
872                     db->db_state == DB_NOFILL ||
873                     db->db_state == DB_EVICTING) {
874                         ASSERT(db->db.db_data == NULL);
875                         mutex_exit(&db->db_mtx);
876                         continue;
877                 }
878                 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
879                         /* will be handled in dbuf_read_done or dbuf_rele */
880                         db->db_freed_in_flight = TRUE;
881                         mutex_exit(&db->db_mtx);
882                         continue;
883                 }
884                 if (refcount_count(&db->db_holds) == 0) {
885                         ASSERT(db->db_buf);
886                         dbuf_clear(db);
887                         continue;
888                 }
889                 /* The dbuf is referenced */
890
891                 if (db->db_last_dirty != NULL) {
892                         dbuf_dirty_record_t *dr = db->db_last_dirty;
893
894                         if (dr->dr_txg == txg) {
895                                 /*
896                                  * This buffer is "in-use", re-adjust the file
897                                  * size to reflect that this buffer may
898                                  * contain new data when we sync.
899                                  */
900                                 if (db->db_blkid != DMU_SPILL_BLKID &&
901                                     db->db_blkid > dn->dn_maxblkid)
902                                         dn->dn_maxblkid = db->db_blkid;
903                                 dbuf_unoverride(dr);
904                         } else {
905                                 /*
906                                  * This dbuf is not dirty in the open context.
907                                  * Either uncache it (if it's not referenced in
908                                  * the open context) or reset its contents to
909                                  * empty.
910                                  */
911                                 dbuf_fix_old_data(db, txg);
912                         }
913                 }
914                 /* clear the contents if it's cached */
915                 if (db->db_state == DB_CACHED) {
916                         ASSERT(db->db.db_data != NULL);
917                         arc_release(db->db_buf, db);
918                         bzero(db->db.db_data, db->db.db_size);
919                         arc_buf_freeze(db->db_buf);
920                 }
921
922                 mutex_exit(&db->db_mtx);
923         }
924         mutex_exit(&dn->dn_dbufs_mtx);
925 }
926
927 static int
928 dbuf_block_freeable(dmu_buf_impl_t *db)
929 {
930         dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
931         uint64_t birth_txg = 0;
932
933         /*
934          * We don't need any locking to protect db_blkptr:
935          * If it's syncing, then db_last_dirty will be set
936          * so we'll ignore db_blkptr.
937          */
938         ASSERT(MUTEX_HELD(&db->db_mtx));
939         if (db->db_last_dirty)
940                 birth_txg = db->db_last_dirty->dr_txg;
941         else if (db->db_blkptr)
942                 birth_txg = db->db_blkptr->blk_birth;
943
944         /*
945          * If we don't exist or are in a snapshot, we can't be freed.
946          * Don't pass the bp to dsl_dataset_block_freeable() since we
947          * are holding the db_mtx lock and might deadlock if we are
948          * prefetching a dedup-ed block.
949          */
950         if (birth_txg)
951                 return (ds == NULL ||
952                     dsl_dataset_block_freeable(ds, NULL, birth_txg));
953         else
954                 return (FALSE);
955 }
956
957 void
958 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
959 {
960         arc_buf_t *buf, *obuf;
961         int osize = db->db.db_size;
962         arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
963         dnode_t *dn;
964
965         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
966
967         DB_DNODE_ENTER(db);
968         dn = DB_DNODE(db);
969
970         /* XXX does *this* func really need the lock? */
971         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
972
973         /*
974          * This call to dbuf_will_dirty() with the dn_struct_rwlock held
975          * is OK, because there can be no other references to the db
976          * when we are changing its size, so no concurrent DB_FILL can
977          * be happening.
978          */
979         /*
980          * XXX we should be doing a dbuf_read, checking the return
981          * value and returning that up to our callers
982          */
983         dbuf_will_dirty(db, tx);
984
985         /* create the data buffer for the new block */
986         buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
987
988         /* copy old block data to the new block */
989         obuf = db->db_buf;
990         bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
991         /* zero the remainder */
992         if (size > osize)
993                 bzero((uint8_t *)buf->b_data + osize, size - osize);
994
995         mutex_enter(&db->db_mtx);
996         dbuf_set_data(db, buf);
997         VERIFY(arc_buf_remove_ref(obuf, db) == 1);
998         db->db.db_size = size;
999
1000         if (db->db_level == 0) {
1001                 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1002                 db->db_last_dirty->dt.dl.dr_data = buf;
1003         }
1004         mutex_exit(&db->db_mtx);
1005
1006         dnode_willuse_space(dn, size-osize, tx);
1007         DB_DNODE_EXIT(db);
1008 }
1009
1010 void
1011 dbuf_release_bp(dmu_buf_impl_t *db)
1012 {
1013         objset_t *os;
1014         zbookmark_t zb;
1015
1016         DB_GET_OBJSET(&os, db);
1017         ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1018         ASSERT(arc_released(os->os_phys_buf) ||
1019             list_link_active(&os->os_dsl_dataset->ds_synced_link));
1020         ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1021
1022         zb.zb_objset = os->os_dsl_dataset ?
1023             os->os_dsl_dataset->ds_object : 0;
1024         zb.zb_object = db->db.db_object;
1025         zb.zb_level = db->db_level;
1026         zb.zb_blkid = db->db_blkid;
1027         (void) arc_release_bp(db->db_buf, db,
1028             db->db_blkptr, os->os_spa, &zb);
1029 }
1030
1031 dbuf_dirty_record_t *
1032 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1033 {
1034         dnode_t *dn;
1035         objset_t *os;
1036         dbuf_dirty_record_t **drp, *dr;
1037         int drop_struct_lock = FALSE;
1038         boolean_t do_free_accounting = B_FALSE;
1039         int txgoff = tx->tx_txg & TXG_MASK;
1040
1041         ASSERT(tx->tx_txg != 0);
1042         ASSERT(!refcount_is_zero(&db->db_holds));
1043         DMU_TX_DIRTY_BUF(tx, db);
1044
1045         DB_DNODE_ENTER(db);
1046         dn = DB_DNODE(db);
1047         /*
1048          * Shouldn't dirty a regular buffer in syncing context.  Private
1049          * objects may be dirtied in syncing context, but only if they
1050          * were already pre-dirtied in open context.
1051          */
1052         ASSERT(!dmu_tx_is_syncing(tx) ||
1053             BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1054             DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1055             dn->dn_objset->os_dsl_dataset == NULL);
1056         /*
1057          * We make this assert for private objects as well, but after we
1058          * check if we're already dirty.  They are allowed to re-dirty
1059          * in syncing context.
1060          */
1061         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1062             dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1063             (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1064
1065         mutex_enter(&db->db_mtx);
1066         /*
1067          * XXX make this true for indirects too?  The problem is that
1068          * transactions created with dmu_tx_create_assigned() from
1069          * syncing context don't bother holding ahead.
1070          */
1071         ASSERT(db->db_level != 0 ||
1072             db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1073             db->db_state == DB_NOFILL);
1074
1075         mutex_enter(&dn->dn_mtx);
1076         /*
1077          * Don't set dirtyctx to SYNC if we're just modifying this as we
1078          * initialize the objset.
1079          */
1080         if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1081             !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1082                 dn->dn_dirtyctx =
1083                     (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1084                 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1085                 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1086         }
1087         mutex_exit(&dn->dn_mtx);
1088
1089         if (db->db_blkid == DMU_SPILL_BLKID)
1090                 dn->dn_have_spill = B_TRUE;
1091
1092         /*
1093          * If this buffer is already dirty, we're done.
1094          */
1095         drp = &db->db_last_dirty;
1096         ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1097             db->db.db_object == DMU_META_DNODE_OBJECT);
1098         while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1099                 drp = &dr->dr_next;
1100         if (dr && dr->dr_txg == tx->tx_txg) {
1101                 DB_DNODE_EXIT(db);
1102
1103                 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1104                         /*
1105                          * If this buffer has already been written out,
1106                          * we now need to reset its state.
1107                          */
1108                         dbuf_unoverride(dr);
1109                         if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1110                             db->db_state != DB_NOFILL)
1111                                 arc_buf_thaw(db->db_buf);
1112                 }
1113                 mutex_exit(&db->db_mtx);
1114                 return (dr);
1115         }
1116
1117         /*
1118          * Only valid if not already dirty.
1119          */
1120         ASSERT(dn->dn_object == 0 ||
1121             dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1122             (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1123
1124         ASSERT3U(dn->dn_nlevels, >, db->db_level);
1125         ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1126             dn->dn_phys->dn_nlevels > db->db_level ||
1127             dn->dn_next_nlevels[txgoff] > db->db_level ||
1128             dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1129             dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1130
1131         /*
1132          * We should only be dirtying in syncing context if it's the
1133          * mos or we're initializing the os or it's a special object.
1134          * However, we are allowed to dirty in syncing context provided
1135          * we already dirtied it in open context.  Hence we must make
1136          * this assertion only if we're not already dirty.
1137          */
1138         os = dn->dn_objset;
1139         ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1140             os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1141         ASSERT(db->db.db_size != 0);
1142
1143         dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1144
1145         if (db->db_blkid != DMU_BONUS_BLKID) {
1146                 /*
1147                  * Update the accounting.
1148                  * Note: we delay "free accounting" until after we drop
1149                  * the db_mtx.  This keeps us from grabbing other locks
1150                  * (and possibly deadlocking) in bp_get_dsize() while
1151                  * also holding the db_mtx.
1152                  */
1153                 dnode_willuse_space(dn, db->db.db_size, tx);
1154                 do_free_accounting = dbuf_block_freeable(db);
1155         }
1156
1157         /*
1158          * If this buffer is dirty in an old transaction group we need
1159          * to make a copy of it so that the changes we make in this
1160          * transaction group won't leak out when we sync the older txg.
1161          */
1162         dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1163         list_link_init(&dr->dr_dirty_node);
1164         if (db->db_level == 0) {
1165                 void *data_old = db->db_buf;
1166
1167                 if (db->db_state != DB_NOFILL) {
1168                         if (db->db_blkid == DMU_BONUS_BLKID) {
1169                                 dbuf_fix_old_data(db, tx->tx_txg);
1170                                 data_old = db->db.db_data;
1171                         } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1172                                 /*
1173                                  * Release the data buffer from the cache so
1174                                  * that we can modify it without impacting
1175                                  * possible other users of this cached data
1176                                  * block.  Note that indirect blocks and
1177                                  * private objects are not released until the
1178                                  * syncing state (since they are only modified
1179                                  * then).
1180                                  */
1181                                 arc_release(db->db_buf, db);
1182                                 dbuf_fix_old_data(db, tx->tx_txg);
1183                                 data_old = db->db_buf;
1184                         }
1185                         ASSERT(data_old != NULL);
1186                 }
1187                 dr->dt.dl.dr_data = data_old;
1188         } else {
1189                 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1190                 list_create(&dr->dt.di.dr_children,
1191                     sizeof (dbuf_dirty_record_t),
1192                     offsetof(dbuf_dirty_record_t, dr_dirty_node));
1193         }
1194         dr->dr_dbuf = db;
1195         dr->dr_txg = tx->tx_txg;
1196         dr->dr_next = *drp;
1197         *drp = dr;
1198
1199         /*
1200          * We could have been freed_in_flight between the dbuf_noread
1201          * and dbuf_dirty.  We win, as though the dbuf_noread() had
1202          * happened after the free.
1203          */
1204         if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1205             db->db_blkid != DMU_SPILL_BLKID) {
1206                 mutex_enter(&dn->dn_mtx);
1207                 dnode_clear_range(dn, db->db_blkid, 1, tx);
1208                 mutex_exit(&dn->dn_mtx);
1209                 db->db_freed_in_flight = FALSE;
1210         }
1211
1212         /*
1213          * This buffer is now part of this txg
1214          */
1215         dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1216         db->db_dirtycnt += 1;
1217         ASSERT3U(db->db_dirtycnt, <=, 3);
1218
1219         mutex_exit(&db->db_mtx);
1220
1221         if (db->db_blkid == DMU_BONUS_BLKID ||
1222             db->db_blkid == DMU_SPILL_BLKID) {
1223                 mutex_enter(&dn->dn_mtx);
1224                 ASSERT(!list_link_active(&dr->dr_dirty_node));
1225                 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1226                 mutex_exit(&dn->dn_mtx);
1227                 dnode_setdirty(dn, tx);
1228                 DB_DNODE_EXIT(db);
1229                 return (dr);
1230         } else if (do_free_accounting) {
1231                 blkptr_t *bp = db->db_blkptr;
1232                 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1233                     bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1234                 /*
1235                  * This is only a guess -- if the dbuf is dirty
1236                  * in a previous txg, we don't know how much
1237                  * space it will use on disk yet.  We should
1238                  * really have the struct_rwlock to access
1239                  * db_blkptr, but since this is just a guess,
1240                  * it's OK if we get an odd answer.
1241                  */
1242                 ddt_prefetch(os->os_spa, bp);
1243                 dnode_willuse_space(dn, -willfree, tx);
1244         }
1245
1246         if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1247                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1248                 drop_struct_lock = TRUE;
1249         }
1250
1251         if (db->db_level == 0) {
1252                 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1253                 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1254         }
1255
1256         if (db->db_level+1 < dn->dn_nlevels) {
1257                 dmu_buf_impl_t *parent = db->db_parent;
1258                 dbuf_dirty_record_t *di;
1259                 int parent_held = FALSE;
1260
1261                 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1262                         int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1263
1264                         parent = dbuf_hold_level(dn, db->db_level+1,
1265                             db->db_blkid >> epbs, FTAG);
1266                         ASSERT(parent != NULL);
1267                         parent_held = TRUE;
1268                 }
1269                 if (drop_struct_lock)
1270                         rw_exit(&dn->dn_struct_rwlock);
1271                 ASSERT3U(db->db_level+1, ==, parent->db_level);
1272                 di = dbuf_dirty(parent, tx);
1273                 if (parent_held)
1274                         dbuf_rele(parent, FTAG);
1275
1276                 mutex_enter(&db->db_mtx);
1277                 /*  possible race with dbuf_undirty() */
1278                 if (db->db_last_dirty == dr ||
1279                     dn->dn_object == DMU_META_DNODE_OBJECT) {
1280                         mutex_enter(&di->dt.di.dr_mtx);
1281                         ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1282                         ASSERT(!list_link_active(&dr->dr_dirty_node));
1283                         list_insert_tail(&di->dt.di.dr_children, dr);
1284                         mutex_exit(&di->dt.di.dr_mtx);
1285                         dr->dr_parent = di;
1286                 }
1287                 mutex_exit(&db->db_mtx);
1288         } else {
1289                 ASSERT(db->db_level+1 == dn->dn_nlevels);
1290                 ASSERT(db->db_blkid < dn->dn_nblkptr);
1291                 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1292                 mutex_enter(&dn->dn_mtx);
1293                 ASSERT(!list_link_active(&dr->dr_dirty_node));
1294                 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1295                 mutex_exit(&dn->dn_mtx);
1296                 if (drop_struct_lock)
1297                         rw_exit(&dn->dn_struct_rwlock);
1298         }
1299
1300         dnode_setdirty(dn, tx);
1301         DB_DNODE_EXIT(db);
1302         return (dr);
1303 }
1304
1305 static int
1306 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1307 {
1308         dnode_t *dn;
1309         uint64_t txg = tx->tx_txg;
1310         dbuf_dirty_record_t *dr, **drp;
1311
1312         ASSERT(txg != 0);
1313         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1314
1315         mutex_enter(&db->db_mtx);
1316         /*
1317          * If this buffer is not dirty, we're done.
1318          */
1319         for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1320                 if (dr->dr_txg <= txg)
1321                         break;
1322         if (dr == NULL || dr->dr_txg < txg) {
1323                 mutex_exit(&db->db_mtx);
1324                 return (0);
1325         }
1326         ASSERT(dr->dr_txg == txg);
1327         ASSERT(dr->dr_dbuf == db);
1328
1329         DB_DNODE_ENTER(db);
1330         dn = DB_DNODE(db);
1331
1332         /*
1333          * If this buffer is currently held, we cannot undirty
1334          * it, since one of the current holders may be in the
1335          * middle of an update.  Note that users of dbuf_undirty()
1336          * should not place a hold on the dbuf before the call.
1337          */
1338         if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1339                 mutex_exit(&db->db_mtx);
1340                 /* Make sure we don't toss this buffer at sync phase */
1341                 mutex_enter(&dn->dn_mtx);
1342                 dnode_clear_range(dn, db->db_blkid, 1, tx);
1343                 mutex_exit(&dn->dn_mtx);
1344                 DB_DNODE_EXIT(db);
1345                 return (0);
1346         }
1347
1348         dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1349
1350         ASSERT(db->db.db_size != 0);
1351
1352         /* XXX would be nice to fix up dn_towrite_space[] */
1353
1354         *drp = dr->dr_next;
1355
1356         if (dr->dr_parent) {
1357                 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1358                 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1359                 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1360         } else if (db->db_level+1 == dn->dn_nlevels) {
1361                 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1362                 mutex_enter(&dn->dn_mtx);
1363                 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1364                 mutex_exit(&dn->dn_mtx);
1365         }
1366         DB_DNODE_EXIT(db);
1367
1368         if (db->db_level == 0) {
1369                 if (db->db_state != DB_NOFILL) {
1370                         dbuf_unoverride(dr);
1371
1372                         ASSERT(db->db_buf != NULL);
1373                         ASSERT(dr->dt.dl.dr_data != NULL);
1374                         if (dr->dt.dl.dr_data != db->db_buf)
1375                                 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
1376                                     db) == 1);
1377                 }
1378         } else {
1379                 ASSERT(db->db_buf != NULL);
1380                 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1381                 mutex_destroy(&dr->dt.di.dr_mtx);
1382                 list_destroy(&dr->dt.di.dr_children);
1383         }
1384         kmem_free(dr, sizeof (dbuf_dirty_record_t));
1385
1386         ASSERT(db->db_dirtycnt > 0);
1387         db->db_dirtycnt -= 1;
1388
1389         if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1390                 arc_buf_t *buf = db->db_buf;
1391
1392                 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1393                 dbuf_set_data(db, NULL);
1394                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1395                 dbuf_evict(db);
1396                 return (1);
1397         }
1398
1399         mutex_exit(&db->db_mtx);
1400         return (0);
1401 }
1402
1403 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1404 void
1405 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1406 {
1407         int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1408
1409         ASSERT(tx->tx_txg != 0);
1410         ASSERT(!refcount_is_zero(&db->db_holds));
1411
1412         DB_DNODE_ENTER(db);
1413         if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1414                 rf |= DB_RF_HAVESTRUCT;
1415         DB_DNODE_EXIT(db);
1416         (void) dbuf_read(db, NULL, rf);
1417         (void) dbuf_dirty(db, tx);
1418 }
1419
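/*
 * Transition the dbuf to the DB_NOFILL state before dirtying it.  A
 * NOFILL dbuf carries no data buffer; dbuf_write() will later issue
 * the write without a payload (ZIO_FLAG_NODATA).
 */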
1420 void
1421 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1422 {
1423         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1424
1425         db->db_state = DB_NOFILL;
1426
1427         dmu_buf_will_fill(db_fake, tx);
1428 }
1429
1430 void
1431 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1432 {
1433         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1434
1435         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1436         ASSERT(tx->tx_txg != 0);
1437         ASSERT(db->db_level == 0);
1438         ASSERT(!refcount_is_zero(&db->db_holds));
1439
1440         ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1441             dmu_tx_private_ok(tx));
1442
1443         dbuf_noread(db);
1444         (void) dbuf_dirty(db, tx);
1445 }
1446
1447 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1448 /* ARGSUSED */
1449 void
1450 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1451 {
1452         mutex_enter(&db->db_mtx);
1453         DBUF_VERIFY(db);
1454
1455         if (db->db_state == DB_FILL) {
1456                 if (db->db_level == 0 && db->db_freed_in_flight) {
1457                         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1458                         /* we were freed while filling */
1459                         /* XXX dbuf_undirty? */
1460                         bzero(db->db.db_data, db->db.db_size);
1461                         db->db_freed_in_flight = FALSE;
1462                 }
1463                 db->db_state = DB_CACHED;
1464                 cv_broadcast(&db->db_changed);
1465         }
1466         mutex_exit(&db->db_mtx);
1467 }
1468
1469 /*
1470  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1471  * by anybody except our caller.  Otherwise, copy the arc buf's contents into the dbuf.
1472  */
1473 void
1474 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1475 {
1476         ASSERT(!refcount_is_zero(&db->db_holds));
1477         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1478         ASSERT(db->db_level == 0);
1479         ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1480         ASSERT(buf != NULL);
1481         ASSERT(arc_buf_size(buf) == db->db.db_size);
1482         ASSERT(tx->tx_txg != 0);
1483
1484         arc_return_buf(buf, db);
1485         ASSERT(arc_released(buf));
1486
1487         mutex_enter(&db->db_mtx);
1488
1489         while (db->db_state == DB_READ || db->db_state == DB_FILL)
1490                 cv_wait(&db->db_changed, &db->db_mtx);
1491
1492         ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1493
1494         if (db->db_state == DB_CACHED &&
1495             refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1496                 mutex_exit(&db->db_mtx);
1497                 (void) dbuf_dirty(db, tx);
1498                 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1499                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1500                 xuio_stat_wbuf_copied();
1501                 return;
1502         }
1503
1504         xuio_stat_wbuf_nocopy();
1505         if (db->db_state == DB_CACHED) {
1506                 dbuf_dirty_record_t *dr = db->db_last_dirty;
1507
1508                 ASSERT(db->db_buf != NULL);
1509                 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1510                         ASSERT(dr->dt.dl.dr_data == db->db_buf);
1511                         if (!arc_released(db->db_buf)) {
1512                                 ASSERT(dr->dt.dl.dr_override_state ==
1513                                     DR_OVERRIDDEN);
1514                                 arc_release(db->db_buf, db);
1515                         }
1516                         dr->dt.dl.dr_data = buf;
1517                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1518                 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1519                         arc_release(db->db_buf, db);
1520                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1521                 }
1522                 db->db_buf = NULL;
1523         }
1524         ASSERT(db->db_buf == NULL);
1525         dbuf_set_data(db, buf);
1526         db->db_state = DB_FILL;
1527         mutex_exit(&db->db_mtx);
1528         (void) dbuf_dirty(db, tx);
1529         dbuf_fill_done(db, tx);
1530 }
1531
1532 /*
1533  * "Clear" the contents of this dbuf.  This will mark the dbuf
1534  * EVICTING and clear *most* of its references.  Unfortunately,
1535  * when we are not holding the dn_dbufs_mtx, we can't clear the
1536  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1537  * in this case.  For callers from the DMU we will usually see:
1538  *      dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1539  * For the arc callback, we will usually see:
1540  *      dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1541  * Sometimes, though, we will get a mix of these two:
1542  *      DMU: dbuf_clear()->arc_buf_evict()
1543  *      ARC: dbuf_do_evict()->dbuf_destroy()
1544  */
1545 void
1546 dbuf_clear(dmu_buf_impl_t *db)
1547 {
1548         dnode_t *dn;
1549         dmu_buf_impl_t *parent = db->db_parent;
1550         dmu_buf_impl_t *dndb;
1551         int dbuf_gone = FALSE;
1552
1553         ASSERT(MUTEX_HELD(&db->db_mtx));
1554         ASSERT(refcount_is_zero(&db->db_holds));
1555
1556         dbuf_evict_user(db);
1557
1558         if (db->db_state == DB_CACHED) {
1559                 ASSERT(db->db.db_data != NULL);
1560                 if (db->db_blkid == DMU_BONUS_BLKID) {
1561                         zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1562                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1563                 }
1564                 db->db.db_data = NULL;
1565                 db->db_state = DB_UNCACHED;
1566         }
1567
1568         ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1569         ASSERT(db->db_data_pending == NULL);
1570
1571         db->db_state = DB_EVICTING;
1572         db->db_blkptr = NULL;
1573
1574         DB_DNODE_ENTER(db);
1575         dn = DB_DNODE(db);
1576         dndb = dn->dn_dbuf;
1577         if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1578                 list_remove(&dn->dn_dbufs, db);
1579                 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1580                 membar_producer();
1581                 DB_DNODE_EXIT(db);
1582                 /*
1583                  * Decrementing the dbuf count means that the hold corresponding
1584                  * to the removed dbuf is no longer discounted in dnode_move(),
1585                  * so the dnode cannot be moved until after we release the hold.
1586                  * The membar_producer() ensures visibility of the decremented
1587                  * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1588                  * release any lock.
1589                  */
1590                 dnode_rele(dn, db);
1591                 db->db_dnode_handle = NULL;
1592         } else {
1593                 DB_DNODE_EXIT(db);
1594         }
1595
1596         if (db->db_buf)
1597                 dbuf_gone = arc_buf_evict(db->db_buf);
1598
1599         if (!dbuf_gone)
1600                 mutex_exit(&db->db_mtx);
1601
1602         /*
1603          * If this dbuf is referenced from an indirect dbuf,
1604          * decrement the ref count on the indirect dbuf.
1605          */
1606         if (parent && parent != dndb)
1607                 dbuf_rele(parent, db);
1608 }
1609
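/*
 * Locate the parent dbuf and block pointer that reference block
 * (level, blkid) of the given dnode.  Returns ENOENT if the block has
 * no parent yet.  When called from __dbuf_hold_impl(), dh points at
 * the caller's preallocated per-level state so the recursion does not
 * grow the stack.
 */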
1610 __attribute__((always_inline))
1611 static inline int
1612 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1613     dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
1614 {
1615         int nlevels, epbs;
1616
1617         *parentp = NULL;
1618         *bpp = NULL;
1619
1620         ASSERT(blkid != DMU_BONUS_BLKID);
1621
1622         if (blkid == DMU_SPILL_BLKID) {
1623                 mutex_enter(&dn->dn_mtx);
1624                 if (dn->dn_have_spill &&
1625                     (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1626                         *bpp = &dn->dn_phys->dn_spill;
1627                 else
1628                         *bpp = NULL;
1629                 dbuf_add_ref(dn->dn_dbuf, NULL);
1630                 *parentp = dn->dn_dbuf;
1631                 mutex_exit(&dn->dn_mtx);
1632                 return (0);
1633         }
1634
1635         if (dn->dn_phys->dn_nlevels == 0)
1636                 nlevels = 1;
1637         else
1638                 nlevels = dn->dn_phys->dn_nlevels;
1639
1640         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1641
1642         ASSERT3U(level * epbs, <, 64);
1643         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1644         if (level >= nlevels ||
1645             (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1646                 /* the buffer has no parent yet */
1647                 return (ENOENT);
1648         } else if (level < nlevels-1) {
1649                 /* this block is referenced from an indirect block */
1650                 int err;
1651                 if (dh == NULL) {
1652                         err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
1653                                         fail_sparse, NULL, parentp);
1654                 }
1655                 else {
1656                         __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
1657                                         blkid >> epbs, fail_sparse, NULL,
1658                                         parentp, dh->dh_depth + 1);
1659                         err = __dbuf_hold_impl(dh + 1);
1660                 }
1661                 if (err)
1662                         return (err);
1663                 err = dbuf_read(*parentp, NULL,
1664                     (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1665                 if (err) {
1666                         dbuf_rele(*parentp, NULL);
1667                         *parentp = NULL;
1668                         return (err);
1669                 }
1670                 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1671                     (blkid & ((1ULL << epbs) - 1));
1672                 return (0);
1673         } else {
1674                 /* the block is referenced from the dnode */
1675                 ASSERT3U(level, ==, nlevels-1);
1676                 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1677                     blkid < dn->dn_phys->dn_nblkptr);
1678                 if (dn->dn_dbuf) {
1679                         dbuf_add_ref(dn->dn_dbuf, NULL);
1680                         *parentp = dn->dn_dbuf;
1681                 }
1682                 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1683                 return (0);
1684         }
1685 }
1686
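/*
 * Allocate and initialize a new dbuf for the given block of the dnode,
 * then (except for the bonus buffer) link it into the dbuf hash table
 * and the dnode's dn_dbufs list.  If another thread inserted an
 * equivalent dbuf first, free ours and return the existing one instead.
 */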
1687 static dmu_buf_impl_t *
1688 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1689     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1690 {
1691         objset_t *os = dn->dn_objset;
1692         dmu_buf_impl_t *db, *odb;
1693
1694         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1695         ASSERT(dn->dn_type != DMU_OT_NONE);
1696
1697         db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1698
1699         db->db_objset = os;
1700         db->db.db_object = dn->dn_object;
1701         db->db_level = level;
1702         db->db_blkid = blkid;
1703         db->db_last_dirty = NULL;
1704         db->db_dirtycnt = 0;
1705         db->db_dnode_handle = dn->dn_handle;
1706         db->db_parent = parent;
1707         db->db_blkptr = blkptr;
1708
1709         db->db_user_ptr = NULL;
1710         db->db_user_data_ptr_ptr = NULL;
1711         db->db_evict_func = NULL;
1712         db->db_immediate_evict = 0;
1713         db->db_freed_in_flight = 0;
1714
1715         if (blkid == DMU_BONUS_BLKID) {
1716                 ASSERT3P(parent, ==, dn->dn_dbuf);
1717                 db->db.db_size = DN_MAX_BONUSLEN -
1718                     (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1719                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1720                 db->db.db_offset = DMU_BONUS_BLKID;
1721                 db->db_state = DB_UNCACHED;
1722                 /* the bonus dbuf is not placed in the hash table */
1723                 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1724                 return (db);
1725         } else if (blkid == DMU_SPILL_BLKID) {
1726                 db->db.db_size = (blkptr != NULL) ?
1727                     BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1728                 db->db.db_offset = 0;
1729         } else {
1730                 int blocksize =
1731                     db->db_level ? 1<<dn->dn_indblkshift :  dn->dn_datablksz;
1732                 db->db.db_size = blocksize;
1733                 db->db.db_offset = db->db_blkid * blocksize;
1734         }
1735
1736         /*
1737          * Hold the dn_dbufs_mtx while we insert the new dbuf
1738          * into the hash table *and* add it to the dbufs list.
1739          * This prevents a possible deadlock with someone
1740          * trying to look up this dbuf before it's added to the
1741          * dn_dbufs list.
1742          */
1743         mutex_enter(&dn->dn_dbufs_mtx);
1744         db->db_state = DB_EVICTING;
1745         if ((odb = dbuf_hash_insert(db)) != NULL) {
1746                 /* someone else inserted it first */
1747                 kmem_cache_free(dbuf_cache, db);
1748                 mutex_exit(&dn->dn_dbufs_mtx);
1749                 return (odb);
1750         }
1751         list_insert_head(&dn->dn_dbufs, db);
1752         db->db_state = DB_UNCACHED;
1753         mutex_exit(&dn->dn_dbufs_mtx);
1754         arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1755
1756         if (parent && parent != dn->dn_dbuf)
1757                 dbuf_add_ref(parent, db);
1758
1759         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1760             refcount_count(&dn->dn_holds) > 0);
1761         (void) refcount_add(&dn->dn_holds, db);
1762         (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1763
1764         dprintf_dbuf(db, "db=%p\n", db);
1765
1766         return (db);
1767 }
1768
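/*
 * ARC eviction callback, invoked when the arc buf backing this dbuf is
 * being evicted.  Evict the dbuf if it is still cached; if it is
 * already DB_EVICTING, just finish tearing it down with dbuf_destroy().
 */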
1769 static int
1770 dbuf_do_evict(void *private)
1771 {
1772         arc_buf_t *buf = private;
1773         dmu_buf_impl_t *db = buf->b_private;
1774
1775         if (!MUTEX_HELD(&db->db_mtx))
1776                 mutex_enter(&db->db_mtx);
1777
1778         ASSERT(refcount_is_zero(&db->db_holds));
1779
1780         if (db->db_state != DB_EVICTING) {
1781                 ASSERT(db->db_state == DB_CACHED);
1782                 DBUF_VERIFY(db);
1783                 db->db_buf = NULL;
1784                 dbuf_evict(db);
1785         } else {
1786                 mutex_exit(&db->db_mtx);
1787                 dbuf_destroy(db);
1788         }
1789         return (0);
1790 }
1791
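/*
 * Free a dbuf that has no remaining holds: remove it from the dnode's
 * dn_dbufs list and the dbuf hash table (the bonus buffer is in
 * neither), drop the dnode hold it accounted for (if still attached),
 * and return the structure to the dbuf kmem cache.
 */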
1792 static void
1793 dbuf_destroy(dmu_buf_impl_t *db)
1794 {
1795         ASSERT(refcount_is_zero(&db->db_holds));
1796
1797         if (db->db_blkid != DMU_BONUS_BLKID) {
1798                 /*
1799                  * If this dbuf is still on the dn_dbufs list,
1800                  * remove it from that list.
1801                  */
1802                 if (db->db_dnode_handle != NULL) {
1803                         dnode_t *dn;
1804
1805                         DB_DNODE_ENTER(db);
1806                         dn = DB_DNODE(db);
1807                         mutex_enter(&dn->dn_dbufs_mtx);
1808                         list_remove(&dn->dn_dbufs, db);
1809                         (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1810                         mutex_exit(&dn->dn_dbufs_mtx);
1811                         DB_DNODE_EXIT(db);
1812                         /*
1813                          * Decrementing the dbuf count means that the hold
1814                          * corresponding to the removed dbuf is no longer
1815                          * discounted in dnode_move(), so the dnode cannot be
1816                          * moved until after we release the hold.
1817                          */
1818                         dnode_rele(dn, db);
1819                         db->db_dnode_handle = NULL;
1820                 }
1821                 dbuf_hash_remove(db);
1822         }
1823         db->db_parent = NULL;
1824         db->db_buf = NULL;
1825
1826         ASSERT(!list_link_active(&db->db_link));
1827         ASSERT(db->db.db_data == NULL);
1828         ASSERT(db->db_hash_next == NULL);
1829         ASSERT(db->db_blkptr == NULL);
1830         ASSERT(db->db_data_pending == NULL);
1831
1832         kmem_cache_free(dbuf_cache, db);
1833         arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1834 }
1835
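/*
 * Issue a speculative, asynchronous read of the given level-0 block
 * unless it is already in the dbuf cache or is known to have been
 * freed.
 */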
1836 void
1837 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1838 {
1839         dmu_buf_impl_t *db = NULL;
1840         blkptr_t *bp = NULL;
1841
1842         ASSERT(blkid != DMU_BONUS_BLKID);
1843         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1844
1845         if (dnode_block_freed(dn, blkid))
1846                 return;
1847
1848         /* dbuf_find() returns with db_mtx held */
1849         if ((db = dbuf_find(dn, 0, blkid))) {
1850                 /*
1851                  * This dbuf is already in the cache.  We assume that
1852                  * it is already CACHED, or else about to be either
1853                  * read or filled.
1854                  */
1855                 mutex_exit(&db->db_mtx);
1856                 return;
1857         }
1858
1859         if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
1860                 if (bp && !BP_IS_HOLE(bp)) {
1861                         int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1862                             ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1863                         arc_buf_t *pbuf;
1864                         dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1865                         uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1866                         zbookmark_t zb;
1867
1868                         SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1869                             dn->dn_object, 0, blkid);
1870
1871                         if (db)
1872                                 pbuf = db->db_buf;
1873                         else
1874                                 pbuf = dn->dn_objset->os_phys_buf;
1875
1876                         (void) dsl_read(NULL, dn->dn_objset->os_spa,
1877                             bp, pbuf, NULL, NULL, priority,
1878                             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1879                             &aflags, &zb);
1880                 }
1881                 if (db)
1882                         dbuf_rele(db, NULL);
1883         }
1884 }
1885
1886 #define DBUF_HOLD_IMPL_MAX_DEPTH        20
1887
1888 /*
1889  * Returns with db_holds incremented, and db_mtx not held.
1890  * Note: dn_struct_rwlock must be held.
1891  */
1892 static int
1893 __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
1894 {
1895         ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
1896         dh->dh_parent = NULL;
1897
1898         ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
1899         ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
1900         ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
1901
1902         *(dh->dh_dbp) = NULL;
1903 top:
1904         /* dbuf_find() returns with db_mtx held */
1905         dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);
1906
1907         if (dh->dh_db == NULL) {
1908                 dh->dh_bp = NULL;
1909
1910                 ASSERT3P(dh->dh_parent, ==, NULL);
1911                 dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1912                                         dh->dh_fail_sparse, &dh->dh_parent,
1913                                         &dh->dh_bp, dh);
1914                 if (dh->dh_fail_sparse) {
1915                         if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
1916                                 dh->dh_err = ENOENT;
1917                         if (dh->dh_err) {
1918                                 if (dh->dh_parent)
1919                                         dbuf_rele(dh->dh_parent, NULL);
1920                                 return (dh->dh_err);
1921                         }
1922                 }
1923                 if (dh->dh_err && dh->dh_err != ENOENT)
1924                         return (dh->dh_err);
1925                 dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1926                                         dh->dh_parent, dh->dh_bp);
1927         }
1928
1929         if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
1930                 arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
1931                 if (dh->dh_db->db_buf->b_data == NULL) {
1932                         dbuf_clear(dh->dh_db);
1933                         if (dh->dh_parent) {
1934                                 dbuf_rele(dh->dh_parent, NULL);
1935                                 dh->dh_parent = NULL;
1936                         }
1937                         goto top;
1938                 }
1939                 ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
1940         }
1941
1942         ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
1943
1944         /*
1945          * If this buffer is currently syncing out, and we are
1946          * still referencing it from db_data, we need to make a copy
1947          * of it in case we decide we want to dirty it again in this txg.
1948          */
1949         if (dh->dh_db->db_level == 0 &&
1950             dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
1951             dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
1952             dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
1953                 dh->dh_dr = dh->dh_db->db_data_pending;
1954
1955                 if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
1956                         dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);
1957
1958                         dbuf_set_data(dh->dh_db,
1959                             arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
1960                             dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
1961                         bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
1962                             dh->dh_db->db.db_data, dh->dh_db->db.db_size);
1963                 }
1964         }
1965
1966         (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
1967         dbuf_update_data(dh->dh_db);
1968         DBUF_VERIFY(dh->dh_db);
1969         mutex_exit(&dh->dh_db->db_mtx);
1970
1971         /* NOTE: we can't rele the parent until after we drop the db_mtx */
1972         if (dh->dh_parent)
1973                 dbuf_rele(dh->dh_parent, NULL);
1974
1975         ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
1976         ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
1977         ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
1978         *(dh->dh_dbp) = dh->dh_db;
1979
1980         return (0);
1981 }
1982
1983 /*
1984  * The following code preserves the recursive function dbuf_hold_impl()
1985  * but moves the local variables AND function arguments to the heap to
1986  * minimize the stack frame size.  Enough space is initially allocated
1987  * on the heap for 20 levels of recursion.
1988  */
1989 int
1990 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1991     void *tag, dmu_buf_impl_t **dbp)
1992 {
1993         struct dbuf_hold_impl_data *dh;
1994         int error;
1995
1996         dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
1997             DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
1998         __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);
1999
2000         error = __dbuf_hold_impl(dh);
2001
2002         kmem_free(dh, sizeof(struct dbuf_hold_impl_data) *
2003             DBUF_HOLD_IMPL_MAX_DEPTH);
2004
2005         return (error);
2006 }
2007
2008 static void
2009 __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
2010     dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
2011     void *tag, dmu_buf_impl_t **dbp, int depth)
2012 {
2013         dh->dh_dn = dn;
2014         dh->dh_level = level;
2015         dh->dh_blkid = blkid;
2016         dh->dh_fail_sparse = fail_sparse;
2017         dh->dh_tag = tag;
2018         dh->dh_dbp = dbp;
2019         dh->dh_depth = depth;
2020 }
2021
2022 dmu_buf_impl_t *
2023 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2024 {
2025         dmu_buf_impl_t *db;
2026         int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
2027         return (err ? NULL : db);
2028 }
2029
2030 dmu_buf_impl_t *
2031 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2032 {
2033         dmu_buf_impl_t *db;
2034         int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
2035         return (err ? NULL : db);
2036 }
2037
2038 void
2039 dbuf_create_bonus(dnode_t *dn)
2040 {
2041         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2042
2043         ASSERT(dn->dn_bonus == NULL);
2044         dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2045 }
2046
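/*
 * Resize a dnode's spill block.  The requested size is clamped to
 * [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE] and rounded up to a multiple of
 * SPA_MINBLOCKSIZE; ENOTSUP is returned if the dbuf is not the spill
 * block.
 */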
2047 int
2048 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2049 {
2050         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2051         dnode_t *dn;
2052
2053         if (db->db_blkid != DMU_SPILL_BLKID)
2054                 return (ENOTSUP);
2055         if (blksz == 0)
2056                 blksz = SPA_MINBLOCKSIZE;
2057         if (blksz > SPA_MAXBLOCKSIZE)
2058                 blksz = SPA_MAXBLOCKSIZE;
2059         else
2060                 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2061
2062         DB_DNODE_ENTER(db);
2063         dn = DB_DNODE(db);
2064         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2065         dbuf_new_size(db, blksz, tx);
2066         rw_exit(&dn->dn_struct_rwlock);
2067         DB_DNODE_EXIT(db);
2068
2069         return (0);
2070 }
2071
2072 void
2073 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2074 {
2075         dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2076 }
2077
2078 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2079 void
2080 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2081 {
2082         VERIFY(refcount_add(&db->db_holds, tag) > 1);
2083 }
2084
2085 /*
2086  * If you call dbuf_rele() you had better not be referencing the dnode handle
2087  * unless you have some other direct or indirect hold on the dnode. (An indirect
2088  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2089  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2090  * dnode's parent dbuf evicting its dnode handles.
2091  */
2092 #pragma weak dmu_buf_rele = dbuf_rele
2093 void
2094 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2095 {
2096         mutex_enter(&db->db_mtx);
2097         dbuf_rele_and_unlock(db, tag);
2098 }
2099
2100 /*
2101  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2102  * db_dirtycnt and db_holds to be updated atomically.
2103  */
2104 void
2105 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2106 {
2107         int64_t holds;
2108
2109         ASSERT(MUTEX_HELD(&db->db_mtx));
2110         DBUF_VERIFY(db);
2111
2112         /*
2113          * Remove the reference to the dbuf before removing its hold on the
2114          * dnode so we can guarantee in dnode_move() that a referenced bonus
2115          * buffer has a corresponding dnode hold.
2116          */
2117         holds = refcount_remove(&db->db_holds, tag);
2118         ASSERT(holds >= 0);
2119
2120         /*
2121          * We can't freeze indirects if there is a possibility that they
2122          * may be modified in the current syncing context.
2123          */
2124         if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2125                 arc_buf_freeze(db->db_buf);
2126
2127         if (holds == db->db_dirtycnt &&
2128             db->db_level == 0 && db->db_immediate_evict)
2129                 dbuf_evict_user(db);
2130
2131         if (holds == 0) {
2132                 if (db->db_blkid == DMU_BONUS_BLKID) {
2133                         mutex_exit(&db->db_mtx);
2134
2135                         /*
2136                          * If the dnode moves here, we cannot cross this barrier
2137                          * until the move completes.
2138                          */
2139                         DB_DNODE_ENTER(db);
2140                         (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2141                         DB_DNODE_EXIT(db);
2142                         /*
2143                          * The bonus buffer's dnode hold is no longer discounted
2144                          * in dnode_move(). The dnode cannot move until after
2145                          * the dnode_rele().
2146                          */
2147                         dnode_rele(DB_DNODE(db), db);
2148                 } else if (db->db_buf == NULL) {
2149                         /*
2150                          * This is a special case: we never associated this
2151                          * dbuf with any data allocated from the ARC.
2152                          */
2153                         ASSERT(db->db_state == DB_UNCACHED ||
2154                             db->db_state == DB_NOFILL);
2155                         dbuf_evict(db);
2156                 } else if (arc_released(db->db_buf)) {
2157                         arc_buf_t *buf = db->db_buf;
2158                         /*
2159                          * This dbuf has anonymous data associated with it.
2160                          */
2161                         dbuf_set_data(db, NULL);
2162                         VERIFY(arc_buf_remove_ref(buf, db) == 1);
2163                         dbuf_evict(db);
2164                 } else {
2165                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
2166                         if (!DBUF_IS_CACHEABLE(db))
2167                                 dbuf_clear(db);
2168                         else
2169                                 mutex_exit(&db->db_mtx);
2170                 }
2171         } else {
2172                 mutex_exit(&db->db_mtx);
2173         }
2174 }
2175
2176 #pragma weak dmu_buf_refcount = dbuf_refcount
2177 uint64_t
2178 dbuf_refcount(dmu_buf_impl_t *db)
2179 {
2180         return (refcount_count(&db->db_holds));
2181 }
2182
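/*
 * The user-state accessors below associate caller-private data with a
 * dbuf.  dmu_buf_update_user() only installs the new state if the
 * current user pointer matches old_user_ptr, otherwise returning the
 * pointer that was already in place.  dmu_buf_set_user() and
 * dmu_buf_set_user_ie() pass NULL for old_user_ptr; the _ie variant
 * also marks the dbuf for immediate eviction of its user state.
 */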
2183 void *
2184 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2185     dmu_buf_evict_func_t *evict_func)
2186 {
2187         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2188             user_data_ptr_ptr, evict_func));
2189 }
2190
2191 void *
2192 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2193     dmu_buf_evict_func_t *evict_func)
2194 {
2195         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2196
2197         db->db_immediate_evict = TRUE;
2198         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2199             user_data_ptr_ptr, evict_func));
2200 }
2201
2202 void *
2203 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2204     void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2205 {
2206         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2207         ASSERT(db->db_level == 0);
2208
2209         ASSERT((user_ptr == NULL) == (evict_func == NULL));
2210
2211         mutex_enter(&db->db_mtx);
2212
2213         if (db->db_user_ptr == old_user_ptr) {
2214                 db->db_user_ptr = user_ptr;
2215                 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2216                 db->db_evict_func = evict_func;
2217
2218                 dbuf_update_data(db);
2219         } else {
2220                 old_user_ptr = db->db_user_ptr;
2221         }
2222
2223         mutex_exit(&db->db_mtx);
2224         return (old_user_ptr);
2225 }
2226
2227 void *
2228 dmu_buf_get_user(dmu_buf_t *db_fake)
2229 {
2230         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2231         ASSERT(!refcount_is_zero(&db->db_holds));
2232
2233         return (db->db_user_ptr);
2234 }
2235
2236 boolean_t
2237 dmu_buf_freeable(dmu_buf_t *dbuf)
2238 {
2239         boolean_t res = B_FALSE;
2240         dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2241
2242         if (db->db_blkptr)
2243                 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2244                     db->db_blkptr, db->db_blkptr->blk_birth);
2245
2246         return (res);
2247 }
2248
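/*
 * Make sure db->db_blkptr points at the proper location in the parent,
 * either a block pointer embedded in the dnode or a slot in the parent
 * indirect block, holding the parent dbuf if it is not already set up.
 * Called while syncing the dbuf out.
 */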
2249 static void
2250 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2251 {
2252         /* ASSERT(dmu_tx_is_syncing(tx)) */
2253         ASSERT(MUTEX_HELD(&db->db_mtx));
2254
2255         if (db->db_blkptr != NULL)
2256                 return;
2257
2258         if (db->db_blkid == DMU_SPILL_BLKID) {
2259                 db->db_blkptr = &dn->dn_phys->dn_spill;
2260                 BP_ZERO(db->db_blkptr);
2261                 return;
2262         }
2263         if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2264                 /*
2265                  * This buffer was allocated at a time when there were
2266                  * no available blkptrs from the dnode, or it was
2267                  * inappropriate to hook it in (i.e., an nlevels mismatch).
2268                  */
2269                 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2270                 ASSERT(db->db_parent == NULL);
2271                 db->db_parent = dn->dn_dbuf;
2272                 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2273                 DBUF_VERIFY(db);
2274         } else {
2275                 dmu_buf_impl_t *parent = db->db_parent;
2276                 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2277
2278                 ASSERT(dn->dn_phys->dn_nlevels > 1);
2279                 if (parent == NULL) {
2280                         mutex_exit(&db->db_mtx);
2281                         rw_enter(&dn->dn_struct_rwlock, RW_READER);
2282                         (void) dbuf_hold_impl(dn, db->db_level+1,
2283                             db->db_blkid >> epbs, FALSE, db, &parent);
2284                         rw_exit(&dn->dn_struct_rwlock);
2285                         mutex_enter(&db->db_mtx);
2286                         db->db_parent = parent;
2287                 }
2288                 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2289                     (db->db_blkid & ((1ULL << epbs) - 1));
2290                 DBUF_VERIFY(db);
2291         }
2292 }
2293
2294 /* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
2295  * is critical that we not allow the compiler to inline this function into
2296  * dbuf_sync_list(), thereby drastically bloating the stack usage.
2297  */
2298 noinline static void
2299 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2300 {
2301         dmu_buf_impl_t *db = dr->dr_dbuf;
2302         dnode_t *dn;
2303         zio_t *zio;
2304
2305         ASSERT(dmu_tx_is_syncing(tx));
2306
2307         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2308
2309         mutex_enter(&db->db_mtx);
2310
2311         ASSERT(db->db_level > 0);
2312         DBUF_VERIFY(db);
2313
2314         if (db->db_buf == NULL) {
2315                 mutex_exit(&db->db_mtx);
2316                 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2317                 mutex_enter(&db->db_mtx);
2318         }
2319         ASSERT3U(db->db_state, ==, DB_CACHED);
2320         ASSERT(db->db_buf != NULL);
2321
2322         DB_DNODE_ENTER(db);
2323         dn = DB_DNODE(db);
2324         ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2325         dbuf_check_blkptr(dn, db);
2326         DB_DNODE_EXIT(db);
2327
2328         db->db_data_pending = dr;
2329
2330         mutex_exit(&db->db_mtx);
2331         dbuf_write(dr, db->db_buf, tx);
2332
2333         zio = dr->dr_zio;
2334         mutex_enter(&dr->dt.di.dr_mtx);
2335         dbuf_sync_list(&dr->dt.di.dr_children, tx);
2336         ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2337         mutex_exit(&dr->dt.di.dr_mtx);
2338         zio_nowait(zio);
2339 }
2340
2341 /* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
2342  * critical that we not allow the compiler to inline this function into
2343  * dbuf_sync_list(), thereby drastically bloating the stack usage.
2344  */
2345 noinline static void
2346 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2347 {
2348         arc_buf_t **datap = &dr->dt.dl.dr_data;
2349         dmu_buf_impl_t *db = dr->dr_dbuf;
2350         dnode_t *dn;
2351         objset_t *os;
2352         uint64_t txg = tx->tx_txg;
2353
2354         ASSERT(dmu_tx_is_syncing(tx));
2355
2356         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2357
2358         mutex_enter(&db->db_mtx);
2359         /*
2360          * To be synced, we must be dirtied.  But we
2361          * might have been freed after the dirty.
2362          */
2363         if (db->db_state == DB_UNCACHED) {
2364                 /* This buffer has been freed since it was dirtied */
2365                 ASSERT(db->db.db_data == NULL);
2366         } else if (db->db_state == DB_FILL) {
2367                 /* This buffer was freed and is now being re-filled */
2368                 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2369         } else {
2370                 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2371         }
2372         DBUF_VERIFY(db);
2373
2374         DB_DNODE_ENTER(db);
2375         dn = DB_DNODE(db);
2376
2377         if (db->db_blkid == DMU_SPILL_BLKID) {
2378                 mutex_enter(&dn->dn_mtx);
2379                 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2380                 mutex_exit(&dn->dn_mtx);
2381         }
2382
2383         /*
2384          * If this is a bonus buffer, simply copy the bonus data into the
2385          * dnode.  It will be written out when the dnode is synced (and it
2386          * will be synced, since it must have been dirty for dbuf_sync to
2387          * be called).
2388          */
2389         if (db->db_blkid == DMU_BONUS_BLKID) {
2390                 dbuf_dirty_record_t **drp;
2391
2392                 ASSERT(*datap != NULL);
2393                 ASSERT3U(db->db_level, ==, 0);
2394                 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2395                 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2396                 DB_DNODE_EXIT(db);
2397
2398                 if (*datap != db->db.db_data) {
2399                         zio_buf_free(*datap, DN_MAX_BONUSLEN);
2400                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2401                 }
2402                 db->db_data_pending = NULL;
2403                 drp = &db->db_last_dirty;
2404                 while (*drp != dr)
2405                         drp = &(*drp)->dr_next;
2406                 ASSERT(dr->dr_next == NULL);
2407                 ASSERT(dr->dr_dbuf == db);
2408                 *drp = dr->dr_next;
2409                 if (dr->dr_dbuf->db_level != 0) {
2410                         mutex_destroy(&dr->dt.di.dr_mtx);
2411                         list_destroy(&dr->dt.di.dr_children);
2412                 }
2413                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2414                 ASSERT(db->db_dirtycnt > 0);
2415                 db->db_dirtycnt -= 1;
2416                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2417                 return;
2418         }
2419
2420         os = dn->dn_objset;
2421
2422         /*
2423          * This function may have dropped the db_mtx lock allowing a dmu_sync
2424          * operation to sneak in. As a result, we need to ensure that we
2425          * don't check the dr_override_state until we have returned from
2426          * dbuf_check_blkptr.
2427          */
2428         dbuf_check_blkptr(dn, db);
2429
2430         /*
2431          * If this buffer is in the middle of an immediate write,
2432          * wait for the synchronous IO to complete.
2433          */
2434         while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2435                 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2436                 cv_wait(&db->db_changed, &db->db_mtx);
2437                 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2438         }
2439
2440         if (db->db_state != DB_NOFILL &&
2441             dn->dn_object != DMU_META_DNODE_OBJECT &&
2442             refcount_count(&db->db_holds) > 1 &&
2443             dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2444             *datap == db->db_buf) {
2445                 /*
2446                  * If this buffer is currently "in use" (i.e., there
2447                  * are active holds and db_data still references it),
2448                  * then make a copy before we start the write so that
2449                  * any modifications from the open txg will not leak
2450                  * into this write.
2451                  *
2452                  * NOTE: this copy does not need to be made for
2453                  * objects only modified in the syncing context (e.g.
2454                  * DNODE blocks).
2455                  */
2456                 int blksz = arc_buf_size(*datap);
2457                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2458                 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2459                 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2460         }
2461         db->db_data_pending = dr;
2462
2463         mutex_exit(&db->db_mtx);
2464
2465         dbuf_write(dr, *datap, tx);
2466
2467         ASSERT(!list_link_active(&dr->dr_dirty_node));
2468         if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2469                 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2470                 DB_DNODE_EXIT(db);
2471         } else {
2472                 /*
2473                  * Although zio_nowait() does not "wait for an IO", it does
2474                  * initiate the IO. If this is an empty write it seems plausible
2475                  * that the IO could actually be completed before the nowait
2476                  * returns. We need to DB_DNODE_EXIT() first in case
2477                  * zio_nowait() invalidates the dbuf.
2478                  */
2479                 DB_DNODE_EXIT(db);
2480                 zio_nowait(dr->dr_zio);
2481         }
2482 }
2483
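/*
 * Sync out every dirty record on the list: indirect blocks are handled
 * by dbuf_sync_indirect() and leaf blocks by dbuf_sync_leaf().
 * Processing stops at the first record that already has a zio (see the
 * meta-dnode note below).
 */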
2484 void
2485 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2486 {
2487         dbuf_dirty_record_t *dr;
2488
2489         while ((dr = list_head(list))) {
2490                 if (dr->dr_zio != NULL) {
2491                         /*
2492                          * If we find an already initialized zio then we
2493                          * are processing the meta-dnode, and we have finished.
2494                          * The dbufs for all dnodes are put back on the list
2495                          * during processing, so that we can zio_wait()
2496                          * these IOs after initiating all child IOs.
2497                          */
2498                         ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2499                             DMU_META_DNODE_OBJECT);
2500                         break;
2501                 }
2502                 list_remove(list, dr);
2503                 if (dr->dr_dbuf->db_level > 0)
2504                         dbuf_sync_indirect(dr, tx);
2505                 else
2506                         dbuf_sync_leaf(dr, tx);
2507         }
2508 }
2509
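/*
 * zio "ready" callback for dbuf writes: charge the change in allocated
 * space to the dnode and compute the new block pointer's fill count
 * from the data just written.
 */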
2510 /* ARGSUSED */
2511 static void
2512 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2513 {
2514         dmu_buf_impl_t *db = vdb;
2515         dnode_t *dn;
2516         blkptr_t *bp = zio->io_bp;
2517         blkptr_t *bp_orig = &zio->io_bp_orig;
2518         spa_t *spa = zio->io_spa;
2519         int64_t delta;
2520         uint64_t fill = 0;
2521         int i;
2522
2523         ASSERT(db->db_blkptr == bp);
2524
2525         DB_DNODE_ENTER(db);
2526         dn = DB_DNODE(db);
2527         delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2528         dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2529         zio->io_prev_space_delta = delta;
2530
2531         if (BP_IS_HOLE(bp)) {
2532                 ASSERT(bp->blk_fill == 0);
2533                 DB_DNODE_EXIT(db);
2534                 return;
2535         }
2536
2537         ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2538             BP_GET_TYPE(bp) == dn->dn_type) ||
2539             (db->db_blkid == DMU_SPILL_BLKID &&
2540             BP_GET_TYPE(bp) == dn->dn_bonustype));
2541         ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2542
2543         mutex_enter(&db->db_mtx);
2544
2545 #ifdef ZFS_DEBUG
2546         if (db->db_blkid == DMU_SPILL_BLKID) {
2547                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2548                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2549                     db->db_blkptr == &dn->dn_phys->dn_spill);
2550         }
2551 #endif
2552
2553         if (db->db_level == 0) {
2554                 mutex_enter(&dn->dn_mtx);
2555                 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2556                     db->db_blkid != DMU_SPILL_BLKID)
2557                         dn->dn_phys->dn_maxblkid = db->db_blkid;
2558                 mutex_exit(&dn->dn_mtx);
2559
2560                 if (dn->dn_type == DMU_OT_DNODE) {
2561                         dnode_phys_t *dnp = db->db.db_data;
2562                         for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2563                             i--, dnp++) {
2564                                 if (dnp->dn_type != DMU_OT_NONE)
2565                                         fill++;
2566                         }
2567                 } else {
2568                         fill = 1;
2569                 }
2570         } else {
2571                 blkptr_t *ibp = db->db.db_data;
2572                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2573                 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2574                         if (BP_IS_HOLE(ibp))
2575                                 continue;
2576                         fill += ibp->blk_fill;
2577                 }
2578         }
2579         DB_DNODE_EXIT(db);
2580
2581         bp->blk_fill = fill;
2582
2583         mutex_exit(&db->db_mtx);
2584 }
2585
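/*
 * zio "done" callback for dbuf writes: account for the newly born block
 * (and kill the one it replaced, unless this was an in-place rewrite),
 * unlink and free the dirty record, and release the hold that was taken
 * when the dbuf was dirtied.
 */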
2586 /* ARGSUSED */
2587 static void
2588 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2589 {
2590         dmu_buf_impl_t *db = vdb;
2591         blkptr_t *bp = zio->io_bp;
2592         blkptr_t *bp_orig = &zio->io_bp_orig;
2593         uint64_t txg = zio->io_txg;
2594         dbuf_dirty_record_t **drp, *dr;
2595
2596         ASSERT3U(zio->io_error, ==, 0);
2597         ASSERT(db->db_blkptr == bp);
2598
2599         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
2600                 ASSERT(BP_EQUAL(bp, bp_orig));
2601         } else {
2602                 objset_t *os;
2603                 dsl_dataset_t *ds;
2604                 dmu_tx_t *tx;
2605
2606                 DB_GET_OBJSET(&os, db);
2607                 ds = os->os_dsl_dataset;
2608                 tx = os->os_synctx;
2609
2610                 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2611                 dsl_dataset_block_born(ds, bp, tx);
2612         }
2613
2614         mutex_enter(&db->db_mtx);
2615
2616         DBUF_VERIFY(db);
2617
2618         drp = &db->db_last_dirty;
2619         while ((dr = *drp) != db->db_data_pending)
2620                 drp = &dr->dr_next;
2621         ASSERT(!list_link_active(&dr->dr_dirty_node));
2622         ASSERT(dr->dr_txg == txg);
2623         ASSERT(dr->dr_dbuf == db);
2624         ASSERT(dr->dr_next == NULL);
2625         *drp = dr->dr_next;
2626
2627 #ifdef ZFS_DEBUG
2628         if (db->db_blkid == DMU_SPILL_BLKID) {
2629                 dnode_t *dn;
2630
2631                 DB_DNODE_ENTER(db);
2632                 dn = DB_DNODE(db);
2633                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2634                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2635                     db->db_blkptr == &dn->dn_phys->dn_spill);
2636                 DB_DNODE_EXIT(db);
2637         }
2638 #endif
2639
2640         if (db->db_level == 0) {
2641                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2642                 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2643                 if (db->db_state != DB_NOFILL) {
2644                         if (dr->dt.dl.dr_data != db->db_buf)
2645                                 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2646                                     db) == 1);
2647                         else if (!arc_released(db->db_buf))
2648                                 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2649                 }
2650         } else {
2651                 dnode_t *dn;
2652
2653                 DB_DNODE_ENTER(db);
2654                 dn = DB_DNODE(db);
2655                 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2656                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2657                 if (!BP_IS_HOLE(db->db_blkptr)) {
2658                         ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
2659                             SPA_BLKPTRSHIFT);
2660                         ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2661                             db->db.db_size);
2662                         ASSERT3U(dn->dn_phys->dn_maxblkid
2663                             >> (db->db_level * epbs), >=, db->db_blkid);
2664                         arc_set_callback(db->db_buf, dbuf_do_evict, db);
2665                 }
2666                 DB_DNODE_EXIT(db);
2667                 mutex_destroy(&dr->dt.di.dr_mtx);
2668                 list_destroy(&dr->dt.di.dr_children);
2669         }
2670         kmem_free(dr, sizeof (dbuf_dirty_record_t));
2671
2672         cv_broadcast(&db->db_changed);
2673         ASSERT(db->db_dirtycnt > 0);
2674         db->db_dirtycnt -= 1;
2675         db->db_data_pending = NULL;
2676         dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2677 }
2678
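/*
 * Thin zio callbacks used for writes that were issued without an
 * associated arc buf (NOFILL and override writes).  The override "done"
 * callback also frees the overridden block if it differs from the block
 * that was actually written.
 */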
2679 static void
2680 dbuf_write_nofill_ready(zio_t *zio)
2681 {
2682         dbuf_write_ready(zio, NULL, zio->io_private);
2683 }
2684
2685 static void
2686 dbuf_write_nofill_done(zio_t *zio)
2687 {
2688         dbuf_write_done(zio, NULL, zio->io_private);
2689 }
2690
2691 static void
2692 dbuf_write_override_ready(zio_t *zio)
2693 {
2694         dbuf_dirty_record_t *dr = zio->io_private;
2695         dmu_buf_impl_t *db = dr->dr_dbuf;
2696
2697         dbuf_write_ready(zio, NULL, db);
2698 }
2699
2700 static void
2701 dbuf_write_override_done(zio_t *zio)
2702 {
2703         dbuf_dirty_record_t *dr = zio->io_private;
2704         dmu_buf_impl_t *db = dr->dr_dbuf;
2705         blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2706
2707         mutex_enter(&db->db_mtx);
2708         if (!BP_EQUAL(zio->io_bp, obp)) {
2709                 if (!BP_IS_HOLE(obp))
2710                         dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2711                 arc_release(dr->dt.dl.dr_data, db);
2712         }
2713         mutex_exit(&db->db_mtx);
2714
2715         dbuf_write_done(zio, NULL, db);
2716 }
2717
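/*
 * Kick off the write of a dirty record: chain it under the parent
 * dbuf's zio (or the dnode's zio at the top level), build the write
 * policy, and issue either a zio_write() (override and NOFILL cases)
 * or an arc_write().  The resulting zio is saved in dr->dr_zio.
 */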
2718 static void
2719 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2720 {
2721         dmu_buf_impl_t *db = dr->dr_dbuf;
2722         dnode_t *dn;
2723         objset_t *os;
2724         dmu_buf_impl_t *parent = db->db_parent;
2725         uint64_t txg = tx->tx_txg;
2726         zbookmark_t zb;
2727         zio_prop_t zp;
2728         zio_t *zio;
2729         int wp_flag = 0;
2730
2731         DB_DNODE_ENTER(db);
2732         dn = DB_DNODE(db);
2733         os = dn->dn_objset;
2734
2735         if (db->db_state != DB_NOFILL) {
2736                 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2737                         /*
2738                          * Private object buffers are released here rather
2739                          * than in dbuf_dirty() since they are only modified
2740                          * in the syncing context and we don't want the
2741                          * overhead of making multiple copies of the data.
2742                          */
2743                         if (BP_IS_HOLE(db->db_blkptr)) {
2744                                 arc_buf_thaw(data);
2745                         } else {
2746                                 dbuf_release_bp(db);
2747                         }
2748                 }
2749         }
2750
2751         if (parent != dn->dn_dbuf) {
2752                 ASSERT(parent && parent->db_data_pending);
2753                 ASSERT(db->db_level == parent->db_level-1);
2754                 ASSERT(arc_released(parent->db_buf));
2755                 zio = parent->db_data_pending->dr_zio;
2756         } else {
2757                 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2758                     db->db_blkid != DMU_SPILL_BLKID) ||
2759                     (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2760                 if (db->db_blkid != DMU_SPILL_BLKID)
2761                         ASSERT3P(db->db_blkptr, ==,
2762                             &dn->dn_phys->dn_blkptr[db->db_blkid]);
2763                 zio = dn->dn_zio;
2764         }
2765
2766         ASSERT(db->db_level == 0 || data == db->db_buf);
2767         ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2768         ASSERT(zio);
2769
2770         SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2771             os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2772             db->db.db_object, db->db_level, db->db_blkid);
2773
2774         if (db->db_blkid == DMU_SPILL_BLKID)
2775                 wp_flag = WP_SPILL;
2776         wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2777
2778         dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2779         DB_DNODE_EXIT(db);
2780
2781         if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2782                 ASSERT(db->db_state != DB_NOFILL);
2783                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2784                     db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2785                     dbuf_write_override_ready, dbuf_write_override_done, dr,
2786                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2787                 mutex_enter(&db->db_mtx);
2788                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2789                 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2790                     dr->dt.dl.dr_copies);
2791                 mutex_exit(&db->db_mtx);
2792         } else if (db->db_state == DB_NOFILL) {
2793                 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2794                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2795                     db->db_blkptr, NULL, db->db.db_size, &zp,
2796                     dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2797                     ZIO_PRIORITY_ASYNC_WRITE,
2798                     ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2799         } else {
2800                 ASSERT(arc_released(data));
2801                 dr->dr_zio = arc_write(zio, os->os_spa, txg,
2802                     db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
2803                     dbuf_write_ready, dbuf_write_done, db,
2804                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2805         }
2806 }