Illumos #764: panic in zfs:dbuf_sync_list
[zfs.git] / module / zfs / dbuf.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  */
25
26 #include <sys/zfs_context.h>
27 #include <sys/arc.h>
28 #include <sys/dmu.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dbuf.h>
31 #include <sys/dmu_objset.h>
32 #include <sys/dsl_dataset.h>
33 #include <sys/dsl_dir.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/spa.h>
36 #include <sys/zio.h>
37 #include <sys/dmu_zfetch.h>
38 #include <sys/sa.h>
39 #include <sys/sa_impl.h>
40
41 struct dbuf_hold_impl_data {
42         /* Function arguments */
43         dnode_t *dh_dn;
44         uint8_t dh_level;
45         uint64_t dh_blkid;
46         int dh_fail_sparse;
47         void *dh_tag;
48         dmu_buf_impl_t **dh_dbp;
49         /* Local variables */
50         dmu_buf_impl_t *dh_db;
51         dmu_buf_impl_t *dh_parent;
52         blkptr_t *dh_bp;
53         int dh_err;
54         dbuf_dirty_record_t *dh_dr;
55         arc_buf_contents_t dh_type;
56         int dh_depth;
57 };
58
59 static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
60     dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
61     void *tag, dmu_buf_impl_t **dbp, int depth);
62 static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
63
64 static void dbuf_destroy(dmu_buf_impl_t *db);
65 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
66 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
67
68 /*
69  * Global data structures and functions for the dbuf cache.
70  */
71 static kmem_cache_t *dbuf_cache;
72
73 /* ARGSUSED */
74 static int
75 dbuf_cons(void *vdb, void *unused, int kmflag)
76 {
77         dmu_buf_impl_t *db = vdb;
78         bzero(db, sizeof (dmu_buf_impl_t));
79
80         mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
81         cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
82         refcount_create(&db->db_holds);
83         list_link_init(&db->db_link);
84         return (0);
85 }
86
87 /* ARGSUSED */
88 static void
89 dbuf_dest(void *vdb, void *unused)
90 {
91         dmu_buf_impl_t *db = vdb;
92         mutex_destroy(&db->db_mtx);
93         cv_destroy(&db->db_changed);
94         refcount_destroy(&db->db_holds);
95 }
96
97 /*
98  * dbuf hash table routines
99  */
100 static dbuf_hash_table_t dbuf_hash_table;
101
102 static uint64_t dbuf_hash_count;
103
104 static uint64_t
105 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
106 {
107         uintptr_t osv = (uintptr_t)os;
108         uint64_t crc = -1ULL;
109
110         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
111         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
112         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
113         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
114         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
115         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
116         crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
117
118         crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
119
120         return (crc);
121 }
122
123 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid);
124
125 #define DBUF_EQUAL(dbuf, os, obj, level, blkid)         \
126         ((dbuf)->db.db_object == (obj) &&               \
127         (dbuf)->db_objset == (os) &&                    \
128         (dbuf)->db_level == (level) &&                  \
129         (dbuf)->db_blkid == (blkid))
130
131 dmu_buf_impl_t *
132 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
133 {
134         dbuf_hash_table_t *h = &dbuf_hash_table;
135         objset_t *os = dn->dn_objset;
136         uint64_t obj;
137         uint64_t hv;
138         uint64_t idx;
139         dmu_buf_impl_t *db;
140
141         obj = dn->dn_object;
142         hv = DBUF_HASH(os, obj, level, blkid);
143         idx = hv & h->hash_table_mask;
144
145         mutex_enter(DBUF_HASH_MUTEX(h, idx));
146         for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
147                 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
148                         mutex_enter(&db->db_mtx);
149                         if (db->db_state != DB_EVICTING) {
150                                 mutex_exit(DBUF_HASH_MUTEX(h, idx));
151                                 return (db);
152                         }
153                         mutex_exit(&db->db_mtx);
154                 }
155         }
156         mutex_exit(DBUF_HASH_MUTEX(h, idx));
157         return (NULL);
158 }
159
160 /*
161  * Insert an entry into the hash table.  If there is already an element
162  * equal to elem in the hash table, then the already existing element
163  * will be returned and the new element will not be inserted.
164  * Otherwise returns NULL.
165  */
166 static dmu_buf_impl_t *
167 dbuf_hash_insert(dmu_buf_impl_t *db)
168 {
169         dbuf_hash_table_t *h = &dbuf_hash_table;
170         objset_t *os = db->db_objset;
171         uint64_t obj = db->db.db_object;
172         int level = db->db_level;
173         uint64_t blkid, hv, idx;
174         dmu_buf_impl_t *dbf;
175
176         blkid = db->db_blkid;
177         hv = DBUF_HASH(os, obj, level, blkid);
178         idx = hv & h->hash_table_mask;
179
180         mutex_enter(DBUF_HASH_MUTEX(h, idx));
181         for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
182                 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
183                         mutex_enter(&dbf->db_mtx);
184                         if (dbf->db_state != DB_EVICTING) {
185                                 mutex_exit(DBUF_HASH_MUTEX(h, idx));
186                                 return (dbf);
187                         }
188                         mutex_exit(&dbf->db_mtx);
189                 }
190         }
191
192         mutex_enter(&db->db_mtx);
193         db->db_hash_next = h->hash_table[idx];
194         h->hash_table[idx] = db;
195         mutex_exit(DBUF_HASH_MUTEX(h, idx));
196         atomic_add_64(&dbuf_hash_count, 1);
197
198         return (NULL);
199 }
200
201 /*
202  * Remove an entry from the hash table.  This operation will
203  * fail if there are any existing holds on the db.
204  */
205 static void
206 dbuf_hash_remove(dmu_buf_impl_t *db)
207 {
208         dbuf_hash_table_t *h = &dbuf_hash_table;
209         uint64_t hv, idx;
210         dmu_buf_impl_t *dbf, **dbp;
211
212         hv = DBUF_HASH(db->db_objset, db->db.db_object,
213             db->db_level, db->db_blkid);
214         idx = hv & h->hash_table_mask;
215
216         /*
217          * We musn't hold db_mtx to maintin lock ordering:
218          * DBUF_HASH_MUTEX > db_mtx.
219          */
220         ASSERT(refcount_is_zero(&db->db_holds));
221         ASSERT(db->db_state == DB_EVICTING);
222         ASSERT(!MUTEX_HELD(&db->db_mtx));
223
224         mutex_enter(DBUF_HASH_MUTEX(h, idx));
225         dbp = &h->hash_table[idx];
226         while ((dbf = *dbp) != db) {
227                 dbp = &dbf->db_hash_next;
228                 ASSERT(dbf != NULL);
229         }
230         *dbp = db->db_hash_next;
231         db->db_hash_next = NULL;
232         mutex_exit(DBUF_HASH_MUTEX(h, idx));
233         atomic_add_64(&dbuf_hash_count, -1);
234 }
235
236 static arc_evict_func_t dbuf_do_evict;
237
238 static void
239 dbuf_evict_user(dmu_buf_impl_t *db)
240 {
241         ASSERT(MUTEX_HELD(&db->db_mtx));
242
243         if (db->db_level != 0 || db->db_evict_func == NULL)
244                 return;
245
246         if (db->db_user_data_ptr_ptr)
247                 *db->db_user_data_ptr_ptr = db->db.db_data;
248         db->db_evict_func(&db->db, db->db_user_ptr);
249         db->db_user_ptr = NULL;
250         db->db_user_data_ptr_ptr = NULL;
251         db->db_evict_func = NULL;
252 }
253
254 boolean_t
255 dbuf_is_metadata(dmu_buf_impl_t *db)
256 {
257         if (db->db_level > 0) {
258                 return (B_TRUE);
259         } else {
260                 boolean_t is_metadata;
261
262                 DB_DNODE_ENTER(db);
263                 is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata;
264                 DB_DNODE_EXIT(db);
265
266                 return (is_metadata);
267         }
268 }
269
270 void
271 dbuf_evict(dmu_buf_impl_t *db)
272 {
273         ASSERT(MUTEX_HELD(&db->db_mtx));
274         ASSERT(db->db_buf == NULL);
275         ASSERT(db->db_data_pending == NULL);
276
277         dbuf_clear(db);
278         dbuf_destroy(db);
279 }
280
281 void
282 dbuf_init(void)
283 {
284         uint64_t hsize = 1ULL << 16;
285         dbuf_hash_table_t *h = &dbuf_hash_table;
286         int i;
287
288         /*
289          * The hash table is big enough to fill all of physical memory
290          * with an average 4K block size.  The table will take up
291          * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
292          */
293         while (hsize * 4096 < physmem * PAGESIZE)
294                 hsize <<= 1;
295
296 retry:
297         h->hash_table_mask = hsize - 1;
298 #if defined(_KERNEL) && defined(HAVE_SPL)
299         /* Large allocations which do not require contiguous pages
300          * should be using vmem_alloc() in the linux kernel */
301         h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
302 #else
303         h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
304 #endif
305         if (h->hash_table == NULL) {
306                 /* XXX - we should really return an error instead of assert */
307                 ASSERT(hsize > (1ULL << 10));
308                 hsize >>= 1;
309                 goto retry;
310         }
311
312         dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
313             sizeof (dmu_buf_impl_t),
314             0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
315
316         for (i = 0; i < DBUF_MUTEXES; i++)
317                 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
318 }
319
320 void
321 dbuf_fini(void)
322 {
323         dbuf_hash_table_t *h = &dbuf_hash_table;
324         int i;
325
326         for (i = 0; i < DBUF_MUTEXES; i++)
327                 mutex_destroy(&h->hash_mutexes[i]);
328 #if defined(_KERNEL) && defined(HAVE_SPL)
329         /* Large allocations which do not require contiguous pages
330          * should be using vmem_free() in the linux kernel */
331         vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
332 #else
333         kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
334 #endif
335         kmem_cache_destroy(dbuf_cache);
336 }
337
338 /*
339  * Other stuff.
340  */
341
342 #ifdef ZFS_DEBUG
343 static void
344 dbuf_verify(dmu_buf_impl_t *db)
345 {
346         dnode_t *dn;
347         dbuf_dirty_record_t *dr;
348
349         ASSERT(MUTEX_HELD(&db->db_mtx));
350
351         if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
352                 return;
353
354         ASSERT(db->db_objset != NULL);
355         DB_DNODE_ENTER(db);
356         dn = DB_DNODE(db);
357         if (dn == NULL) {
358                 ASSERT(db->db_parent == NULL);
359                 ASSERT(db->db_blkptr == NULL);
360         } else {
361                 ASSERT3U(db->db.db_object, ==, dn->dn_object);
362                 ASSERT3P(db->db_objset, ==, dn->dn_objset);
363                 ASSERT3U(db->db_level, <, dn->dn_nlevels);
364                 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
365                     db->db_blkid == DMU_SPILL_BLKID ||
366                     !list_is_empty(&dn->dn_dbufs));
367         }
368         if (db->db_blkid == DMU_BONUS_BLKID) {
369                 ASSERT(dn != NULL);
370                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
371                 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
372         } else if (db->db_blkid == DMU_SPILL_BLKID) {
373                 ASSERT(dn != NULL);
374                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
375                 ASSERT3U(db->db.db_offset, ==, 0);
376         } else {
377                 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
378         }
379
380         for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
381                 ASSERT(dr->dr_dbuf == db);
382
383         for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
384                 ASSERT(dr->dr_dbuf == db);
385
386         /*
387          * We can't assert that db_size matches dn_datablksz because it
388          * can be momentarily different when another thread is doing
389          * dnode_set_blksz().
390          */
391         if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
392                 dr = db->db_data_pending;
393                 /*
394                  * It should only be modified in syncing context, so
395                  * make sure we only have one copy of the data.
396                  */
397                 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
398         }
399
400         /* verify db->db_blkptr */
401         if (db->db_blkptr) {
402                 if (db->db_parent == dn->dn_dbuf) {
403                         /* db is pointed to by the dnode */
404                         /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
405                         if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
406                                 ASSERT(db->db_parent == NULL);
407                         else
408                                 ASSERT(db->db_parent != NULL);
409                         if (db->db_blkid != DMU_SPILL_BLKID)
410                                 ASSERT3P(db->db_blkptr, ==,
411                                     &dn->dn_phys->dn_blkptr[db->db_blkid]);
412                 } else {
413                         /* db is pointed to by an indirect block */
414                         ASSERTV(int epb = db->db_parent->db.db_size >>
415                                 SPA_BLKPTRSHIFT);
416                         ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
417                         ASSERT3U(db->db_parent->db.db_object, ==,
418                             db->db.db_object);
419                         /*
420                          * dnode_grow_indblksz() can make this fail if we don't
421                          * have the struct_rwlock.  XXX indblksz no longer
422                          * grows.  safe to do this now?
423                          */
424                         if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
425                                 ASSERT3P(db->db_blkptr, ==,
426                                     ((blkptr_t *)db->db_parent->db.db_data +
427                                     db->db_blkid % epb));
428                         }
429                 }
430         }
431         if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
432             (db->db_buf == NULL || db->db_buf->b_data) &&
433             db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
434             db->db_state != DB_FILL && !dn->dn_free_txg) {
435                 /*
436                  * If the blkptr isn't set but they have nonzero data,
437                  * it had better be dirty, otherwise we'll lose that
438                  * data when we evict this buffer.
439                  */
440                 if (db->db_dirtycnt == 0) {
441                         ASSERTV(uint64_t *buf = db->db.db_data);
442                         int i;
443
444                         for (i = 0; i < db->db.db_size >> 3; i++) {
445                                 ASSERT(buf[i] == 0);
446                         }
447                 }
448         }
449         DB_DNODE_EXIT(db);
450 }
451 #endif
452
453 static void
454 dbuf_update_data(dmu_buf_impl_t *db)
455 {
456         ASSERT(MUTEX_HELD(&db->db_mtx));
457         if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
458                 ASSERT(!refcount_is_zero(&db->db_holds));
459                 *db->db_user_data_ptr_ptr = db->db.db_data;
460         }
461 }
462
463 static void
464 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
465 {
466         ASSERT(MUTEX_HELD(&db->db_mtx));
467         ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
468         db->db_buf = buf;
469         if (buf != NULL) {
470                 ASSERT(buf->b_data != NULL);
471                 db->db.db_data = buf->b_data;
472                 if (!arc_released(buf))
473                         arc_set_callback(buf, dbuf_do_evict, db);
474                 dbuf_update_data(db);
475         } else {
476                 dbuf_evict_user(db);
477                 db->db.db_data = NULL;
478                 if (db->db_state != DB_NOFILL)
479                         db->db_state = DB_UNCACHED;
480         }
481 }
482
483 /*
484  * Loan out an arc_buf for read.  Return the loaned arc_buf.
485  */
486 arc_buf_t *
487 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
488 {
489         arc_buf_t *abuf;
490
491         mutex_enter(&db->db_mtx);
492         if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
493                 int blksz = db->db.db_size;
494                 spa_t *spa;
495
496                 mutex_exit(&db->db_mtx);
497                 DB_GET_SPA(&spa, db);
498                 abuf = arc_loan_buf(spa, blksz);
499                 bcopy(db->db.db_data, abuf->b_data, blksz);
500         } else {
501                 abuf = db->db_buf;
502                 arc_loan_inuse_buf(abuf, db);
503                 dbuf_set_data(db, NULL);
504                 mutex_exit(&db->db_mtx);
505         }
506         return (abuf);
507 }
508
509 uint64_t
510 dbuf_whichblock(dnode_t *dn, uint64_t offset)
511 {
512         if (dn->dn_datablkshift) {
513                 return (offset >> dn->dn_datablkshift);
514         } else {
515                 ASSERT3U(offset, <, dn->dn_datablksz);
516                 return (0);
517         }
518 }
519
520 static void
521 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
522 {
523         dmu_buf_impl_t *db = vdb;
524
525         mutex_enter(&db->db_mtx);
526         ASSERT3U(db->db_state, ==, DB_READ);
527         /*
528          * All reads are synchronous, so we must have a hold on the dbuf
529          */
530         ASSERT(refcount_count(&db->db_holds) > 0);
531         ASSERT(db->db_buf == NULL);
532         ASSERT(db->db.db_data == NULL);
533         if (db->db_level == 0 && db->db_freed_in_flight) {
534                 /* we were freed in flight; disregard any error */
535                 arc_release(buf, db);
536                 bzero(buf->b_data, db->db.db_size);
537                 arc_buf_freeze(buf);
538                 db->db_freed_in_flight = FALSE;
539                 dbuf_set_data(db, buf);
540                 db->db_state = DB_CACHED;
541         } else if (zio == NULL || zio->io_error == 0) {
542                 dbuf_set_data(db, buf);
543                 db->db_state = DB_CACHED;
544         } else {
545                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
546                 ASSERT3P(db->db_buf, ==, NULL);
547                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
548                 db->db_state = DB_UNCACHED;
549         }
550         cv_broadcast(&db->db_changed);
551         dbuf_rele_and_unlock(db, NULL);
552 }
553
554 static void
555 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
556 {
557         dnode_t *dn;
558         spa_t *spa;
559         zbookmark_t zb;
560         uint32_t aflags = ARC_NOWAIT;
561         arc_buf_t *pbuf;
562
563         DB_DNODE_ENTER(db);
564         dn = DB_DNODE(db);
565         ASSERT(!refcount_is_zero(&db->db_holds));
566         /* We need the struct_rwlock to prevent db_blkptr from changing. */
567         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
568         ASSERT(MUTEX_HELD(&db->db_mtx));
569         ASSERT(db->db_state == DB_UNCACHED);
570         ASSERT(db->db_buf == NULL);
571
572         if (db->db_blkid == DMU_BONUS_BLKID) {
573                 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
574
575                 ASSERT3U(bonuslen, <=, db->db.db_size);
576                 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
577                 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
578                 if (bonuslen < DN_MAX_BONUSLEN)
579                         bzero(db->db.db_data, DN_MAX_BONUSLEN);
580                 if (bonuslen)
581                         bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
582                 DB_DNODE_EXIT(db);
583                 dbuf_update_data(db);
584                 db->db_state = DB_CACHED;
585                 mutex_exit(&db->db_mtx);
586                 return;
587         }
588
589         /*
590          * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
591          * processes the delete record and clears the bp while we are waiting
592          * for the dn_mtx (resulting in a "no" from block_freed).
593          */
594         if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
595             (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
596             BP_IS_HOLE(db->db_blkptr)))) {
597                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
598
599                 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
600                     db->db.db_size, db, type));
601                 DB_DNODE_EXIT(db);
602                 bzero(db->db.db_data, db->db.db_size);
603                 db->db_state = DB_CACHED;
604                 *flags |= DB_RF_CACHED;
605                 mutex_exit(&db->db_mtx);
606                 return;
607         }
608
609         spa = dn->dn_objset->os_spa;
610         DB_DNODE_EXIT(db);
611
612         db->db_state = DB_READ;
613         mutex_exit(&db->db_mtx);
614
615         if (DBUF_IS_L2CACHEABLE(db))
616                 aflags |= ARC_L2CACHE;
617
618         SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
619             db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
620             db->db.db_object, db->db_level, db->db_blkid);
621
622         dbuf_add_ref(db, NULL);
623         /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
624
625         if (db->db_parent)
626                 pbuf = db->db_parent->db_buf;
627         else
628                 pbuf = db->db_objset->os_phys_buf;
629
630         (void) dsl_read(zio, spa, db->db_blkptr, pbuf,
631             dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
632             (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
633             &aflags, &zb);
634         if (aflags & ARC_CACHED)
635                 *flags |= DB_RF_CACHED;
636 }
637
638 int
639 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
640 {
641         int err = 0;
642         int havepzio = (zio != NULL);
643         int prefetch;
644         dnode_t *dn;
645
646         /*
647          * We don't have to hold the mutex to check db_state because it
648          * can't be freed while we have a hold on the buffer.
649          */
650         ASSERT(!refcount_is_zero(&db->db_holds));
651
652         if (db->db_state == DB_NOFILL)
653                 return (EIO);
654
655         DB_DNODE_ENTER(db);
656         dn = DB_DNODE(db);
657         if ((flags & DB_RF_HAVESTRUCT) == 0)
658                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
659
660         prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
661             (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
662             DBUF_IS_CACHEABLE(db);
663
664         mutex_enter(&db->db_mtx);
665         if (db->db_state == DB_CACHED) {
666                 mutex_exit(&db->db_mtx);
667                 if (prefetch)
668                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
669                             db->db.db_size, TRUE);
670                 if ((flags & DB_RF_HAVESTRUCT) == 0)
671                         rw_exit(&dn->dn_struct_rwlock);
672                 DB_DNODE_EXIT(db);
673         } else if (db->db_state == DB_UNCACHED) {
674                 spa_t *spa = dn->dn_objset->os_spa;
675
676                 if (zio == NULL)
677                         zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
678                 dbuf_read_impl(db, zio, &flags);
679
680                 /* dbuf_read_impl has dropped db_mtx for us */
681
682                 if (prefetch)
683                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
684                             db->db.db_size, flags & DB_RF_CACHED);
685
686                 if ((flags & DB_RF_HAVESTRUCT) == 0)
687                         rw_exit(&dn->dn_struct_rwlock);
688                 DB_DNODE_EXIT(db);
689
690                 if (!havepzio)
691                         err = zio_wait(zio);
692         } else {
693                 mutex_exit(&db->db_mtx);
694                 if (prefetch)
695                         dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
696                             db->db.db_size, TRUE);
697                 if ((flags & DB_RF_HAVESTRUCT) == 0)
698                         rw_exit(&dn->dn_struct_rwlock);
699                 DB_DNODE_EXIT(db);
700
701                 mutex_enter(&db->db_mtx);
702                 if ((flags & DB_RF_NEVERWAIT) == 0) {
703                         while (db->db_state == DB_READ ||
704                             db->db_state == DB_FILL) {
705                                 ASSERT(db->db_state == DB_READ ||
706                                     (flags & DB_RF_HAVESTRUCT) == 0);
707                                 cv_wait(&db->db_changed, &db->db_mtx);
708                         }
709                         if (db->db_state == DB_UNCACHED)
710                                 err = EIO;
711                 }
712                 mutex_exit(&db->db_mtx);
713         }
714
715         ASSERT(err || havepzio || db->db_state == DB_CACHED);
716         return (err);
717 }
718
719 static void
720 dbuf_noread(dmu_buf_impl_t *db)
721 {
722         ASSERT(!refcount_is_zero(&db->db_holds));
723         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
724         mutex_enter(&db->db_mtx);
725         while (db->db_state == DB_READ || db->db_state == DB_FILL)
726                 cv_wait(&db->db_changed, &db->db_mtx);
727         if (db->db_state == DB_UNCACHED) {
728                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
729                 spa_t *spa;
730
731                 ASSERT(db->db_buf == NULL);
732                 ASSERT(db->db.db_data == NULL);
733                 DB_GET_SPA(&spa, db);
734                 dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
735                 db->db_state = DB_FILL;
736         } else if (db->db_state == DB_NOFILL) {
737                 dbuf_set_data(db, NULL);
738         } else {
739                 ASSERT3U(db->db_state, ==, DB_CACHED);
740         }
741         mutex_exit(&db->db_mtx);
742 }
743
744 /*
745  * This is our just-in-time copy function.  It makes a copy of
746  * buffers, that have been modified in a previous transaction
747  * group, before we modify them in the current active group.
748  *
749  * This function is used in two places: when we are dirtying a
750  * buffer for the first time in a txg, and when we are freeing
751  * a range in a dnode that includes this buffer.
752  *
753  * Note that when we are called from dbuf_free_range() we do
754  * not put a hold on the buffer, we just traverse the active
755  * dbuf list for the dnode.
756  */
757 static void
758 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
759 {
760         dbuf_dirty_record_t *dr = db->db_last_dirty;
761
762         ASSERT(MUTEX_HELD(&db->db_mtx));
763         ASSERT(db->db.db_data != NULL);
764         ASSERT(db->db_level == 0);
765         ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
766
767         if (dr == NULL ||
768             (dr->dt.dl.dr_data !=
769             ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
770                 return;
771
772         /*
773          * If the last dirty record for this dbuf has not yet synced
774          * and its referencing the dbuf data, either:
775          *      reset the reference to point to a new copy,
776          * or (if there a no active holders)
777          *      just null out the current db_data pointer.
778          */
779         ASSERT(dr->dr_txg >= txg - 2);
780         if (db->db_blkid == DMU_BONUS_BLKID) {
781                 /* Note that the data bufs here are zio_bufs */
782                 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
783                 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
784                 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
785         } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
786                 int size = db->db.db_size;
787                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
788                 spa_t *spa;
789
790                 DB_GET_SPA(&spa, db);
791                 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
792                 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
793         } else {
794                 dbuf_set_data(db, NULL);
795         }
796 }
797
798 void
799 dbuf_unoverride(dbuf_dirty_record_t *dr)
800 {
801         dmu_buf_impl_t *db = dr->dr_dbuf;
802         blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
803         uint64_t txg = dr->dr_txg;
804
805         ASSERT(MUTEX_HELD(&db->db_mtx));
806         ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
807         ASSERT(db->db_level == 0);
808
809         if (db->db_blkid == DMU_BONUS_BLKID ||
810             dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
811                 return;
812
813         ASSERT(db->db_data_pending != dr);
814
815         /* free this block */
816         if (!BP_IS_HOLE(bp)) {
817                 spa_t *spa;
818
819                 DB_GET_SPA(&spa, db);
820                 zio_free(spa, txg, bp);
821         }
822         dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
823         /*
824          * Release the already-written buffer, so we leave it in
825          * a consistent dirty state.  Note that all callers are
826          * modifying the buffer, so they will immediately do
827          * another (redundant) arc_release().  Therefore, leave
828          * the buf thawed to save the effort of freezing &
829          * immediately re-thawing it.
830          */
831         arc_release(dr->dt.dl.dr_data, db);
832 }
833
834 /*
835  * Evict (if its unreferenced) or clear (if its referenced) any level-0
836  * data blocks in the free range, so that any future readers will find
837  * empty blocks.  Also, if we happen accross any level-1 dbufs in the
838  * range that have not already been marked dirty, mark them dirty so
839  * they stay in memory.
840  */
841 void
842 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
843 {
844         dmu_buf_impl_t *db, *db_next;
845         uint64_t txg = tx->tx_txg;
846         int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
847         uint64_t first_l1 = start >> epbs;
848         uint64_t last_l1 = end >> epbs;
849
850         if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
851                 end = dn->dn_maxblkid;
852                 last_l1 = end >> epbs;
853         }
854         dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
855         mutex_enter(&dn->dn_dbufs_mtx);
856         for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
857                 db_next = list_next(&dn->dn_dbufs, db);
858                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
859
860                 if (db->db_level == 1 &&
861                     db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
862                         mutex_enter(&db->db_mtx);
863                         if (db->db_last_dirty &&
864                             db->db_last_dirty->dr_txg < txg) {
865                                 dbuf_add_ref(db, FTAG);
866                                 mutex_exit(&db->db_mtx);
867                                 dbuf_will_dirty(db, tx);
868                                 dbuf_rele(db, FTAG);
869                         } else {
870                                 mutex_exit(&db->db_mtx);
871                         }
872                 }
873
874                 if (db->db_level != 0)
875                         continue;
876                 dprintf_dbuf(db, "found buf %s\n", "");
877                 if (db->db_blkid < start || db->db_blkid > end)
878                         continue;
879
880                 /* found a level 0 buffer in the range */
881                 if (dbuf_undirty(db, tx))
882                         continue;
883
884                 mutex_enter(&db->db_mtx);
885                 if (db->db_state == DB_UNCACHED ||
886                     db->db_state == DB_NOFILL ||
887                     db->db_state == DB_EVICTING) {
888                         ASSERT(db->db.db_data == NULL);
889                         mutex_exit(&db->db_mtx);
890                         continue;
891                 }
892                 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
893                         /* will be handled in dbuf_read_done or dbuf_rele */
894                         db->db_freed_in_flight = TRUE;
895                         mutex_exit(&db->db_mtx);
896                         continue;
897                 }
898                 if (refcount_count(&db->db_holds) == 0) {
899                         ASSERT(db->db_buf);
900                         dbuf_clear(db);
901                         continue;
902                 }
903                 /* The dbuf is referenced */
904
905                 if (db->db_last_dirty != NULL) {
906                         dbuf_dirty_record_t *dr = db->db_last_dirty;
907
908                         if (dr->dr_txg == txg) {
909                                 /*
910                                  * This buffer is "in-use", re-adjust the file
911                                  * size to reflect that this buffer may
912                                  * contain new data when we sync.
913                                  */
914                                 if (db->db_blkid != DMU_SPILL_BLKID &&
915                                     db->db_blkid > dn->dn_maxblkid)
916                                         dn->dn_maxblkid = db->db_blkid;
917                                 dbuf_unoverride(dr);
918                         } else {
919                                 /*
920                                  * This dbuf is not dirty in the open context.
921                                  * Either uncache it (if its not referenced in
922                                  * the open context) or reset its contents to
923                                  * empty.
924                                  */
925                                 dbuf_fix_old_data(db, txg);
926                         }
927                 }
928                 /* clear the contents if its cached */
929                 if (db->db_state == DB_CACHED) {
930                         ASSERT(db->db.db_data != NULL);
931                         arc_release(db->db_buf, db);
932                         bzero(db->db.db_data, db->db.db_size);
933                         arc_buf_freeze(db->db_buf);
934                 }
935
936                 mutex_exit(&db->db_mtx);
937         }
938         mutex_exit(&dn->dn_dbufs_mtx);
939 }
940
941 static int
942 dbuf_block_freeable(dmu_buf_impl_t *db)
943 {
944         dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
945         uint64_t birth_txg = 0;
946
947         /*
948          * We don't need any locking to protect db_blkptr:
949          * If it's syncing, then db_last_dirty will be set
950          * so we'll ignore db_blkptr.
951          */
952         ASSERT(MUTEX_HELD(&db->db_mtx));
953         if (db->db_last_dirty)
954                 birth_txg = db->db_last_dirty->dr_txg;
955         else if (db->db_blkptr)
956                 birth_txg = db->db_blkptr->blk_birth;
957
958         /*
959          * If we don't exist or are in a snapshot, we can't be freed.
960          * Don't pass the bp to dsl_dataset_block_freeable() since we
961          * are holding the db_mtx lock and might deadlock if we are
962          * prefetching a dedup-ed block.
963          */
964         if (birth_txg)
965                 return (ds == NULL ||
966                     dsl_dataset_block_freeable(ds, NULL, birth_txg));
967         else
968                 return (FALSE);
969 }
970
971 void
972 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
973 {
974         arc_buf_t *buf, *obuf;
975         int osize = db->db.db_size;
976         arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
977         dnode_t *dn;
978
979         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
980
981         DB_DNODE_ENTER(db);
982         dn = DB_DNODE(db);
983
984         /* XXX does *this* func really need the lock? */
985         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
986
987         /*
988          * This call to dbuf_will_dirty() with the dn_struct_rwlock held
989          * is OK, because there can be no other references to the db
990          * when we are changing its size, so no concurrent DB_FILL can
991          * be happening.
992          */
993         /*
994          * XXX we should be doing a dbuf_read, checking the return
995          * value and returning that up to our callers
996          */
997         dbuf_will_dirty(db, tx);
998
999         /* create the data buffer for the new block */
1000         buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
1001
1002         /* copy old block data to the new block */
1003         obuf = db->db_buf;
1004         bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
1005         /* zero the remainder */
1006         if (size > osize)
1007                 bzero((uint8_t *)buf->b_data + osize, size - osize);
1008
1009         mutex_enter(&db->db_mtx);
1010         dbuf_set_data(db, buf);
1011         VERIFY(arc_buf_remove_ref(obuf, db) == 1);
1012         db->db.db_size = size;
1013
1014         if (db->db_level == 0) {
1015                 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1016                 db->db_last_dirty->dt.dl.dr_data = buf;
1017         }
1018         mutex_exit(&db->db_mtx);
1019
1020         dnode_willuse_space(dn, size-osize, tx);
1021         DB_DNODE_EXIT(db);
1022 }
1023
1024 void
1025 dbuf_release_bp(dmu_buf_impl_t *db)
1026 {
1027         objset_t *os;
1028         zbookmark_t zb;
1029
1030         DB_GET_OBJSET(&os, db);
1031         ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1032         ASSERT(arc_released(os->os_phys_buf) ||
1033             list_link_active(&os->os_dsl_dataset->ds_synced_link));
1034         ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1035
1036         zb.zb_objset = os->os_dsl_dataset ?
1037             os->os_dsl_dataset->ds_object : 0;
1038         zb.zb_object = db->db.db_object;
1039         zb.zb_level = db->db_level;
1040         zb.zb_blkid = db->db_blkid;
1041         (void) arc_release_bp(db->db_buf, db,
1042             db->db_blkptr, os->os_spa, &zb);
1043 }
1044
1045 dbuf_dirty_record_t *
1046 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1047 {
1048         dnode_t *dn;
1049         objset_t *os;
1050         dbuf_dirty_record_t **drp, *dr;
1051         int drop_struct_lock = FALSE;
1052         boolean_t do_free_accounting = B_FALSE;
1053         int txgoff = tx->tx_txg & TXG_MASK;
1054
1055         ASSERT(tx->tx_txg != 0);
1056         ASSERT(!refcount_is_zero(&db->db_holds));
1057         DMU_TX_DIRTY_BUF(tx, db);
1058
1059         DB_DNODE_ENTER(db);
1060         dn = DB_DNODE(db);
1061         /*
1062          * Shouldn't dirty a regular buffer in syncing context.  Private
1063          * objects may be dirtied in syncing context, but only if they
1064          * were already pre-dirtied in open context.
1065          */
1066         ASSERT(!dmu_tx_is_syncing(tx) ||
1067             BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1068             DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1069             dn->dn_objset->os_dsl_dataset == NULL);
1070         /*
1071          * We make this assert for private objects as well, but after we
1072          * check if we're already dirty.  They are allowed to re-dirty
1073          * in syncing context.
1074          */
1075         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1076             dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1077             (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1078
1079         mutex_enter(&db->db_mtx);
1080         /*
1081          * XXX make this true for indirects too?  The problem is that
1082          * transactions created with dmu_tx_create_assigned() from
1083          * syncing context don't bother holding ahead.
1084          */
1085         ASSERT(db->db_level != 0 ||
1086             db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1087             db->db_state == DB_NOFILL);
1088
1089         mutex_enter(&dn->dn_mtx);
1090         /*
1091          * Don't set dirtyctx to SYNC if we're just modifying this as we
1092          * initialize the objset.
1093          */
1094         if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1095             !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1096                 dn->dn_dirtyctx =
1097                     (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1098                 ASSERT(dn->dn_dirtyctx_firstset == NULL);
1099                 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
1100         }
1101         mutex_exit(&dn->dn_mtx);
1102
1103         if (db->db_blkid == DMU_SPILL_BLKID)
1104                 dn->dn_have_spill = B_TRUE;
1105
1106         /*
1107          * If this buffer is already dirty, we're done.
1108          */
1109         drp = &db->db_last_dirty;
1110         ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1111             db->db.db_object == DMU_META_DNODE_OBJECT);
1112         while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1113                 drp = &dr->dr_next;
1114         if (dr && dr->dr_txg == tx->tx_txg) {
1115                 DB_DNODE_EXIT(db);
1116
1117                 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1118                         /*
1119                          * If this buffer has already been written out,
1120                          * we now need to reset its state.
1121                          */
1122                         dbuf_unoverride(dr);
1123                         if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1124                             db->db_state != DB_NOFILL)
1125                                 arc_buf_thaw(db->db_buf);
1126                 }
1127                 mutex_exit(&db->db_mtx);
1128                 return (dr);
1129         }
1130
1131         /*
1132          * Only valid if not already dirty.
1133          */
1134         ASSERT(dn->dn_object == 0 ||
1135             dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1136             (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1137
1138         ASSERT3U(dn->dn_nlevels, >, db->db_level);
1139         ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1140             dn->dn_phys->dn_nlevels > db->db_level ||
1141             dn->dn_next_nlevels[txgoff] > db->db_level ||
1142             dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1143             dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1144
1145         /*
1146          * We should only be dirtying in syncing context if it's the
1147          * mos or we're initializing the os or it's a special object.
1148          * However, we are allowed to dirty in syncing context provided
1149          * we already dirtied it in open context.  Hence we must make
1150          * this assertion only if we're not already dirty.
1151          */
1152         os = dn->dn_objset;
1153         ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1154             os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1155         ASSERT(db->db.db_size != 0);
1156
1157         dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1158
1159         if (db->db_blkid != DMU_BONUS_BLKID) {
1160                 /*
1161                  * Update the accounting.
1162                  * Note: we delay "free accounting" until after we drop
1163                  * the db_mtx.  This keeps us from grabbing other locks
1164                  * (and possibly deadlocking) in bp_get_dsize() while
1165                  * also holding the db_mtx.
1166                  */
1167                 dnode_willuse_space(dn, db->db.db_size, tx);
1168                 do_free_accounting = dbuf_block_freeable(db);
1169         }
1170
1171         /*
1172          * If this buffer is dirty in an old transaction group we need
1173          * to make a copy of it so that the changes we make in this
1174          * transaction group won't leak out when we sync the older txg.
1175          */
1176         dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
1177         list_link_init(&dr->dr_dirty_node);
1178         if (db->db_level == 0) {
1179                 void *data_old = db->db_buf;
1180
1181                 if (db->db_state != DB_NOFILL) {
1182                         if (db->db_blkid == DMU_BONUS_BLKID) {
1183                                 dbuf_fix_old_data(db, tx->tx_txg);
1184                                 data_old = db->db.db_data;
1185                         } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1186                                 /*
1187                                  * Release the data buffer from the cache so
1188                                  * that we can modify it without impacting
1189                                  * possible other users of this cached data
1190                                  * block.  Note that indirect blocks and
1191                                  * private objects are not released until the
1192                                  * syncing state (since they are only modified
1193                                  * then).
1194                                  */
1195                                 arc_release(db->db_buf, db);
1196                                 dbuf_fix_old_data(db, tx->tx_txg);
1197                                 data_old = db->db_buf;
1198                         }
1199                         ASSERT(data_old != NULL);
1200                 }
1201                 dr->dt.dl.dr_data = data_old;
1202         } else {
1203                 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1204                 list_create(&dr->dt.di.dr_children,
1205                     sizeof (dbuf_dirty_record_t),
1206                     offsetof(dbuf_dirty_record_t, dr_dirty_node));
1207         }
1208         dr->dr_dbuf = db;
1209         dr->dr_txg = tx->tx_txg;
1210         dr->dr_next = *drp;
1211         *drp = dr;
1212
1213         /*
1214          * We could have been freed_in_flight between the dbuf_noread
1215          * and dbuf_dirty.  We win, as though the dbuf_noread() had
1216          * happened after the free.
1217          */
1218         if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1219             db->db_blkid != DMU_SPILL_BLKID) {
1220                 mutex_enter(&dn->dn_mtx);
1221                 dnode_clear_range(dn, db->db_blkid, 1, tx);
1222                 mutex_exit(&dn->dn_mtx);
1223                 db->db_freed_in_flight = FALSE;
1224         }
1225
1226         /*
1227          * This buffer is now part of this txg
1228          */
1229         dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1230         db->db_dirtycnt += 1;
1231         ASSERT3U(db->db_dirtycnt, <=, 3);
1232
1233         mutex_exit(&db->db_mtx);
1234
1235         if (db->db_blkid == DMU_BONUS_BLKID ||
1236             db->db_blkid == DMU_SPILL_BLKID) {
1237                 mutex_enter(&dn->dn_mtx);
1238                 ASSERT(!list_link_active(&dr->dr_dirty_node));
1239                 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1240                 mutex_exit(&dn->dn_mtx);
1241                 dnode_setdirty(dn, tx);
1242                 DB_DNODE_EXIT(db);
1243                 return (dr);
1244         } else if (do_free_accounting) {
1245                 blkptr_t *bp = db->db_blkptr;
1246                 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1247                     bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1248                 /*
1249                  * This is only a guess -- if the dbuf is dirty
1250                  * in a previous txg, we don't know how much
1251                  * space it will use on disk yet.  We should
1252                  * really have the struct_rwlock to access
1253                  * db_blkptr, but since this is just a guess,
1254                  * it's OK if we get an odd answer.
1255                  */
1256                 ddt_prefetch(os->os_spa, bp);
1257                 dnode_willuse_space(dn, -willfree, tx);
1258         }
1259
1260         if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1261                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1262                 drop_struct_lock = TRUE;
1263         }
1264
1265         if (db->db_level == 0) {
1266                 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1267                 ASSERT(dn->dn_maxblkid >= db->db_blkid);
1268         }
1269
1270         if (db->db_level+1 < dn->dn_nlevels) {
1271                 dmu_buf_impl_t *parent = db->db_parent;
1272                 dbuf_dirty_record_t *di;
1273                 int parent_held = FALSE;
1274
1275                 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1276                         int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1277
1278                         parent = dbuf_hold_level(dn, db->db_level+1,
1279                             db->db_blkid >> epbs, FTAG);
1280                         ASSERT(parent != NULL);
1281                         parent_held = TRUE;
1282                 }
1283                 if (drop_struct_lock)
1284                         rw_exit(&dn->dn_struct_rwlock);
1285                 ASSERT3U(db->db_level+1, ==, parent->db_level);
1286                 di = dbuf_dirty(parent, tx);
1287                 if (parent_held)
1288                         dbuf_rele(parent, FTAG);
1289
1290                 mutex_enter(&db->db_mtx);
1291                 /*  possible race with dbuf_undirty() */
1292                 if (db->db_last_dirty == dr ||
1293                     dn->dn_object == DMU_META_DNODE_OBJECT) {
1294                         mutex_enter(&di->dt.di.dr_mtx);
1295                         ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1296                         ASSERT(!list_link_active(&dr->dr_dirty_node));
1297                         list_insert_tail(&di->dt.di.dr_children, dr);
1298                         mutex_exit(&di->dt.di.dr_mtx);
1299                         dr->dr_parent = di;
1300                 }
1301                 mutex_exit(&db->db_mtx);
1302         } else {
1303                 ASSERT(db->db_level+1 == dn->dn_nlevels);
1304                 ASSERT(db->db_blkid < dn->dn_nblkptr);
1305                 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1306                 mutex_enter(&dn->dn_mtx);
1307                 ASSERT(!list_link_active(&dr->dr_dirty_node));
1308                 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1309                 mutex_exit(&dn->dn_mtx);
1310                 if (drop_struct_lock)
1311                         rw_exit(&dn->dn_struct_rwlock);
1312         }
1313
1314         dnode_setdirty(dn, tx);
1315         DB_DNODE_EXIT(db);
1316         return (dr);
1317 }
1318
1319 static int
1320 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1321 {
1322         dnode_t *dn;
1323         uint64_t txg = tx->tx_txg;
1324         dbuf_dirty_record_t *dr, **drp;
1325
1326         ASSERT(txg != 0);
1327         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1328
1329         mutex_enter(&db->db_mtx);
1330         /*
1331          * If this buffer is not dirty, we're done.
1332          */
1333         for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1334                 if (dr->dr_txg <= txg)
1335                         break;
1336         if (dr == NULL || dr->dr_txg < txg) {
1337                 mutex_exit(&db->db_mtx);
1338                 return (0);
1339         }
1340         ASSERT(dr->dr_txg == txg);
1341         ASSERT(dr->dr_dbuf == db);
1342
1343         DB_DNODE_ENTER(db);
1344         dn = DB_DNODE(db);
1345
1346         /*
1347          * If this buffer is currently held, we cannot undirty
1348          * it, since one of the current holders may be in the
1349          * middle of an update.  Note that users of dbuf_undirty()
1350          * should not place a hold on the dbuf before the call.
1351          * Also note: we can get here with a spill block, so
1352          * test for that similar to how dbuf_dirty does.
1353          */
1354         if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
1355                 mutex_exit(&db->db_mtx);
1356                 /* Make sure we don't toss this buffer at sync phase */
1357                 if (db->db_blkid != DMU_SPILL_BLKID) {
1358                         mutex_enter(&dn->dn_mtx);
1359                         dnode_clear_range(dn, db->db_blkid, 1, tx);
1360                         mutex_exit(&dn->dn_mtx);
1361                 }
1362                 DB_DNODE_EXIT(db);
1363                 return (0);
1364         }
1365
1366         dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1367
1368         ASSERT(db->db.db_size != 0);
1369
1370         /* XXX would be nice to fix up dn_towrite_space[] */
1371
1372         *drp = dr->dr_next;
1373
1374         /*
1375          * Note that there are three places in dbuf_dirty()
1376          * where this dirty record may be put on a list.
1377          * Make sure to do a list_remove corresponding to
1378          * every one of those list_insert calls.
1379          */
1380         if (dr->dr_parent) {
1381                 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1382                 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1383                 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1384         } else if (db->db_blkid == DMU_SPILL_BLKID ||
1385             db->db_level+1 == dn->dn_nlevels) {
1386                 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1387                 mutex_enter(&dn->dn_mtx);
1388                 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1389                 mutex_exit(&dn->dn_mtx);
1390         }
1391         DB_DNODE_EXIT(db);
1392
1393         if (db->db_level == 0) {
1394                 if (db->db_state != DB_NOFILL) {
1395                         dbuf_unoverride(dr);
1396
1397                         ASSERT(db->db_buf != NULL);
1398                         ASSERT(dr->dt.dl.dr_data != NULL);
1399                         if (dr->dt.dl.dr_data != db->db_buf)
1400                                 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
1401                                     db) == 1);
1402                 }
1403         } else {
1404                 ASSERT(db->db_buf != NULL);
1405                 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
1406                 mutex_destroy(&dr->dt.di.dr_mtx);
1407                 list_destroy(&dr->dt.di.dr_children);
1408         }
1409         kmem_free(dr, sizeof (dbuf_dirty_record_t));
1410
1411         ASSERT(db->db_dirtycnt > 0);
1412         db->db_dirtycnt -= 1;
1413
1414         if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1415                 arc_buf_t *buf = db->db_buf;
1416
1417                 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1418                 dbuf_set_data(db, NULL);
1419                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1420                 dbuf_evict(db);
1421                 return (1);
1422         }
1423
1424         mutex_exit(&db->db_mtx);
1425         return (0);
1426 }
1427
1428 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1429 void
1430 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1431 {
1432         int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1433
1434         ASSERT(tx->tx_txg != 0);
1435         ASSERT(!refcount_is_zero(&db->db_holds));
1436
1437         DB_DNODE_ENTER(db);
1438         if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1439                 rf |= DB_RF_HAVESTRUCT;
1440         DB_DNODE_EXIT(db);
1441         (void) dbuf_read(db, NULL, rf);
1442         (void) dbuf_dirty(db, tx);
1443 }
1444
1445 void
1446 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1447 {
1448         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1449
1450         db->db_state = DB_NOFILL;
1451
1452         dmu_buf_will_fill(db_fake, tx);
1453 }
1454
1455 void
1456 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1457 {
1458         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1459
1460         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1461         ASSERT(tx->tx_txg != 0);
1462         ASSERT(db->db_level == 0);
1463         ASSERT(!refcount_is_zero(&db->db_holds));
1464
1465         ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1466             dmu_tx_private_ok(tx));
1467
1468         dbuf_noread(db);
1469         (void) dbuf_dirty(db, tx);
1470 }
1471
1472 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1473 /* ARGSUSED */
1474 void
1475 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1476 {
1477         mutex_enter(&db->db_mtx);
1478         DBUF_VERIFY(db);
1479
1480         if (db->db_state == DB_FILL) {
1481                 if (db->db_level == 0 && db->db_freed_in_flight) {
1482                         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1483                         /* we were freed while filling */
1484                         /* XXX dbuf_undirty? */
1485                         bzero(db->db.db_data, db->db.db_size);
1486                         db->db_freed_in_flight = FALSE;
1487                 }
1488                 db->db_state = DB_CACHED;
1489                 cv_broadcast(&db->db_changed);
1490         }
1491         mutex_exit(&db->db_mtx);
1492 }
1493
1494 /*
1495  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1496  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1497  */
1498 void
1499 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1500 {
1501         ASSERT(!refcount_is_zero(&db->db_holds));
1502         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1503         ASSERT(db->db_level == 0);
1504         ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1505         ASSERT(buf != NULL);
1506         ASSERT(arc_buf_size(buf) == db->db.db_size);
1507         ASSERT(tx->tx_txg != 0);
1508
1509         arc_return_buf(buf, db);
1510         ASSERT(arc_released(buf));
1511
1512         mutex_enter(&db->db_mtx);
1513
1514         while (db->db_state == DB_READ || db->db_state == DB_FILL)
1515                 cv_wait(&db->db_changed, &db->db_mtx);
1516
1517         ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1518
1519         if (db->db_state == DB_CACHED &&
1520             refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1521                 mutex_exit(&db->db_mtx);
1522                 (void) dbuf_dirty(db, tx);
1523                 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1524                 VERIFY(arc_buf_remove_ref(buf, db) == 1);
1525                 xuio_stat_wbuf_copied();
1526                 return;
1527         }
1528
1529         xuio_stat_wbuf_nocopy();
1530         if (db->db_state == DB_CACHED) {
1531                 dbuf_dirty_record_t *dr = db->db_last_dirty;
1532
1533                 ASSERT(db->db_buf != NULL);
1534                 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1535                         ASSERT(dr->dt.dl.dr_data == db->db_buf);
1536                         if (!arc_released(db->db_buf)) {
1537                                 ASSERT(dr->dt.dl.dr_override_state ==
1538                                     DR_OVERRIDDEN);
1539                                 arc_release(db->db_buf, db);
1540                         }
1541                         dr->dt.dl.dr_data = buf;
1542                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1543                 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1544                         arc_release(db->db_buf, db);
1545                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1);
1546                 }
1547                 db->db_buf = NULL;
1548         }
1549         ASSERT(db->db_buf == NULL);
1550         dbuf_set_data(db, buf);
1551         db->db_state = DB_FILL;
1552         mutex_exit(&db->db_mtx);
1553         (void) dbuf_dirty(db, tx);
1554         dbuf_fill_done(db, tx);
1555 }
1556
1557 /*
1558  * "Clear" the contents of this dbuf.  This will mark the dbuf
1559  * EVICTING and clear *most* of its references.  Unfortunetely,
1560  * when we are not holding the dn_dbufs_mtx, we can't clear the
1561  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1562  * in this case.  For callers from the DMU we will usually see:
1563  *      dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1564  * For the arc callback, we will usually see:
1565  *      dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1566  * Sometimes, though, we will get a mix of these two:
1567  *      DMU: dbuf_clear()->arc_buf_evict()
1568  *      ARC: dbuf_do_evict()->dbuf_destroy()
1569  */
1570 void
1571 dbuf_clear(dmu_buf_impl_t *db)
1572 {
1573         dnode_t *dn;
1574         dmu_buf_impl_t *parent = db->db_parent;
1575         dmu_buf_impl_t *dndb;
1576         int dbuf_gone = FALSE;
1577
1578         ASSERT(MUTEX_HELD(&db->db_mtx));
1579         ASSERT(refcount_is_zero(&db->db_holds));
1580
1581         dbuf_evict_user(db);
1582
1583         if (db->db_state == DB_CACHED) {
1584                 ASSERT(db->db.db_data != NULL);
1585                 if (db->db_blkid == DMU_BONUS_BLKID) {
1586                         zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1587                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1588                 }
1589                 db->db.db_data = NULL;
1590                 db->db_state = DB_UNCACHED;
1591         }
1592
1593         ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1594         ASSERT(db->db_data_pending == NULL);
1595
1596         db->db_state = DB_EVICTING;
1597         db->db_blkptr = NULL;
1598
1599         DB_DNODE_ENTER(db);
1600         dn = DB_DNODE(db);
1601         dndb = dn->dn_dbuf;
1602         if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1603                 list_remove(&dn->dn_dbufs, db);
1604                 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1605                 membar_producer();
1606                 DB_DNODE_EXIT(db);
1607                 /*
1608                  * Decrementing the dbuf count means that the hold corresponding
1609                  * to the removed dbuf is no longer discounted in dnode_move(),
1610                  * so the dnode cannot be moved until after we release the hold.
1611                  * The membar_producer() ensures visibility of the decremented
1612                  * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1613                  * release any lock.
1614                  */
1615                 dnode_rele(dn, db);
1616                 db->db_dnode_handle = NULL;
1617         } else {
1618                 DB_DNODE_EXIT(db);
1619         }
1620
1621         if (db->db_buf)
1622                 dbuf_gone = arc_buf_evict(db->db_buf);
1623
1624         if (!dbuf_gone)
1625                 mutex_exit(&db->db_mtx);
1626
1627         /*
1628          * If this dbuf is referenced from an indirect dbuf,
1629          * decrement the ref count on the indirect dbuf.
1630          */
1631         if (parent && parent != dndb)
1632                 dbuf_rele(parent, db);
1633 }
1634
1635 __attribute__((always_inline))
1636 static inline int
1637 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1638     dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
1639 {
1640         int nlevels, epbs;
1641
1642         *parentp = NULL;
1643         *bpp = NULL;
1644
1645         ASSERT(blkid != DMU_BONUS_BLKID);
1646
1647         if (blkid == DMU_SPILL_BLKID) {
1648                 mutex_enter(&dn->dn_mtx);
1649                 if (dn->dn_have_spill &&
1650                     (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1651                         *bpp = &dn->dn_phys->dn_spill;
1652                 else
1653                         *bpp = NULL;
1654                 dbuf_add_ref(dn->dn_dbuf, NULL);
1655                 *parentp = dn->dn_dbuf;
1656                 mutex_exit(&dn->dn_mtx);
1657                 return (0);
1658         }
1659
1660         if (dn->dn_phys->dn_nlevels == 0)
1661                 nlevels = 1;
1662         else
1663                 nlevels = dn->dn_phys->dn_nlevels;
1664
1665         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1666
1667         ASSERT3U(level * epbs, <, 64);
1668         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1669         if (level >= nlevels ||
1670             (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1671                 /* the buffer has no parent yet */
1672                 return (ENOENT);
1673         } else if (level < nlevels-1) {
1674                 /* this block is referenced from an indirect block */
1675                 int err;
1676                 if (dh == NULL) {
1677                         err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
1678                                         fail_sparse, NULL, parentp);
1679                 }
1680                 else {
1681                         __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
1682                                         blkid >> epbs, fail_sparse, NULL,
1683                                         parentp, dh->dh_depth + 1);
1684                         err = __dbuf_hold_impl(dh + 1);
1685                 }
1686                 if (err)
1687                         return (err);
1688                 err = dbuf_read(*parentp, NULL,
1689                     (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1690                 if (err) {
1691                         dbuf_rele(*parentp, NULL);
1692                         *parentp = NULL;
1693                         return (err);
1694                 }
1695                 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1696                     (blkid & ((1ULL << epbs) - 1));
1697                 return (0);
1698         } else {
1699                 /* the block is referenced from the dnode */
1700                 ASSERT3U(level, ==, nlevels-1);
1701                 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1702                     blkid < dn->dn_phys->dn_nblkptr);
1703                 if (dn->dn_dbuf) {
1704                         dbuf_add_ref(dn->dn_dbuf, NULL);
1705                         *parentp = dn->dn_dbuf;
1706                 }
1707                 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1708                 return (0);
1709         }
1710 }
1711
1712 static dmu_buf_impl_t *
1713 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1714     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1715 {
1716         objset_t *os = dn->dn_objset;
1717         dmu_buf_impl_t *db, *odb;
1718
1719         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1720         ASSERT(dn->dn_type != DMU_OT_NONE);
1721
1722         db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1723
1724         db->db_objset = os;
1725         db->db.db_object = dn->dn_object;
1726         db->db_level = level;
1727         db->db_blkid = blkid;
1728         db->db_last_dirty = NULL;
1729         db->db_dirtycnt = 0;
1730         db->db_dnode_handle = dn->dn_handle;
1731         db->db_parent = parent;
1732         db->db_blkptr = blkptr;
1733
1734         db->db_user_ptr = NULL;
1735         db->db_user_data_ptr_ptr = NULL;
1736         db->db_evict_func = NULL;
1737         db->db_immediate_evict = 0;
1738         db->db_freed_in_flight = 0;
1739
1740         if (blkid == DMU_BONUS_BLKID) {
1741                 ASSERT3P(parent, ==, dn->dn_dbuf);
1742                 db->db.db_size = DN_MAX_BONUSLEN -
1743                     (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1744                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1745                 db->db.db_offset = DMU_BONUS_BLKID;
1746                 db->db_state = DB_UNCACHED;
1747                 /* the bonus dbuf is not placed in the hash table */
1748                 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1749                 return (db);
1750         } else if (blkid == DMU_SPILL_BLKID) {
1751                 db->db.db_size = (blkptr != NULL) ?
1752                     BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1753                 db->db.db_offset = 0;
1754         } else {
1755                 int blocksize =
1756                     db->db_level ? 1<<dn->dn_indblkshift :  dn->dn_datablksz;
1757                 db->db.db_size = blocksize;
1758                 db->db.db_offset = db->db_blkid * blocksize;
1759         }
1760
1761         /*
1762          * Hold the dn_dbufs_mtx while we get the new dbuf
1763          * in the hash table *and* added to the dbufs list.
1764          * This prevents a possible deadlock with someone
1765          * trying to look up this dbuf before its added to the
1766          * dn_dbufs list.
1767          */
1768         mutex_enter(&dn->dn_dbufs_mtx);
1769         db->db_state = DB_EVICTING;
1770         if ((odb = dbuf_hash_insert(db)) != NULL) {
1771                 /* someone else inserted it first */
1772                 kmem_cache_free(dbuf_cache, db);
1773                 mutex_exit(&dn->dn_dbufs_mtx);
1774                 return (odb);
1775         }
1776         list_insert_head(&dn->dn_dbufs, db);
1777         db->db_state = DB_UNCACHED;
1778         mutex_exit(&dn->dn_dbufs_mtx);
1779         arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1780
1781         if (parent && parent != dn->dn_dbuf)
1782                 dbuf_add_ref(parent, db);
1783
1784         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1785             refcount_count(&dn->dn_holds) > 0);
1786         (void) refcount_add(&dn->dn_holds, db);
1787         (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1788
1789         dprintf_dbuf(db, "db=%p\n", db);
1790
1791         return (db);
1792 }
1793
1794 static int
1795 dbuf_do_evict(void *private)
1796 {
1797         arc_buf_t *buf = private;
1798         dmu_buf_impl_t *db = buf->b_private;
1799
1800         if (!MUTEX_HELD(&db->db_mtx))
1801                 mutex_enter(&db->db_mtx);
1802
1803         ASSERT(refcount_is_zero(&db->db_holds));
1804
1805         if (db->db_state != DB_EVICTING) {
1806                 ASSERT(db->db_state == DB_CACHED);
1807                 DBUF_VERIFY(db);
1808                 db->db_buf = NULL;
1809                 dbuf_evict(db);
1810         } else {
1811                 mutex_exit(&db->db_mtx);
1812                 dbuf_destroy(db);
1813         }
1814         return (0);
1815 }
1816
1817 static void
1818 dbuf_destroy(dmu_buf_impl_t *db)
1819 {
1820         ASSERT(refcount_is_zero(&db->db_holds));
1821
1822         if (db->db_blkid != DMU_BONUS_BLKID) {
1823                 /*
1824                  * If this dbuf is still on the dn_dbufs list,
1825                  * remove it from that list.
1826                  */
1827                 if (db->db_dnode_handle != NULL) {
1828                         dnode_t *dn;
1829
1830                         DB_DNODE_ENTER(db);
1831                         dn = DB_DNODE(db);
1832                         mutex_enter(&dn->dn_dbufs_mtx);
1833                         list_remove(&dn->dn_dbufs, db);
1834                         (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1835                         mutex_exit(&dn->dn_dbufs_mtx);
1836                         DB_DNODE_EXIT(db);
1837                         /*
1838                          * Decrementing the dbuf count means that the hold
1839                          * corresponding to the removed dbuf is no longer
1840                          * discounted in dnode_move(), so the dnode cannot be
1841                          * moved until after we release the hold.
1842                          */
1843                         dnode_rele(dn, db);
1844                         db->db_dnode_handle = NULL;
1845                 }
1846                 dbuf_hash_remove(db);
1847         }
1848         db->db_parent = NULL;
1849         db->db_buf = NULL;
1850
1851         ASSERT(!list_link_active(&db->db_link));
1852         ASSERT(db->db.db_data == NULL);
1853         ASSERT(db->db_hash_next == NULL);
1854         ASSERT(db->db_blkptr == NULL);
1855         ASSERT(db->db_data_pending == NULL);
1856
1857         kmem_cache_free(dbuf_cache, db);
1858         arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1859 }
1860
1861 void
1862 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1863 {
1864         dmu_buf_impl_t *db = NULL;
1865         blkptr_t *bp = NULL;
1866
1867         ASSERT(blkid != DMU_BONUS_BLKID);
1868         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1869
1870         if (dnode_block_freed(dn, blkid))
1871                 return;
1872
1873         /* dbuf_find() returns with db_mtx held */
1874         if ((db = dbuf_find(dn, 0, blkid))) {
1875                 /*
1876                  * This dbuf is already in the cache.  We assume that
1877                  * it is already CACHED, or else about to be either
1878                  * read or filled.
1879                  */
1880                 mutex_exit(&db->db_mtx);
1881                 return;
1882         }
1883
1884         if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
1885                 if (bp && !BP_IS_HOLE(bp)) {
1886                         int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1887                             ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1888                         arc_buf_t *pbuf;
1889                         dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1890                         uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1891                         zbookmark_t zb;
1892
1893                         SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1894                             dn->dn_object, 0, blkid);
1895
1896                         if (db)
1897                                 pbuf = db->db_buf;
1898                         else
1899                                 pbuf = dn->dn_objset->os_phys_buf;
1900
1901                         (void) dsl_read(NULL, dn->dn_objset->os_spa,
1902                             bp, pbuf, NULL, NULL, priority,
1903                             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1904                             &aflags, &zb);
1905                 }
1906                 if (db)
1907                         dbuf_rele(db, NULL);
1908         }
1909 }
1910
1911 #define DBUF_HOLD_IMPL_MAX_DEPTH        20
1912
1913 /*
1914  * Returns with db_holds incremented, and db_mtx not held.
1915  * Note: dn_struct_rwlock must be held.
1916  */
1917 static int
1918 __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
1919 {
1920         ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
1921         dh->dh_parent = NULL;
1922
1923         ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
1924         ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
1925         ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
1926
1927         *(dh->dh_dbp) = NULL;
1928 top:
1929         /* dbuf_find() returns with db_mtx held */
1930         dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);
1931
1932         if (dh->dh_db == NULL) {
1933                 dh->dh_bp = NULL;
1934
1935                 ASSERT3P(dh->dh_parent, ==, NULL);
1936                 dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1937                                         dh->dh_fail_sparse, &dh->dh_parent,
1938                                         &dh->dh_bp, dh);
1939                 if (dh->dh_fail_sparse) {
1940                         if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
1941                                 dh->dh_err = ENOENT;
1942                         if (dh->dh_err) {
1943                                 if (dh->dh_parent)
1944                                         dbuf_rele(dh->dh_parent, NULL);
1945                                 return (dh->dh_err);
1946                         }
1947                 }
1948                 if (dh->dh_err && dh->dh_err != ENOENT)
1949                         return (dh->dh_err);
1950                 dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
1951                                         dh->dh_parent, dh->dh_bp);
1952         }
1953
1954         if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
1955                 arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
1956                 if (dh->dh_db->db_buf->b_data == NULL) {
1957                         dbuf_clear(dh->dh_db);
1958                         if (dh->dh_parent) {
1959                                 dbuf_rele(dh->dh_parent, NULL);
1960                                 dh->dh_parent = NULL;
1961                         }
1962                         goto top;
1963                 }
1964                 ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
1965         }
1966
1967         ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
1968
1969         /*
1970          * If this buffer is currently syncing out, and we are are
1971          * still referencing it from db_data, we need to make a copy
1972          * of it in case we decide we want to dirty it again in this txg.
1973          */
1974         if (dh->dh_db->db_level == 0 &&
1975             dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
1976             dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
1977             dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
1978                 dh->dh_dr = dh->dh_db->db_data_pending;
1979
1980                 if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
1981                         dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);
1982
1983                         dbuf_set_data(dh->dh_db,
1984                             arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
1985                             dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
1986                         bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
1987                             dh->dh_db->db.db_data, dh->dh_db->db.db_size);
1988                 }
1989         }
1990
1991         (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
1992         dbuf_update_data(dh->dh_db);
1993         DBUF_VERIFY(dh->dh_db);
1994         mutex_exit(&dh->dh_db->db_mtx);
1995
1996         /* NOTE: we can't rele the parent until after we drop the db_mtx */
1997         if (dh->dh_parent)
1998                 dbuf_rele(dh->dh_parent, NULL);
1999
2000         ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
2001         ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
2002         ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
2003         *(dh->dh_dbp) = dh->dh_db;
2004
2005         return (0);
2006 }
2007
2008 /*
2009  * The following code preserves the recursive function dbuf_hold_impl()
2010  * but moves the local variables AND function arguments to the heap to
2011  * minimize the stack frame size.  Enough space is initially allocated
2012  * on the stack for 20 levels of recursion.
2013  */
2014 int
2015 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
2016     void *tag, dmu_buf_impl_t **dbp)
2017 {
2018         struct dbuf_hold_impl_data *dh;
2019         int error;
2020
2021         dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) *
2022             DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
2023         __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);
2024
2025         error = __dbuf_hold_impl(dh);
2026
2027         kmem_free(dh, sizeof(struct dbuf_hold_impl_data) *
2028             DBUF_HOLD_IMPL_MAX_DEPTH);
2029
2030         return (error);
2031 }
2032
2033 static void
2034 __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
2035     dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
2036     void *tag, dmu_buf_impl_t **dbp, int depth)
2037 {
2038         dh->dh_dn = dn;
2039         dh->dh_level = level;
2040         dh->dh_blkid = blkid;
2041         dh->dh_fail_sparse = fail_sparse;
2042         dh->dh_tag = tag;
2043         dh->dh_dbp = dbp;
2044         dh->dh_depth = depth;
2045 }
2046
2047 dmu_buf_impl_t *
2048 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2049 {
2050         dmu_buf_impl_t *db;
2051         int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
2052         return (err ? NULL : db);
2053 }
2054
2055 dmu_buf_impl_t *
2056 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2057 {
2058         dmu_buf_impl_t *db;
2059         int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
2060         return (err ? NULL : db);
2061 }
2062
2063 void
2064 dbuf_create_bonus(dnode_t *dn)
2065 {
2066         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2067
2068         ASSERT(dn->dn_bonus == NULL);
2069         dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2070 }
2071
2072 int
2073 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2074 {
2075         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2076         dnode_t *dn;
2077
2078         if (db->db_blkid != DMU_SPILL_BLKID)
2079                 return (ENOTSUP);
2080         if (blksz == 0)
2081                 blksz = SPA_MINBLOCKSIZE;
2082         if (blksz > SPA_MAXBLOCKSIZE)
2083                 blksz = SPA_MAXBLOCKSIZE;
2084         else
2085                 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2086
2087         DB_DNODE_ENTER(db);
2088         dn = DB_DNODE(db);
2089         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2090         dbuf_new_size(db, blksz, tx);
2091         rw_exit(&dn->dn_struct_rwlock);
2092         DB_DNODE_EXIT(db);
2093
2094         return (0);
2095 }
2096
2097 void
2098 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2099 {
2100         dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2101 }
2102
2103 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2104 void
2105 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2106 {
2107         VERIFY(refcount_add(&db->db_holds, tag) > 1);
2108 }
2109
2110 /*
2111  * If you call dbuf_rele() you had better not be referencing the dnode handle
2112  * unless you have some other direct or indirect hold on the dnode. (An indirect
2113  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2114  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2115  * dnode's parent dbuf evicting its dnode handles.
2116  */
2117 #pragma weak dmu_buf_rele = dbuf_rele
2118 void
2119 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2120 {
2121         mutex_enter(&db->db_mtx);
2122         dbuf_rele_and_unlock(db, tag);
2123 }
2124
2125 /*
2126  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2127  * db_dirtycnt and db_holds to be updated atomically.
2128  */
2129 void
2130 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2131 {
2132         int64_t holds;
2133
2134         ASSERT(MUTEX_HELD(&db->db_mtx));
2135         DBUF_VERIFY(db);
2136
2137         /*
2138          * Remove the reference to the dbuf before removing its hold on the
2139          * dnode so we can guarantee in dnode_move() that a referenced bonus
2140          * buffer has a corresponding dnode hold.
2141          */
2142         holds = refcount_remove(&db->db_holds, tag);
2143         ASSERT(holds >= 0);
2144
2145         /*
2146          * We can't freeze indirects if there is a possibility that they
2147          * may be modified in the current syncing context.
2148          */
2149         if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2150                 arc_buf_freeze(db->db_buf);
2151
2152         if (holds == db->db_dirtycnt &&
2153             db->db_level == 0 && db->db_immediate_evict)
2154                 dbuf_evict_user(db);
2155
2156         if (holds == 0) {
2157                 if (db->db_blkid == DMU_BONUS_BLKID) {
2158                         mutex_exit(&db->db_mtx);
2159
2160                         /*
2161                          * If the dnode moves here, we cannot cross this barrier
2162                          * until the move completes.
2163                          */
2164                         DB_DNODE_ENTER(db);
2165                         (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2166                         DB_DNODE_EXIT(db);
2167                         /*
2168                          * The bonus buffer's dnode hold is no longer discounted
2169                          * in dnode_move(). The dnode cannot move until after
2170                          * the dnode_rele().
2171                          */
2172                         dnode_rele(DB_DNODE(db), db);
2173                 } else if (db->db_buf == NULL) {
2174                         /*
2175                          * This is a special case: we never associated this
2176                          * dbuf with any data allocated from the ARC.
2177                          */
2178                         ASSERT(db->db_state == DB_UNCACHED ||
2179                             db->db_state == DB_NOFILL);
2180                         dbuf_evict(db);
2181                 } else if (arc_released(db->db_buf)) {
2182                         arc_buf_t *buf = db->db_buf;
2183                         /*
2184                          * This dbuf has anonymous data associated with it.
2185                          */
2186                         dbuf_set_data(db, NULL);
2187                         VERIFY(arc_buf_remove_ref(buf, db) == 1);
2188                         dbuf_evict(db);
2189                 } else {
2190                         VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
2191                         if (!DBUF_IS_CACHEABLE(db))
2192                                 dbuf_clear(db);
2193                         else
2194                                 mutex_exit(&db->db_mtx);
2195                 }
2196         } else {
2197                 mutex_exit(&db->db_mtx);
2198         }
2199 }
2200
2201 #pragma weak dmu_buf_refcount = dbuf_refcount
2202 uint64_t
2203 dbuf_refcount(dmu_buf_impl_t *db)
2204 {
2205         return (refcount_count(&db->db_holds));
2206 }
2207
2208 void *
2209 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2210     dmu_buf_evict_func_t *evict_func)
2211 {
2212         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2213             user_data_ptr_ptr, evict_func));
2214 }
2215
2216 void *
2217 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2218     dmu_buf_evict_func_t *evict_func)
2219 {
2220         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2221
2222         db->db_immediate_evict = TRUE;
2223         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2224             user_data_ptr_ptr, evict_func));
2225 }
2226
2227 void *
2228 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2229     void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2230 {
2231         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2232         ASSERT(db->db_level == 0);
2233
2234         ASSERT((user_ptr == NULL) == (evict_func == NULL));
2235
2236         mutex_enter(&db->db_mtx);
2237
2238         if (db->db_user_ptr == old_user_ptr) {
2239                 db->db_user_ptr = user_ptr;
2240                 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2241                 db->db_evict_func = evict_func;
2242
2243                 dbuf_update_data(db);
2244         } else {
2245                 old_user_ptr = db->db_user_ptr;
2246         }
2247
2248         mutex_exit(&db->db_mtx);
2249         return (old_user_ptr);
2250 }
2251
2252 void *
2253 dmu_buf_get_user(dmu_buf_t *db_fake)
2254 {
2255         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2256         ASSERT(!refcount_is_zero(&db->db_holds));
2257
2258         return (db->db_user_ptr);
2259 }
2260
2261 boolean_t
2262 dmu_buf_freeable(dmu_buf_t *dbuf)
2263 {
2264         boolean_t res = B_FALSE;
2265         dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2266
2267         if (db->db_blkptr)
2268                 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2269                     db->db_blkptr, db->db_blkptr->blk_birth);
2270
2271         return (res);
2272 }
2273
2274 static void
2275 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2276 {
2277         /* ASSERT(dmu_tx_is_syncing(tx) */
2278         ASSERT(MUTEX_HELD(&db->db_mtx));
2279
2280         if (db->db_blkptr != NULL)
2281                 return;
2282
2283         if (db->db_blkid == DMU_SPILL_BLKID) {
2284                 db->db_blkptr = &dn->dn_phys->dn_spill;
2285                 BP_ZERO(db->db_blkptr);
2286                 return;
2287         }
2288         if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2289                 /*
2290                  * This buffer was allocated at a time when there was
2291                  * no available blkptrs from the dnode, or it was
2292                  * inappropriate to hook it in (i.e., nlevels mis-match).
2293                  */
2294                 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2295                 ASSERT(db->db_parent == NULL);
2296                 db->db_parent = dn->dn_dbuf;
2297                 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2298                 DBUF_VERIFY(db);
2299         } else {
2300                 dmu_buf_impl_t *parent = db->db_parent;
2301                 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2302
2303                 ASSERT(dn->dn_phys->dn_nlevels > 1);
2304                 if (parent == NULL) {
2305                         mutex_exit(&db->db_mtx);
2306                         rw_enter(&dn->dn_struct_rwlock, RW_READER);
2307                         (void) dbuf_hold_impl(dn, db->db_level+1,
2308                             db->db_blkid >> epbs, FALSE, db, &parent);
2309                         rw_exit(&dn->dn_struct_rwlock);
2310                         mutex_enter(&db->db_mtx);
2311                         db->db_parent = parent;
2312                 }
2313                 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2314                     (db->db_blkid & ((1ULL << epbs) - 1));
2315                 DBUF_VERIFY(db);
2316         }
2317 }
2318
2319 /* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
2320  * is critical the we not allow the compiler to inline this function in to
2321  * dbuf_sync_list() thereby drastically bloating the stack usage.
2322  */
2323 noinline static void
2324 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2325 {
2326         dmu_buf_impl_t *db = dr->dr_dbuf;
2327         dnode_t *dn;
2328         zio_t *zio;
2329
2330         ASSERT(dmu_tx_is_syncing(tx));
2331
2332         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2333
2334         mutex_enter(&db->db_mtx);
2335
2336         ASSERT(db->db_level > 0);
2337         DBUF_VERIFY(db);
2338
2339         if (db->db_buf == NULL) {
2340                 mutex_exit(&db->db_mtx);
2341                 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2342                 mutex_enter(&db->db_mtx);
2343         }
2344         ASSERT3U(db->db_state, ==, DB_CACHED);
2345         ASSERT(db->db_buf != NULL);
2346
2347         DB_DNODE_ENTER(db);
2348         dn = DB_DNODE(db);
2349         ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2350         dbuf_check_blkptr(dn, db);
2351         DB_DNODE_EXIT(db);
2352
2353         db->db_data_pending = dr;
2354
2355         mutex_exit(&db->db_mtx);
2356         dbuf_write(dr, db->db_buf, tx);
2357
2358         zio = dr->dr_zio;
2359         mutex_enter(&dr->dt.di.dr_mtx);
2360         dbuf_sync_list(&dr->dt.di.dr_children, tx);
2361         ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2362         mutex_exit(&dr->dt.di.dr_mtx);
2363         zio_nowait(zio);
2364 }
2365
2366 /* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
2367  * critical the we not allow the compiler to inline this function in to
2368  * dbuf_sync_list() thereby drastically bloating the stack usage.
2369  */
2370 noinline static void
2371 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2372 {
2373         arc_buf_t **datap = &dr->dt.dl.dr_data;
2374         dmu_buf_impl_t *db = dr->dr_dbuf;
2375         dnode_t *dn;
2376         objset_t *os;
2377         uint64_t txg = tx->tx_txg;
2378
2379         ASSERT(dmu_tx_is_syncing(tx));
2380
2381         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2382
2383         mutex_enter(&db->db_mtx);
2384         /*
2385          * To be synced, we must be dirtied.  But we
2386          * might have been freed after the dirty.
2387          */
2388         if (db->db_state == DB_UNCACHED) {
2389                 /* This buffer has been freed since it was dirtied */
2390                 ASSERT(db->db.db_data == NULL);
2391         } else if (db->db_state == DB_FILL) {
2392                 /* This buffer was freed and is now being re-filled */
2393                 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2394         } else {
2395                 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2396         }
2397         DBUF_VERIFY(db);
2398
2399         DB_DNODE_ENTER(db);
2400         dn = DB_DNODE(db);
2401
2402         if (db->db_blkid == DMU_SPILL_BLKID) {
2403                 mutex_enter(&dn->dn_mtx);
2404                 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2405                 mutex_exit(&dn->dn_mtx);
2406         }
2407
2408         /*
2409          * If this is a bonus buffer, simply copy the bonus data into the
2410          * dnode.  It will be written out when the dnode is synced (and it
2411          * will be synced, since it must have been dirty for dbuf_sync to
2412          * be called).
2413          */
2414         if (db->db_blkid == DMU_BONUS_BLKID) {
2415                 dbuf_dirty_record_t **drp;
2416
2417                 ASSERT(*datap != NULL);
2418                 ASSERT3U(db->db_level, ==, 0);
2419                 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2420                 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2421                 DB_DNODE_EXIT(db);
2422
2423                 if (*datap != db->db.db_data) {
2424                         zio_buf_free(*datap, DN_MAX_BONUSLEN);
2425                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2426                 }
2427                 db->db_data_pending = NULL;
2428                 drp = &db->db_last_dirty;
2429                 while (*drp != dr)
2430                         drp = &(*drp)->dr_next;
2431                 ASSERT(dr->dr_next == NULL);
2432                 ASSERT(dr->dr_dbuf == db);
2433                 *drp = dr->dr_next;
2434                 if (dr->dr_dbuf->db_level != 0) {
2435                         mutex_destroy(&dr->dt.di.dr_mtx);
2436                         list_destroy(&dr->dt.di.dr_children);
2437                 }
2438                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2439                 ASSERT(db->db_dirtycnt > 0);
2440                 db->db_dirtycnt -= 1;
2441                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2442                 return;
2443         }
2444
2445         os = dn->dn_objset;
2446
2447         /*
2448          * This function may have dropped the db_mtx lock allowing a dmu_sync
2449          * operation to sneak in. As a result, we need to ensure that we
2450          * don't check the dr_override_state until we have returned from
2451          * dbuf_check_blkptr.
2452          */
2453         dbuf_check_blkptr(dn, db);
2454
2455         /*
2456          * If this buffer is in the middle of an immediate write,
2457          * wait for the synchronous IO to complete.
2458          */
2459         while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2460                 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2461                 cv_wait(&db->db_changed, &db->db_mtx);
2462                 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2463         }
2464
2465         if (db->db_state != DB_NOFILL &&
2466             dn->dn_object != DMU_META_DNODE_OBJECT &&
2467             refcount_count(&db->db_holds) > 1 &&
2468             dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2469             *datap == db->db_buf) {
2470                 /*
2471                  * If this buffer is currently "in use" (i.e., there
2472                  * are active holds and db_data still references it),
2473                  * then make a copy before we start the write so that
2474                  * any modifications from the open txg will not leak
2475                  * into this write.
2476                  *
2477                  * NOTE: this copy does not need to be made for
2478                  * objects only modified in the syncing context (e.g.
2479                  * DNONE_DNODE blocks).
2480                  */
2481                 int blksz = arc_buf_size(*datap);
2482                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2483                 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2484                 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2485         }
2486         db->db_data_pending = dr;
2487
2488         mutex_exit(&db->db_mtx);
2489
2490         dbuf_write(dr, *datap, tx);
2491
2492         ASSERT(!list_link_active(&dr->dr_dirty_node));
2493         if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2494                 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2495                 DB_DNODE_EXIT(db);
2496         } else {
2497                 /*
2498                  * Although zio_nowait() does not "wait for an IO", it does
2499                  * initiate the IO. If this is an empty write it seems plausible
2500                  * that the IO could actually be completed before the nowait
2501                  * returns. We need to DB_DNODE_EXIT() first in case
2502                  * zio_nowait() invalidates the dbuf.
2503                  */
2504                 DB_DNODE_EXIT(db);
2505                 zio_nowait(dr->dr_zio);
2506         }
2507 }
2508
2509 void
2510 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2511 {
2512         dbuf_dirty_record_t *dr;
2513
2514         while ((dr = list_head(list))) {
2515                 if (dr->dr_zio != NULL) {
2516                         /*
2517                          * If we find an already initialized zio then we
2518                          * are processing the meta-dnode, and we have finished.
2519                          * The dbufs for all dnodes are put back on the list
2520                          * during processing, so that we can zio_wait()
2521                          * these IOs after initiating all child IOs.
2522                          */
2523                         ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2524                             DMU_META_DNODE_OBJECT);
2525                         break;
2526                 }
2527                 list_remove(list, dr);
2528                 if (dr->dr_dbuf->db_level > 0)
2529                         dbuf_sync_indirect(dr, tx);
2530                 else
2531                         dbuf_sync_leaf(dr, tx);
2532         }
2533 }
2534
2535 /* ARGSUSED */
2536 static void
2537 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2538 {
2539         dmu_buf_impl_t *db = vdb;
2540         dnode_t *dn;
2541         blkptr_t *bp = zio->io_bp;
2542         blkptr_t *bp_orig = &zio->io_bp_orig;
2543         spa_t *spa = zio->io_spa;
2544         int64_t delta;
2545         uint64_t fill = 0;
2546         int i;
2547
2548         ASSERT(db->db_blkptr == bp);
2549
2550         DB_DNODE_ENTER(db);
2551         dn = DB_DNODE(db);
2552         delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2553         dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2554         zio->io_prev_space_delta = delta;
2555
2556         if (BP_IS_HOLE(bp)) {
2557                 ASSERT(bp->blk_fill == 0);
2558                 DB_DNODE_EXIT(db);
2559                 return;
2560         }
2561
2562         ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2563             BP_GET_TYPE(bp) == dn->dn_type) ||
2564             (db->db_blkid == DMU_SPILL_BLKID &&
2565             BP_GET_TYPE(bp) == dn->dn_bonustype));
2566         ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2567
2568         mutex_enter(&db->db_mtx);
2569
2570 #ifdef ZFS_DEBUG
2571         if (db->db_blkid == DMU_SPILL_BLKID) {
2572                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2573                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2574                     db->db_blkptr == &dn->dn_phys->dn_spill);
2575         }
2576 #endif
2577
2578         if (db->db_level == 0) {
2579                 mutex_enter(&dn->dn_mtx);
2580                 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2581                     db->db_blkid != DMU_SPILL_BLKID)
2582                         dn->dn_phys->dn_maxblkid = db->db_blkid;
2583                 mutex_exit(&dn->dn_mtx);
2584
2585                 if (dn->dn_type == DMU_OT_DNODE) {
2586                         dnode_phys_t *dnp = db->db.db_data;
2587                         for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2588                             i--, dnp++) {
2589                                 if (dnp->dn_type != DMU_OT_NONE)
2590                                         fill++;
2591                         }
2592                 } else {
2593                         fill = 1;
2594                 }
2595         } else {
2596                 blkptr_t *ibp = db->db.db_data;
2597                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2598                 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2599                         if (BP_IS_HOLE(ibp))
2600                                 continue;
2601                         fill += ibp->blk_fill;
2602                 }
2603         }
2604         DB_DNODE_EXIT(db);
2605
2606         bp->blk_fill = fill;
2607
2608         mutex_exit(&db->db_mtx);
2609 }
2610
2611 /* ARGSUSED */
2612 static void
2613 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2614 {
2615         dmu_buf_impl_t *db = vdb;
2616         blkptr_t *bp = zio->io_bp;
2617         blkptr_t *bp_orig = &zio->io_bp_orig;
2618         uint64_t txg = zio->io_txg;
2619         dbuf_dirty_record_t **drp, *dr;
2620
2621         ASSERT3U(zio->io_error, ==, 0);
2622         ASSERT(db->db_blkptr == bp);
2623
2624         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
2625                 ASSERT(BP_EQUAL(bp, bp_orig));
2626         } else {
2627                 objset_t *os;
2628                 dsl_dataset_t *ds;
2629                 dmu_tx_t *tx;
2630
2631                 DB_GET_OBJSET(&os, db);
2632                 ds = os->os_dsl_dataset;
2633                 tx = os->os_synctx;
2634
2635                 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2636                 dsl_dataset_block_born(ds, bp, tx);
2637         }
2638
2639         mutex_enter(&db->db_mtx);
2640
2641         DBUF_VERIFY(db);
2642
2643         drp = &db->db_last_dirty;
2644         while ((dr = *drp) != db->db_data_pending)
2645                 drp = &dr->dr_next;
2646         ASSERT(!list_link_active(&dr->dr_dirty_node));
2647         ASSERT(dr->dr_txg == txg);
2648         ASSERT(dr->dr_dbuf == db);
2649         ASSERT(dr->dr_next == NULL);
2650         *drp = dr->dr_next;
2651
2652 #ifdef ZFS_DEBUG
2653         if (db->db_blkid == DMU_SPILL_BLKID) {
2654                 dnode_t *dn;
2655
2656                 DB_DNODE_ENTER(db);
2657                 dn = DB_DNODE(db);
2658                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2659                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2660                     db->db_blkptr == &dn->dn_phys->dn_spill);
2661                 DB_DNODE_EXIT(db);
2662         }
2663 #endif
2664
2665         if (db->db_level == 0) {
2666                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2667                 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2668                 if (db->db_state != DB_NOFILL) {
2669                         if (dr->dt.dl.dr_data != db->db_buf)
2670                                 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2671                                     db) == 1);
2672                         else if (!arc_released(db->db_buf))
2673                                 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2674                 }
2675         } else {
2676                 dnode_t *dn;
2677
2678                 DB_DNODE_ENTER(db);
2679                 dn = DB_DNODE(db);
2680                 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2681                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2682                 if (!BP_IS_HOLE(db->db_blkptr)) {
2683                         ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
2684                             SPA_BLKPTRSHIFT);
2685                         ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2686                             db->db.db_size);
2687                         ASSERT3U(dn->dn_phys->dn_maxblkid
2688                             >> (db->db_level * epbs), >=, db->db_blkid);
2689                         arc_set_callback(db->db_buf, dbuf_do_evict, db);
2690                 }
2691                 DB_DNODE_EXIT(db);
2692                 mutex_destroy(&dr->dt.di.dr_mtx);
2693                 list_destroy(&dr->dt.di.dr_children);
2694         }
2695         kmem_free(dr, sizeof (dbuf_dirty_record_t));
2696
2697         cv_broadcast(&db->db_changed);
2698         ASSERT(db->db_dirtycnt > 0);
2699         db->db_dirtycnt -= 1;
2700         db->db_data_pending = NULL;
2701         dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2702 }
2703
2704 static void
2705 dbuf_write_nofill_ready(zio_t *zio)
2706 {
2707         dbuf_write_ready(zio, NULL, zio->io_private);
2708 }
2709
2710 static void
2711 dbuf_write_nofill_done(zio_t *zio)
2712 {
2713         dbuf_write_done(zio, NULL, zio->io_private);
2714 }
2715
2716 static void
2717 dbuf_write_override_ready(zio_t *zio)
2718 {
2719         dbuf_dirty_record_t *dr = zio->io_private;
2720         dmu_buf_impl_t *db = dr->dr_dbuf;
2721
2722         dbuf_write_ready(zio, NULL, db);
2723 }
2724
2725 static void
2726 dbuf_write_override_done(zio_t *zio)
2727 {
2728         dbuf_dirty_record_t *dr = zio->io_private;
2729         dmu_buf_impl_t *db = dr->dr_dbuf;
2730         blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2731
2732         mutex_enter(&db->db_mtx);
2733         if (!BP_EQUAL(zio->io_bp, obp)) {
2734                 if (!BP_IS_HOLE(obp))
2735                         dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2736                 arc_release(dr->dt.dl.dr_data, db);
2737         }
2738         mutex_exit(&db->db_mtx);
2739
2740         dbuf_write_done(zio, NULL, db);
2741 }
2742
2743 static void
2744 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2745 {
2746         dmu_buf_impl_t *db = dr->dr_dbuf;
2747         dnode_t *dn;
2748         objset_t *os;
2749         dmu_buf_impl_t *parent = db->db_parent;
2750         uint64_t txg = tx->tx_txg;
2751         zbookmark_t zb;
2752         zio_prop_t zp;
2753         zio_t *zio;
2754         int wp_flag = 0;
2755
2756         DB_DNODE_ENTER(db);
2757         dn = DB_DNODE(db);
2758         os = dn->dn_objset;
2759
2760         if (db->db_state != DB_NOFILL) {
2761                 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2762                         /*
2763                          * Private object buffers are released here rather
2764                          * than in dbuf_dirty() since they are only modified
2765                          * in the syncing context and we don't want the
2766                          * overhead of making multiple copies of the data.
2767                          */
2768                         if (BP_IS_HOLE(db->db_blkptr)) {
2769                                 arc_buf_thaw(data);
2770                         } else {
2771                                 dbuf_release_bp(db);
2772                         }
2773                 }
2774         }
2775
2776         if (parent != dn->dn_dbuf) {
2777                 ASSERT(parent && parent->db_data_pending);
2778                 ASSERT(db->db_level == parent->db_level-1);
2779                 ASSERT(arc_released(parent->db_buf));
2780                 zio = parent->db_data_pending->dr_zio;
2781         } else {
2782                 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2783                     db->db_blkid != DMU_SPILL_BLKID) ||
2784                     (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2785                 if (db->db_blkid != DMU_SPILL_BLKID)
2786                         ASSERT3P(db->db_blkptr, ==,
2787                             &dn->dn_phys->dn_blkptr[db->db_blkid]);
2788                 zio = dn->dn_zio;
2789         }
2790
2791         ASSERT(db->db_level == 0 || data == db->db_buf);
2792         ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2793         ASSERT(zio);
2794
2795         SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2796             os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2797             db->db.db_object, db->db_level, db->db_blkid);
2798
2799         if (db->db_blkid == DMU_SPILL_BLKID)
2800                 wp_flag = WP_SPILL;
2801         wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2802
2803         dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2804         DB_DNODE_EXIT(db);
2805
2806         if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2807                 ASSERT(db->db_state != DB_NOFILL);
2808                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2809                     db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2810                     dbuf_write_override_ready, dbuf_write_override_done, dr,
2811                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2812                 mutex_enter(&db->db_mtx);
2813                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2814                 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2815                     dr->dt.dl.dr_copies);
2816                 mutex_exit(&db->db_mtx);
2817         } else if (db->db_state == DB_NOFILL) {
2818                 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2819                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2820                     db->db_blkptr, NULL, db->db.db_size, &zp,
2821                     dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2822                     ZIO_PRIORITY_ASYNC_WRITE,
2823                     ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2824         } else {
2825                 ASSERT(arc_released(data));
2826                 dr->dr_zio = arc_write(zio, os->os_spa, txg,
2827                     db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
2828                     dbuf_write_ready, dbuf_write_done, db,
2829                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2830         }
2831 }
2832
2833 #if defined(_KERNEL) && defined(HAVE_SPL)
2834 EXPORT_SYMBOL(dmu_buf_rele);
2835 EXPORT_SYMBOL(dmu_buf_will_dirty);
2836 #endif