Fix zmod.h usage in userspace

diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 82cfd1a..d083591 100644
@@ -55,6 +55,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        refcount_create(&db->db_holds);
+       list_link_init(&db->db_link);
        return (0);
 }
 
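The list_link_init() call added to the dbuf_cons() constructor above (and the matching one added in dbuf_dirty() further down) ensures the embedded list node is explicitly initialized before the object can be handed to list_insert_*() or tested with list_link_active(). A minimal sketch of what such an initializer does, assuming a Solaris-style list_node_t layout (the real definitions come from the SPL's sys/list.h):

    /*
     * Assumed layout for illustration only; the real list_node_t,
     * list_link_init() and list_link_active() live in sys/list.h.
     */
    typedef struct list_node {
            struct list_node *list_next;
            struct list_node *list_prev;
    } list_node_t;

    static inline void
    list_link_init(list_node_t *node)
    {
            /* A node with NULL links is "not on any list". */
            node->list_next = NULL;
            node->list_prev = NULL;
    }

    static inline int
    list_link_active(list_node_t *node)
    {
            /* Non-NULL links mean the node is currently linked in. */
            return (node->list_next != NULL);
    }

Without the explicit initialization the links hold whatever the allocator left behind, and a later list_link_active() check can misread a never-inserted node as being on a list.
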
@@ -373,7 +374,8 @@ dbuf_verify(dmu_buf_impl_t *db)
                                    &dn->dn_phys->dn_blkptr[db->db_blkid]);
                } else {
                        /* db is pointed to by an indirect block */
-                       int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
+                       ASSERTV(int epb = db->db_parent->db.db_size >>
+                               SPA_BLKPTRSHIFT);
                        ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
                        ASSERT3U(db->db_parent->db.db_object, ==,
                            db->db.db_object);
@@ -399,7 +401,7 @@ dbuf_verify(dmu_buf_impl_t *db)
                 * data when we evict this buffer.
                 */
                if (db->db_dirtycnt == 0) {
-                       uint64_t *buf = db->db.db_data;
+                       ASSERTV(uint64_t *buf = db->db.db_data);
                        int i;
 
                        for (i = 0; i < db->db.db_size >> 3; i++) {
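
Both dbuf_verify() hunks above (and the dbuf_write_done() hunk at the end of this diff) wrap declarations in ASSERTV() because the variables are only ever read by ASSERT*() checks. When assertions are compiled out, those variables become set-but-unused and draw gcc warnings; ASSERTV() drops the declaration together with the assertions that consume it. A sketch of the assumed definition, modelled on the SPL's sys/debug.h:

    /*
     * Assumed definition: keep the wrapped declaration only when
     * assertions are enabled, so debug-only variables do not produce
     * -Wunused-but-set-variable warnings in production builds.
     */
    #ifdef NDEBUG
    #define ASSERTV(x)
    #else
    #define ASSERTV(x)      x
    #endif
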
@@ -1135,6 +1137,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
         * transaction group won't leak out when we sync the older txg.
         */
        dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
+       list_link_init(&dr->dr_dirty_node);
        if (db->db_level == 0) {
                void *data_old = db->db_buf;
 
@@ -1810,7 +1813,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
                return;
 
        /* dbuf_find() returns with db_mtx held */
-       if (db = dbuf_find(dn, 0, blkid)) {
+       if ((db = dbuf_find(dn, 0, blkid))) {
                /*
                 * This dbuf is already in the cache.  We assume that
                 * it is already CACHED, or else about to be either
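
The extra parentheses added here (and in the dbuf_sync_list() hunk below) silence gcc's -Wparentheses warning, which flags a plain assignment used as a condition as a possible '==' typo. Doubling the parentheses marks the assignment as intentional. A standalone illustration (hypothetical example, not ZFS code):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            const char *p;

            /*
             * Written as "if (p = strchr(...))" this would draw
             * "suggest parentheses around assignment used as truth
             * value" under -Wall; the doubled parentheses state that
             * the assignment is deliberate.
             */
            if ((p = strchr("dbuf_prefetch", '_')))
                    printf("found suffix: %s\n", p);
            return (0);
    }
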
@@ -1999,8 +2002,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 void
 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
 {
-       int64_t holds = refcount_add(&db->db_holds, tag);
-       ASSERT(holds > 1);
+       VERIFY(refcount_add(&db->db_holds, tag) > 1);
 }
 
 /*
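
In the old dbuf_add_ref() the return value of refcount_add() was kept in 'holds' solely so ASSERT() could inspect it; with assertions disabled that leaves a set-but-unused variable. Moving the call inside VERIFY() rather than ASSERT() matters: VERIFY() is evaluated in every build, so the hold is still taken and checked when assertions are compiled out. Assumed semantics, loosely modelled on the SPL's sys/debug.h:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Assumed semantics for illustration: VERIFY() always evaluates
     * its expression and aborts on failure, while ASSERT() vanishes
     * entirely in non-debug builds.
     */
    #define VERIFY(x)       do {                                        \
            if (!(x)) {                                                 \
                    fprintf(stderr, "VERIFY(%s) failed\n", #x);         \
                    abort();                                            \
            }                                                           \
    } while (0)

    #ifdef NDEBUG
    #define ASSERT(x)       ((void) 0)      /* expression never evaluated */
    #else
    #define ASSERT(x)       VERIFY(x)
    #endif
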
@@ -2319,6 +2321,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                ASSERT(dr->dr_next == NULL);
                ASSERT(dr->dr_dbuf == db);
                *drp = dr->dr_next;
+               if (dr->dr_dbuf->db_level != 0) {
+                       mutex_destroy(&dr->dt.di.dr_mtx);
+                       list_destroy(&dr->dt.di.dr_children);
+               }
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                ASSERT(db->db_dirtycnt > 0);
                db->db_dirtycnt -= 1;
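
For an indirect dbuf, dbuf_dirty() initializes dr->dt.di.dr_mtx and dr->dt.di.dr_children in the dirty record; the block added above tears both down before the record is freed, so every mutex_init()/list_create() is paired with a mutex_destroy()/list_destroy(). That pairing matters in builds where mutex and list initialization registers debugging state. A simplified sketch of the pattern, using hypothetical structure and function names rather than the real dbuf_dirty_record_t:

    #include <sys/kmem.h>
    #include <sys/list.h>
    #include <sys/mutex.h>
    #include <sys/sysmacros.h>

    /* Hypothetical, simplified stand-ins for dbuf_dirty_record_t. */
    typedef struct child_rec {
            list_node_t     cr_link;
    } child_rec_t;

    typedef struct indirect_dr {
            kmutex_t        dr_mtx;
            list_t          dr_children;
    } indirect_dr_t;

    static indirect_dr_t *
    indirect_dr_create(void)
    {
            indirect_dr_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);

            /* Mirrors the setup done for level > 0 dbufs in dbuf_dirty(). */
            mutex_init(&dr->dr_mtx, NULL, MUTEX_DEFAULT, NULL);
            list_create(&dr->dr_children, sizeof (child_rec_t),
                offsetof(child_rec_t, cr_link));
            return (dr);
    }

    static void
    indirect_dr_free(indirect_dr_t *dr)
    {
            mutex_destroy(&dr->dr_mtx);     /* pairs with mutex_init() */
            list_destroy(&dr->dr_children); /* pairs with list_create() */
            kmem_free(dr, sizeof (*dr));
    }
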
@@ -2395,7 +2401,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
 {
        dbuf_dirty_record_t *dr;
 
-       while (dr = list_head(list)) {
+       while ((dr = list_head(list))) {
                if (dr->dr_zio != NULL) {
                        /*
                         * If we find an already initialized zio then we
@@ -2564,8 +2570,8 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
                ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
                if (!BP_IS_HOLE(db->db_blkptr)) {
-                       int epbs =
-                           dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
+                       ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
+                           SPA_BLKPTRSHIFT);
                        ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
                            db->db.db_size);
                        ASSERT3U(dn->dn_phys->dn_maxblkid