Fix zmod.h usage in userspace
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 9c4e029..d083591 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -55,6 +55,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        refcount_create(&db->db_holds);
+       list_link_init(&db->db_link);
        return (0);
 }
 
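The list_link_init() added to dbuf_cons() puts the embedded db_link node into a known "not on any list" state before the dbuf can be inserted anywhere. A minimal sketch of the illumos-style list-link helpers this relies on, offered only as an assumption about their behavior (the real definitions live in the list implementation):

#include <stddef.h>

typedef struct list_node {
        struct list_node *list_next;
        struct list_node *list_prev;
} list_node_t;

static inline void
list_link_init(list_node_t *link)
{
        /* NULL next/prev marks a node that is not on any list. */
        link->list_next = NULL;
        link->list_prev = NULL;
}

static inline int
list_link_active(const list_node_t *link)
{
        /* Only a node that has been inserted has non-NULL neighbors. */
        return (link->list_next != NULL);
}

The point of initializing the link in the constructor is that helpers like list_link_active() then give a meaningful answer even for a dbuf that has never been linked.
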
@@ -107,11 +108,15 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
 {
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = dn->dn_objset;
-       uint64_t obj = dn->dn_object;
-       uint64_t hv = DBUF_HASH(os, obj, level, blkid);
-       uint64_t idx = hv & h->hash_table_mask;
+       uint64_t obj;
+       uint64_t hv;
+       uint64_t idx;
        dmu_buf_impl_t *db;
 
+       obj = dn->dn_object;
+       hv = DBUF_HASH(os, obj, level, blkid);
+       idx = hv & h->hash_table_mask;
+
        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
                if (DBUF_EQUAL(db, os, obj, level, blkid)) {
@@ -140,11 +145,13 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
        objset_t *os = db->db_objset;
        uint64_t obj = db->db.db_object;
        int level = db->db_level;
-       uint64_t blkid = db->db_blkid;
-       uint64_t hv = DBUF_HASH(os, obj, level, blkid);
-       uint64_t idx = hv & h->hash_table_mask;
+       uint64_t blkid, hv, idx;
        dmu_buf_impl_t *dbf;
 
+       blkid = db->db_blkid;
+       hv = DBUF_HASH(os, obj, level, blkid);
+       idx = hv & h->hash_table_mask;
+
        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
                if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
@@ -174,11 +181,13 @@ static void
 dbuf_hash_remove(dmu_buf_impl_t *db)
 {
        dbuf_hash_table_t *h = &dbuf_hash_table;
-       uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
-           db->db_level, db->db_blkid);
-       uint64_t idx = hv & h->hash_table_mask;
+       uint64_t hv, idx;
        dmu_buf_impl_t *dbf, **dbp;
 
+       hv = DBUF_HASH(db->db_objset, db->db.db_object,
+           db->db_level, db->db_blkid);
+       idx = hv & h->hash_table_mask;
+
        /*
         * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
@@ -365,7 +374,8 @@ dbuf_verify(dmu_buf_impl_t *db)
                                    &dn->dn_phys->dn_blkptr[db->db_blkid]);
                } else {
                        /* db is pointed to by an indirect block */
-                       int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
+                       ASSERTV(int epb = db->db_parent->db.db_size >>
+                               SPA_BLKPTRSHIFT);
                        ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
                        ASSERT3U(db->db_parent->db.db_object, ==,
                            db->db.db_object);
@@ -391,7 +401,7 @@ dbuf_verify(dmu_buf_impl_t *db)
                 * data when we evict this buffer.
                 */
                if (db->db_dirtycnt == 0) {
-                       uint64_t *buf = db->db.db_data;
+                       ASSERTV(uint64_t *buf = db->db.db_data);
                        int i;
 
                        for (i = 0; i < db->db.db_size >> 3; i++) {
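Both dbuf_verify() hunks wrap variables that are only read inside ASSERT*() checks in ASSERTV(), so non-debug builds, where those asserts compile away, do not trigger gcc's set-but-unused-variable warning. One plausible shape for such a macro, given here as a sketch rather than the actual definition from the debug headers:

#ifdef NDEBUG
#define ASSERTV(x)              /* asserts are compiled out; drop x with them */
#else
#define ASSERTV(x)      x       /* keep x so the following ASSERT can use it  */
#endif

In a debug build ASSERTV(int epb = ...) expands to the original declaration; in a production build the declaration disappears along with the ASSERT3U() that used it.
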
@@ -1127,6 +1137,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
         * transaction group won't leak out when we sync the older txg.
         */
        dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
+       list_link_init(&dr->dr_dirty_node);
        if (db->db_level == 0) {
                void *data_old = db->db_buf;
 
@@ -1802,7 +1813,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
                return;
 
        /* dbuf_find() returns with db_mtx held */
-       if (db = dbuf_find(dn, 0, blkid)) {
+       if ((db = dbuf_find(dn, 0, blkid))) {
                /*
                 * This dbuf is already in the cache.  We assume that
                 * it is already CACHED, or else about to be either
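The extra parentheses around the assignment tell gcc that assigning inside the condition is intentional, silencing the -Wparentheses "suggest parentheses around assignment used as truth value" warning; the same pattern is applied to the while loop in dbuf_sync_list() below. A small standalone illustration using hypothetical widget_find()/widget_process() helpers (made up for this example, not part of ZFS):

#include <stddef.h>

struct widget { int id; };

static struct widget *
widget_find(int id)                     /* hypothetical lookup helper */
{
        static struct widget w;
        w.id = id;
        return (id >= 0 ? &w : NULL);
}

static void
widget_process(struct widget *w)        /* hypothetical consumer */
{
        (void) w;
}

void
example(void)
{
        struct widget *w;

        if ((w = widget_find(42)) != NULL)      /* fully explicit form    */
                widget_process(w);

        if ((w = widget_find(42)))              /* extra parens, as above */
                widget_process(w);
}

Both forms behave identically; the second simply documents the intent to the compiler without adding an explicit != NULL comparison.
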
@@ -1991,8 +2002,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 void
 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
 {
-       int64_t holds = refcount_add(&db->db_holds, tag);
-       ASSERT(holds > 1);
+       VERIFY(refcount_add(&db->db_holds, tag) > 1);
 }
 
 /*
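Replacing the local holds variable with a direct VERIFY() fixes a set-but-unused warning: with asserts compiled out, holds would have been assigned and never read, while VERIFY() both keeps the refcount_add() side effect and checks its result in every build. A rough userspace-flavored sketch of the ASSERT/VERIFY split assumed here (the real macros live in the ZFS/SPL debug headers):

#include <stdio.h>
#include <stdlib.h>

#define VERIFY(cond)                                            \
        do {                                                    \
                if (!(cond)) {                                  \
                        fprintf(stderr, "VERIFY(%s) failed\n",  \
                            #cond);                             \
                        abort();                                \
                }                                               \
        } while (0)

#ifdef NDEBUG
#define ASSERT(cond)    ((void)0)       /* gone in non-debug builds      */
#else
#define ASSERT(cond)    VERIFY(cond)    /* same as VERIFY when debugging */
#endif

The property relied on above is that VERIFY() always evaluates its argument, so the hold count is still bumped in production builds.
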
@@ -2311,6 +2321,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
                ASSERT(dr->dr_next == NULL);
                ASSERT(dr->dr_dbuf == db);
                *drp = dr->dr_next;
+               if (dr->dr_dbuf->db_level != 0) {
+                       mutex_destroy(&dr->dt.di.dr_mtx);
+                       list_destroy(&dr->dt.di.dr_children);
+               }
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                ASSERT(db->db_dirtycnt > 0);
                db->db_dirtycnt -= 1;
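The added mutex_destroy()/list_destroy() pair tears down the per-record state that dbuf_dirty() sets up for indirect (db_level != 0) dbufs, keeping every init/create matched with a destroy before the record is freed rather than leaking whatever state those objects carry. For context, a sketch of what the matching setup side in dbuf_dirty() is assumed to look like (not shown in this diff, so treat the exact code as an assumption):

        /* In dbuf_dirty(), for indirect dbufs only (assumed shape). */
        if (db->db_level != 0) {
                mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
                list_create(&dr->dt.di.dr_children,
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }
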
@@ -2387,7 +2401,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
 {
        dbuf_dirty_record_t *dr;
 
-       while (dr = list_head(list)) {
+       while ((dr = list_head(list))) {
                if (dr->dr_zio != NULL) {
                        /*
                         * If we find an already initialized zio then we
@@ -2556,8 +2570,8 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
                ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
                ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
                if (!BP_IS_HOLE(db->db_blkptr)) {
-                       int epbs =
-                           dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
+                       ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
+                           SPA_BLKPTRSHIFT);
                        ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
                            db->db.db_size);
                        ASSERT3U(dn->dn_phys->dn_maxblkid