X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fdbuf.c;h=46f42dd7c934a1b6d0781dbdc8fc0ef639cc21c9;hb=294f68063b49c06d3118d51016811063e69cf97a;hp=9c4e0296db2bdab35fca237db669679c66cebae7;hpb=572e285762521df27fe5b026f409ba1a21abb7ac;p=zfs.git diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 9c4e029..46f42dd 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -20,9 +20,12 @@ */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright 2011 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2012 by Delphix. All rights reserved. */ #include +#include #include #include #include @@ -36,6 +39,29 @@ #include #include +struct dbuf_hold_impl_data { + /* Function arguments */ + dnode_t *dh_dn; + uint8_t dh_level; + uint64_t dh_blkid; + int dh_fail_sparse; + void *dh_tag; + dmu_buf_impl_t **dh_dbp; + /* Local variables */ + dmu_buf_impl_t *dh_db; + dmu_buf_impl_t *dh_parent; + blkptr_t *dh_bp; + int dh_err; + dbuf_dirty_record_t *dh_dr; + arc_buf_contents_t dh_type; + int dh_depth; +}; + +static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh, + dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, + void *tag, dmu_buf_impl_t **dbp, int depth); +static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh); + static void dbuf_destroy(dmu_buf_impl_t *db); static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); @@ -55,6 +81,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag) mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); refcount_create(&db->db_holds); + list_link_init(&db->db_link); return (0); } @@ -107,11 +134,15 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid) { dbuf_hash_table_t *h = &dbuf_hash_table; objset_t *os = dn->dn_objset; - uint64_t obj = dn->dn_object; - uint64_t hv = DBUF_HASH(os, obj, level, blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t obj; + uint64_t hv; + uint64_t idx; dmu_buf_impl_t *db; + obj = dn->dn_object; + hv = DBUF_HASH(os, obj, level, blkid); + idx = hv & h->hash_table_mask; + mutex_enter(DBUF_HASH_MUTEX(h, idx)); for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { if (DBUF_EQUAL(db, os, obj, level, blkid)) { @@ -140,11 +171,13 @@ dbuf_hash_insert(dmu_buf_impl_t *db) objset_t *os = db->db_objset; uint64_t obj = db->db.db_object; int level = db->db_level; - uint64_t blkid = db->db_blkid; - uint64_t hv = DBUF_HASH(os, obj, level, blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t blkid, hv, idx; dmu_buf_impl_t *dbf; + blkid = db->db_blkid; + hv = DBUF_HASH(os, obj, level, blkid); + idx = hv & h->hash_table_mask; + mutex_enter(DBUF_HASH_MUTEX(h, idx)); for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { @@ -174,11 +207,13 @@ static void dbuf_hash_remove(dmu_buf_impl_t *db) { dbuf_hash_table_t *h = &dbuf_hash_table; - uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object, - db->db_level, db->db_blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t hv, idx; dmu_buf_impl_t *dbf, **dbp; + hv = DBUF_HASH(db->db_objset, db->db.db_object, + db->db_level, db->db_blkid); + idx = hv & h->hash_table_mask; + /* * We musn't hold db_mtx to maintin lock ordering: * DBUF_HASH_MUTEX > db_mtx. 
@@ -226,7 +261,7 @@ dbuf_is_metadata(dmu_buf_impl_t *db) boolean_t is_metadata; DB_DNODE_ENTER(db); - is_metadata = dmu_ot[DB_DNODE(db)->dn_type].ot_metadata; + is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type); DB_DNODE_EXIT(db); return (is_metadata); @@ -261,7 +296,13 @@ dbuf_init(void) retry: h->hash_table_mask = hsize - 1; +#if defined(_KERNEL) && defined(HAVE_SPL) + /* Large allocations which do not require contiguous pages + * should be using vmem_alloc() in the linux kernel */ + h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE); +#else h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); +#endif if (h->hash_table == NULL) { /* XXX - we should really return an error instead of assert */ ASSERT(hsize > (1ULL << 10)); @@ -285,7 +326,13 @@ dbuf_fini(void) for (i = 0; i < DBUF_MUTEXES; i++) mutex_destroy(&h->hash_mutexes[i]); +#if defined(_KERNEL) && defined(HAVE_SPL) + /* Large allocations which do not require contiguous pages + * should be using vmem_free() in the linux kernel */ + vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); +#else kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); +#endif kmem_cache_destroy(dbuf_cache); } @@ -326,7 +373,7 @@ dbuf_verify(dmu_buf_impl_t *db) } else if (db->db_blkid == DMU_SPILL_BLKID) { ASSERT(dn != NULL); ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); - ASSERT3U(db->db.db_offset, ==, 0); + ASSERT0(db->db.db_offset); } else { ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); } @@ -365,7 +412,8 @@ dbuf_verify(dmu_buf_impl_t *db) &dn->dn_phys->dn_blkptr[db->db_blkid]); } else { /* db is pointed to by an indirect block */ - int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; + ASSERTV(int epb = db->db_parent->db.db_size >> + SPA_BLKPTRSHIFT); ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); ASSERT3U(db->db_parent->db.db_object, ==, db->db.db_object); @@ -391,7 +439,7 @@ dbuf_verify(dmu_buf_impl_t *db) * data when we evict this buffer. */ if (db->db_dirtycnt == 0) { - uint64_t *buf = db->db.db_data; + ASSERTV(uint64_t *buf = db->db.db_data); int i; for (i = 0; i < db->db.db_size >> 3; i++) { @@ -511,7 +559,6 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags) spa_t *spa; zbookmark_t zb; uint32_t aflags = ARC_NOWAIT; - arc_buf_t *pbuf; DB_DNODE_ENTER(db); dn = DB_DNODE(db); @@ -573,14 +620,8 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags) db->db.db_object, db->db_level, db->db_blkid); dbuf_add_ref(db, NULL); - /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */ - - if (db->db_parent) - pbuf = db->db_parent->db_buf; - else - pbuf = db->db_objset->os_phys_buf; - (void) dsl_read(zio, spa, db->db_blkptr, pbuf, + (void) arc_read(zio, spa, db->db_blkptr, dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, &aflags, &zb); @@ -978,7 +1019,6 @@ void dbuf_release_bp(dmu_buf_impl_t *db) { objset_t *os; - zbookmark_t zb; DB_GET_OBJSET(&os, db); ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); @@ -986,13 +1026,7 @@ dbuf_release_bp(dmu_buf_impl_t *db) list_link_active(&os->os_dsl_dataset->ds_synced_link)); ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); - zb.zb_objset = os->os_dsl_dataset ? 
- os->os_dsl_dataset->ds_object : 0; - zb.zb_object = db->db.db_object; - zb.zb_level = db->db_level; - zb.zb_blkid = db->db_blkid; - (void) arc_release_bp(db->db_buf, db, - db->db_blkptr, os->os_spa, &zb); + (void) arc_release(db->db_buf, db); } dbuf_dirty_record_t * @@ -1049,7 +1083,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); ASSERT(dn->dn_dirtyctx_firstset == NULL); - dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); + dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE); } mutex_exit(&dn->dn_mtx); @@ -1126,7 +1160,8 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) * to make a copy of it so that the changes we make in this * transaction group won't leak out when we sync the older txg. */ - dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); + dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE); + list_link_init(&dr->dr_dirty_node); if (db->db_level == 0) { void *data_old = db->db_buf; @@ -1300,13 +1335,17 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) * it, since one of the current holders may be in the * middle of an update. Note that users of dbuf_undirty() * should not place a hold on the dbuf before the call. + * Also note: we can get here with a spill block, so + * test for that similar to how dbuf_dirty does. */ if (refcount_count(&db->db_holds) > db->db_dirtycnt) { mutex_exit(&db->db_mtx); /* Make sure we don't toss this buffer at sync phase */ - mutex_enter(&dn->dn_mtx); - dnode_clear_range(dn, db->db_blkid, 1, tx); - mutex_exit(&dn->dn_mtx); + if (db->db_blkid != DMU_SPILL_BLKID) { + mutex_enter(&dn->dn_mtx); + dnode_clear_range(dn, db->db_blkid, 1, tx); + mutex_exit(&dn->dn_mtx); + } DB_DNODE_EXIT(db); return (0); } @@ -1319,11 +1358,18 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) *drp = dr->dr_next; + /* + * Note that there are three places in dbuf_dirty() + * where this dirty record may be put on a list. + * Make sure to do a list_remove corresponding to + * every one of those list_insert calls. 
+ */ if (dr->dr_parent) { mutex_enter(&dr->dr_parent->dt.di.dr_mtx); list_remove(&dr->dr_parent->dt.di.dr_children, dr); mutex_exit(&dr->dr_parent->dt.di.dr_mtx); - } else if (db->db_level+1 == dn->dn_nlevels) { + } else if (db->db_blkid == DMU_SPILL_BLKID || + db->db_level+1 == dn->dn_nlevels) { ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); mutex_enter(&dn->dn_mtx); list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); @@ -1573,9 +1619,10 @@ dbuf_clear(dmu_buf_impl_t *db) dbuf_rele(parent, db); } -static int +__attribute__((always_inline)) +static inline int dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, - dmu_buf_impl_t **parentp, blkptr_t **bpp) + dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh) { int nlevels, epbs; @@ -1612,8 +1659,17 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, return (ENOENT); } else if (level < nlevels-1) { /* this block is referenced from an indirect block */ - int err = dbuf_hold_impl(dn, level+1, - blkid >> epbs, fail_sparse, NULL, parentp); + int err; + if (dh == NULL) { + err = dbuf_hold_impl(dn, level+1, blkid >> epbs, + fail_sparse, NULL, parentp); + } + else { + __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, + blkid >> epbs, fail_sparse, NULL, + parentp, dh->dh_depth + 1); + err = __dbuf_hold_impl(dh + 1); + } if (err) return (err); err = dbuf_read(*parentp, NULL, @@ -1650,7 +1706,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); ASSERT(dn->dn_type != DMU_OT_NONE); - db = kmem_cache_alloc(dbuf_cache, KM_SLEEP); + db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE); db->db_objset = os; db->db.db_object = dn->dn_object; @@ -1802,7 +1858,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid) return; /* dbuf_find() returns with db_mtx held */ - if (db = dbuf_find(dn, 0, blkid)) { + if ((db = dbuf_find(dn, 0, blkid))) { /* * This dbuf is already in the cache. We assume that * it is already CACHED, or else about to be either @@ -1812,11 +1868,10 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid) return; } - if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) { + if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) { if (bp && !BP_IS_HOLE(bp)) { int priority = dn->dn_type == DMU_OT_DDT_ZAP ? ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ; - arc_buf_t *pbuf; dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; zbookmark_t zb; @@ -1824,13 +1879,8 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid) SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, dn->dn_object, 0, blkid); - if (db) - pbuf = db->db_buf; - else - pbuf = dn->dn_objset->os_phys_buf; - - (void) dsl_read(NULL, dn->dn_objset->os_spa, - bp, pbuf, NULL, NULL, priority, + (void) arc_read(NULL, dn->dn_objset->os_spa, + bp, NULL, NULL, priority, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, &zb); } @@ -1839,98 +1889,142 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid) } } +#define DBUF_HOLD_IMPL_MAX_DEPTH 20 + /* * Returns with db_holds incremented, and db_mtx not held. * Note: dn_struct_rwlock must be held. 
*/ -int -dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, - void *tag, dmu_buf_impl_t **dbp) +static int +__dbuf_hold_impl(struct dbuf_hold_impl_data *dh) { - dmu_buf_impl_t *db, *parent = NULL; + ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH); + dh->dh_parent = NULL; - ASSERT(blkid != DMU_BONUS_BLKID); - ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); - ASSERT3U(dn->dn_nlevels, >, level); + ASSERT(dh->dh_blkid != DMU_BONUS_BLKID); + ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock)); + ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level); - *dbp = NULL; + *(dh->dh_dbp) = NULL; top: /* dbuf_find() returns with db_mtx held */ - db = dbuf_find(dn, level, blkid); - - if (db == NULL) { - blkptr_t *bp = NULL; - int err; - - ASSERT3P(parent, ==, NULL); - err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); - if (fail_sparse) { - if (err == 0 && bp && BP_IS_HOLE(bp)) - err = ENOENT; - if (err) { - if (parent) - dbuf_rele(parent, NULL); - return (err); + dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid); + + if (dh->dh_db == NULL) { + dh->dh_bp = NULL; + + ASSERT3P(dh->dh_parent, ==, NULL); + dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid, + dh->dh_fail_sparse, &dh->dh_parent, + &dh->dh_bp, dh); + if (dh->dh_fail_sparse) { + if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) + dh->dh_err = ENOENT; + if (dh->dh_err) { + if (dh->dh_parent) + dbuf_rele(dh->dh_parent, NULL); + return (dh->dh_err); } } - if (err && err != ENOENT) - return (err); - db = dbuf_create(dn, level, blkid, parent, bp); - } - - if (db->db_buf && refcount_is_zero(&db->db_holds)) { - arc_buf_add_ref(db->db_buf, db); - if (db->db_buf->b_data == NULL) { - dbuf_clear(db); - if (parent) { - dbuf_rele(parent, NULL); - parent = NULL; + if (dh->dh_err && dh->dh_err != ENOENT) + return (dh->dh_err); + dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid, + dh->dh_parent, dh->dh_bp); + } + + if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) { + arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db); + if (dh->dh_db->db_buf->b_data == NULL) { + dbuf_clear(dh->dh_db); + if (dh->dh_parent) { + dbuf_rele(dh->dh_parent, NULL); + dh->dh_parent = NULL; } goto top; } - ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); + ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data); } - ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); + ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf)); /* * If this buffer is currently syncing out, and we are are * still referencing it from db_data, we need to make a copy * of it in case we decide we want to dirty it again in this txg. 
*/ - if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && - dn->dn_object != DMU_META_DNODE_OBJECT && - db->db_state == DB_CACHED && db->db_data_pending) { - dbuf_dirty_record_t *dr = db->db_data_pending; - - if (dr->dt.dl.dr_data == db->db_buf) { - arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); - - dbuf_set_data(db, - arc_buf_alloc(dn->dn_objset->os_spa, - db->db.db_size, db, type)); - bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, - db->db.db_size); + if (dh->dh_db->db_level == 0 && + dh->dh_db->db_blkid != DMU_BONUS_BLKID && + dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT && + dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) { + dh->dh_dr = dh->dh_db->db_data_pending; + + if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) { + dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db); + + dbuf_set_data(dh->dh_db, + arc_buf_alloc(dh->dh_dn->dn_objset->os_spa, + dh->dh_db->db.db_size, dh->dh_db, dh->dh_type)); + bcopy(dh->dh_dr->dt.dl.dr_data->b_data, + dh->dh_db->db.db_data, dh->dh_db->db.db_size); } } - (void) refcount_add(&db->db_holds, tag); - dbuf_update_data(db); - DBUF_VERIFY(db); - mutex_exit(&db->db_mtx); + (void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag); + dbuf_update_data(dh->dh_db); + DBUF_VERIFY(dh->dh_db); + mutex_exit(&dh->dh_db->db_mtx); /* NOTE: we can't rele the parent until after we drop the db_mtx */ - if (parent) - dbuf_rele(parent, NULL); + if (dh->dh_parent) + dbuf_rele(dh->dh_parent, NULL); - ASSERT3P(DB_DNODE(db), ==, dn); - ASSERT3U(db->db_blkid, ==, blkid); - ASSERT3U(db->db_level, ==, level); - *dbp = db; + ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn); + ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid); + ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level); + *(dh->dh_dbp) = dh->dh_db; return (0); } +/* + * The following code preserves the recursive function dbuf_hold_impl() + * but moves the local variables AND function arguments to the heap to + * minimize the stack frame size. Enough space is initially allocated + * on the stack for 20 levels of recursion. + */ +int +dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, + void *tag, dmu_buf_impl_t **dbp) +{ + struct dbuf_hold_impl_data *dh; + int error; + + dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) * + DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE); + __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0); + + error = __dbuf_hold_impl(dh); + + kmem_free(dh, sizeof(struct dbuf_hold_impl_data) * + DBUF_HOLD_IMPL_MAX_DEPTH); + + return (error); +} + +static void +__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh, + dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, + void *tag, dmu_buf_impl_t **dbp, int depth) +{ + dh->dh_dn = dn; + dh->dh_level = level; + dh->dh_blkid = blkid; + dh->dh_fail_sparse = fail_sparse; + dh->dh_tag = tag; + dh->dh_dbp = dbp; + dh->dh_depth = depth; +} + dmu_buf_impl_t * dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) { @@ -1991,8 +2085,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) void dbuf_add_ref(dmu_buf_impl_t *db, void *tag) { - int64_t holds = refcount_add(&db->db_holds, tag); - ASSERT(holds > 1); + VERIFY(refcount_add(&db->db_holds, tag) > 1); } /* @@ -2076,7 +2169,24 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) dbuf_evict(db); } else { VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0); - if (!DBUF_IS_CACHEABLE(db)) + + /* + * A dbuf will be eligible for eviction if either the + * 'primarycache' property is set or a duplicate + * copy of this buffer is already cached in the arc. 
+ * In the case of the 'primarycache' a buffer
+ * is considered for eviction if it matches the
+ * criteria set in the property.
+ *
+ * To decide if our buffer is considered a
+ * duplicate, we must call into the arc to determine
+ * if multiple buffers are referencing the same
+ * block on-disk. If so, then we simply evict
+ * ourselves.
+ */
+ if (!DBUF_IS_CACHEABLE(db) ||
+ arc_buf_eviction_needed(db->db_buf))
 dbuf_clear(db);
 else
 mutex_exit(&db->db_mtx);
@@ -2204,7 +2314,11 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
 }
 }
 
-static void
+/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
+ * is critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 {
 dmu_buf_impl_t *db = dr->dr_dbuf;
@@ -2247,7 +2361,11 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 zio_nowait(zio);
 }
 
-static void
+/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
+ * critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 {
 arc_buf_t **datap = &dr->dt.dl.dr_data;
@@ -2295,7 +2413,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 dbuf_dirty_record_t **drp;
 
 ASSERT(*datap != NULL);
- ASSERT3U(db->db_level, ==, 0);
+ ASSERT0(db->db_level);
 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
 DB_DNODE_EXIT(db);
@@ -2311,6 +2429,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 ASSERT(dr->dr_next == NULL);
 ASSERT(dr->dr_dbuf == db);
 *drp = dr->dr_next;
+ if (dr->dr_dbuf->db_level != 0) {
+ mutex_destroy(&dr->dt.di.dr_mtx);
+ list_destroy(&dr->dt.di.dr_children);
+ }
 kmem_free(dr, sizeof (dbuf_dirty_record_t));
 ASSERT(db->db_dirtycnt > 0);
 db->db_dirtycnt -= 1;
@@ -2387,7 +2509,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
 {
 dbuf_dirty_record_t *dr;
 
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
 if (dr->dr_zio != NULL) {
 /*
 * If we find an already initialized zio then we
@@ -2494,7 +2616,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
 uint64_t txg = zio->io_txg;
 dbuf_dirty_record_t **drp, *dr;
 
- ASSERT3U(zio->io_error, ==, 0);
+ ASSERT0(zio->io_error);
 ASSERT(db->db_blkptr == bp);
 
 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
@@ -2556,8 +2678,8 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
 if (!BP_IS_HOLE(db->db_blkptr)) {
- int epbs =
- dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
+ ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
+ SPA_BLKPTRSHIFT);
 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
 db->db.db_size);
 ASSERT3U(dn->dn_phys->dn_maxblkid
@@ -2705,3 +2827,41 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
 }
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(dbuf_find);
+EXPORT_SYMBOL(dbuf_is_metadata);
+EXPORT_SYMBOL(dbuf_evict);
+EXPORT_SYMBOL(dbuf_loan_arcbuf);
+EXPORT_SYMBOL(dbuf_whichblock);
+EXPORT_SYMBOL(dbuf_read);
+EXPORT_SYMBOL(dbuf_unoverride);
+EXPORT_SYMBOL(dbuf_free_range);
+EXPORT_SYMBOL(dbuf_new_size);
+EXPORT_SYMBOL(dbuf_release_bp);
+EXPORT_SYMBOL(dbuf_dirty);
+EXPORT_SYMBOL(dmu_buf_will_dirty); +EXPORT_SYMBOL(dmu_buf_will_not_fill); +EXPORT_SYMBOL(dmu_buf_will_fill); +EXPORT_SYMBOL(dmu_buf_fill_done); +EXPORT_SYMBOL(dmu_buf_rele); +EXPORT_SYMBOL(dbuf_assign_arcbuf); +EXPORT_SYMBOL(dbuf_clear); +EXPORT_SYMBOL(dbuf_prefetch); +EXPORT_SYMBOL(dbuf_hold_impl); +EXPORT_SYMBOL(dbuf_hold); +EXPORT_SYMBOL(dbuf_hold_level); +EXPORT_SYMBOL(dbuf_create_bonus); +EXPORT_SYMBOL(dbuf_spill_set_blksz); +EXPORT_SYMBOL(dbuf_rm_spill); +EXPORT_SYMBOL(dbuf_add_ref); +EXPORT_SYMBOL(dbuf_rele); +EXPORT_SYMBOL(dbuf_rele_and_unlock); +EXPORT_SYMBOL(dbuf_refcount); +EXPORT_SYMBOL(dbuf_sync_list); +EXPORT_SYMBOL(dmu_buf_set_user); +EXPORT_SYMBOL(dmu_buf_set_user_ie); +EXPORT_SYMBOL(dmu_buf_update_user); +EXPORT_SYMBOL(dmu_buf_get_user); +EXPORT_SYMBOL(dmu_buf_freeable); +#endif
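
For reference, the central structural change in this diff is the conversion of dbuf_hold_impl() from plain recursion into recursion over a heap-allocated array of per-level state (struct dbuf_hold_impl_data, capped at DBUF_HOLD_IMPL_MAX_DEPTH frames), so that each nesting level no longer adds a large kernel stack frame. The sketch below is a minimal, self-contained userspace illustration of that pattern only; it is not ZFS code, and the names foo_frame, __foo_impl, and foo_hold are hypothetical.

/*
 * Illustrative sketch (not ZFS code) of the stack-reduction pattern the
 * patch applies to dbuf_hold_impl(): per-call state lives in a
 * preallocated array of frames, and recursion steps to the next frame
 * (ff + 1) instead of growing the stack at every level.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define	FOO_MAX_DEPTH	20	/* mirrors DBUF_HOLD_IMPL_MAX_DEPTH */

struct foo_frame {
	/* "function arguments" */
	unsigned long	ff_blkid;
	int		ff_level;
	/* "local variables" */
	int		ff_err;
	int		ff_depth;
};

static void
__foo_frame_init(struct foo_frame *ff, unsigned long blkid, int level,
    int depth)
{
	ff->ff_blkid = blkid;
	ff->ff_level = level;
	ff->ff_err = 0;
	ff->ff_depth = depth;
}

/* Recursive worker: each nesting level reuses the next preallocated frame. */
static int
__foo_impl(struct foo_frame *ff)
{
	assert(ff->ff_depth < FOO_MAX_DEPTH);

	if (ff->ff_level == 0)
		return (0);		/* reached the leaf level */

	/* Recurse on the parent level using heap storage, not the stack. */
	__foo_frame_init(ff + 1, ff->ff_blkid >> 7, ff->ff_level - 1,
	    ff->ff_depth + 1);
	ff->ff_err = __foo_impl(ff + 1);
	return (ff->ff_err);
}

/* Public wrapper: allocate every frame up front, as dbuf_hold_impl() does. */
int
foo_hold(unsigned long blkid, int level)
{
	struct foo_frame *ff;
	int err;

	ff = calloc(FOO_MAX_DEPTH, sizeof (*ff));
	if (ff == NULL)
		return (-1);
	__foo_frame_init(ff, blkid, level, 0);
	err = __foo_impl(ff);
	free(ff);
	return (err);
}

int
main(void)
{
	printf("foo_hold() -> %d\n", foo_hold(1UL << 20, 3));
	return (0);
}

The same stack-usage concern motivates the noinline annotations added to dbuf_sync_indirect() and dbuf_sync_leaf(), which are called recursively from dbuf_sync_list() and would otherwise bloat its stack frame if inlined.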