}
dnode_rele(dn, FTAG);
- *dbp = &db->db;
+ *dbp = &db->db; /* if db is NULL, &db->db is NULL too (db is the first field) */
return (err);
}
}
int
-dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx)
+dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
- dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+ dnode_t *dn;
+ int error;
- if (dn->dn_bonus != (dmu_buf_impl_t *)db)
- return (EINVAL);
- if (newsize < 0 || newsize > db->db_size)
- return (EINVAL);
- dnode_setbonuslen(dn, newsize, tx);
- return (0);
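+ /*
+ * DB_DNODE_ENTER() pins the dbuf's dnode handle so dnode_move() cannot
+ * relocate the dnode while it is dereferenced; DB_DNODE_EXIT() drops
+ * the pin.
+ */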
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+
+ if (dn->dn_bonus != db) {
+ error = EINVAL;
+ } else if (newsize < 0 || newsize > db_fake->db_size) {
+ error = EINVAL;
+ } else {
+ dnode_setbonuslen(dn, newsize, tx);
+ error = 0;
+ }
+
+ DB_DNODE_EXIT(db);
+ return (error);
}
int
-dmu_set_bonustype(dmu_buf_t *db, dmu_object_type_t type, dmu_tx_t *tx)
+dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
- dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+ dnode_t *dn;
+ int error;
- if (type > DMU_OT_NUMTYPES)
- return (EINVAL);
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
- if (dn->dn_bonus != (dmu_buf_impl_t *)db)
- return (EINVAL);
+ if (type > DMU_OT_NUMTYPES) {
+ error = EINVAL;
+ } else if (dn->dn_bonus != db) {
+ error = EINVAL;
+ } else {
+ dnode_setbonus_type(dn, type, tx);
+ error = 0;
+ }
- dnode_setbonus_type(dn, type, tx);
- return (0);
+ DB_DNODE_EXIT(db);
+ return (error);
+}
+
+dmu_object_type_t
+dmu_get_bonustype(dmu_buf_t *db_fake)
+{
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+ dnode_t *dn;
+ dmu_object_type_t type;
+
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+ type = dn->dn_bonustype;
+ DB_DNODE_EXIT(db);
+
+ return (type);
}
int
dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
- rw_exit(&dn->dn_struct_rwlock);
/* as long as the bonus buf is held, the dnode will be held */
- if (refcount_add(&db->db_holds, tag) == 1)
+ if (refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
+ (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
+ }
+
+ /*
+ * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
+ * hold and incrementing the dbuf count to ensure that dnode_move() sees
+ * a dnode hold for every dbuf.
+ */
+ rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
rw_exit(&dn->dn_struct_rwlock);
ASSERT(db != NULL);
- err = dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | flags);
- *dbp = &db->db;
+ err = dbuf_read(db, NULL, flags);
+ if (err == 0)
+ *dbp = &db->db;
+ else
+ dbuf_rele(db, tag);
return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
- dnode_t *dn = ((dmu_buf_impl_t *)bonus)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
+ dnode_t *dn;
int err;
- if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA)
- return (EINVAL);
- rw_enter(&dn->dn_struct_rwlock, RW_READER);
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+
+ if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
+ err = EINVAL;
+ } else {
+ rw_enter(&dn->dn_struct_rwlock, RW_READER);
+
+ if (!dn->dn_have_spill) {
+ err = ENOENT;
+ } else {
+ err = dmu_spill_hold_by_dnode(dn,
+ DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
+ }
- if (!dn->dn_have_spill) {
rw_exit(&dn->dn_struct_rwlock);
- return (ENOENT);
}
- err = dmu_spill_hold_by_dnode(dn, DB_RF_HAVESTRUCT, tag, dbp);
- rw_exit(&dn->dn_struct_rwlock);
+
+ DB_DNODE_EXIT(db);
return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
- return (dmu_spill_hold_by_dnode(((dmu_buf_impl_t *)bonus)->db_dnode,
- 0, tag, dbp));
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
+ dnode_t *dn;
+ int err;
+
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+ err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
+ DB_DNODE_EXIT(db);
+
+ return (err);
}
/*
uint32_t dbuf_flags;
int err;
zio_t *zio;
- hrtime_t start;
+ hrtime_t start = 0;
ASSERT(length <= DMU_MAX_ACCESS);
}
int
-dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
+dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
- dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+ dnode_t *dn;
int err;
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
+ DB_DNODE_EXIT(db);
return (err);
}
return;
if (len == 0) { /* they're interested in the bonus buffer */
- dn = os->os_meta_dnode;
+ dn = DMU_META_DNODE(os);
if (object == 0 || object >= DN_MAX_OBJECT)
return;
else
dmu_buf_will_dirty(db, tx);
- bcopy(buf, (char *)db->db_data + bufoff, tocpy);
+ (void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
*/
kstat_t *xuio_ksp = NULL;
+typedef struct xuio_stats {
+ /* loaned yet not returned arc_buf */
+ kstat_named_t xuiostat_onloan_rbuf;
+ kstat_named_t xuiostat_onloan_wbuf;
+ /* whether a copy is made when loaning out a read buffer */
+ kstat_named_t xuiostat_rbuf_copied;
+ kstat_named_t xuiostat_rbuf_nocopy;
+ /* whether a copy is made when assigning a write buffer */
+ kstat_named_t xuiostat_wbuf_copied;
+ kstat_named_t xuiostat_wbuf_nocopy;
+} xuio_stats_t;
+
+static xuio_stats_t xuio_stats = {
+ { "onloan_read_buf", KSTAT_DATA_UINT64 },
+ { "onloan_write_buf", KSTAT_DATA_UINT64 },
+ { "read_buf_copied", KSTAT_DATA_UINT64 },
+ { "read_buf_nocopy", KSTAT_DATA_UINT64 },
+ { "write_buf_copied", KSTAT_DATA_UINT64 },
+ { "write_buf_nocopy", KSTAT_DATA_UINT64 }
+};
+
+#define XUIOSTAT_INCR(stat, val) \
+ atomic_add_64(&xuio_stats.stat.value.ui64, (val))
+#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1)
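+/* e.g. XUIOSTAT_BUMP(xuiostat_wbuf_copied) after copying an assigned buf */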
+
int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
}
#ifdef _KERNEL
+
+/*
+ * Copy up to size bytes between arg_buf and req based on the data direction
+ * described by the req. If an entire req's data cannot be transferred the
+ * req is updated such that its current index and bv offsets correctly
+ * reference any residual data which could not be copied. The number of
+ * bytes successfully copied to arg_buf is returned via *offset.
+ */
+static int
+dmu_req_copy(void *arg_buf, int size, int *offset, struct request *req)
+{
+ struct bio_vec *bv;
+ struct req_iterator iter;
+ char *bv_buf;
+ int tocpy;
+
+ *offset = 0;
+ rq_for_each_segment(bv, req, iter) {
+
+ /* Fully consumed the passed arg_buf */
+ ASSERT3S(*offset, <=, size);
+ if (size == *offset)
+ break;
+
+ /* Skip fully consumed bv's */
+ if (bv->bv_len == 0)
+ continue;
+
+ tocpy = MIN(bv->bv_len, size - *offset);
+ ASSERT3S(tocpy, >=, 0);
+
+ bv_buf = page_address(bv->bv_page) + bv->bv_offset;
+ ASSERT3P(bv_buf, !=, NULL);
+
+ if (rq_data_dir(req) == WRITE)
+ memcpy(arg_buf + *offset, bv_buf, tocpy);
+ else
+ memcpy(bv_buf, arg_buf + *offset, tocpy);
+
+ *offset += tocpy;
+ bv->bv_offset += tocpy;
+ bv->bv_len -= tocpy;
+ }
+
+ return (0);
+}
+
int
-dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
+dmu_read_req(objset_t *os, uint64_t object, struct request *req)
{
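+ /* blk_rq_pos() counts 512-byte sectors; shifting converts to a byte offset */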
+ uint64_t size = blk_rq_bytes(req);
+ uint64_t offset = blk_rq_pos(req) << 9;
dmu_buf_t **dbp;
int numbufs, i, err;
- xuio_t *xuio = NULL;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
- err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
- &numbufs, &dbp);
+ err = dmu_buf_hold_array(os, object, offset, size, TRUE, FTAG,
+ &numbufs, &dbp);
if (err)
return (err);
- if (uio->uio_extflg == UIO_XUIO)
- xuio = (xuio_t *)uio;
-
for (i = 0; i < numbufs; i++) {
- int tocpy;
- int bufoff;
+ int tocpy, didcpy, bufoff;
dmu_buf_t *db = dbp[i];
- ASSERT(size > 0);
+ bufoff = offset - db->db_offset;
+ ASSERT3S(bufoff, >=, 0);
- bufoff = uio->uio_loffset - db->db_offset;
tocpy = (int)MIN(db->db_size - bufoff, size);
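+ /* a zero-length copy means the request has been fully satisfied */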
+ if (tocpy == 0)
+ break;
- if (xuio) {
- dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
- arc_buf_t *dbuf_abuf = dbi->db_buf;
- arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
- err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
- if (!err) {
- uio->uio_resid -= tocpy;
- uio->uio_loffset += tocpy;
- }
+ err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
+
+ if (didcpy < tocpy)
+ err = EIO;
- if (abuf == dbuf_abuf)
- XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
- else
- XUIOSTAT_BUMP(xuiostat_rbuf_copied);
- } else {
- err = uiomove((char *)db->db_data + bufoff, tocpy,
- UIO_READ, uio);
- }
if (err)
break;
size -= tocpy;
+ offset += didcpy;
+ err = 0;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
-static int
-dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
+int
+dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
{
+ uint64_t size = blk_rq_bytes(req);
+ uint64_t offset = blk_rq_pos(req) << 9;
dmu_buf_t **dbp;
int numbufs;
int err = 0;
int i;
- err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
- FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
+ if (size == 0)
+ return (0);
+
+ err = dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
+ &numbufs, &dbp);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
- int tocpy;
- int bufoff;
+ int tocpy, didcpy, bufoff;
dmu_buf_t *db = dbp[i];
- ASSERT(size > 0);
+ bufoff = offset - db->db_offset;
+ ASSERT3S(bufoff, >=, 0);
- bufoff = uio->uio_loffset - db->db_offset;
tocpy = (int)MIN(db->db_size - bufoff, size);
+ if (tocpy == 0)
+ break;
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
else
dmu_buf_will_dirty(db, tx);
- /*
- * XXX uiomove could block forever (eg. nfs-backed
- * pages). There needs to be a uiolockdown() function
- * to lock the pages in memory, so that uiomove won't
- * block.
- */
- err = uiomove((char *)db->db_data + bufoff, tocpy,
- UIO_WRITE, uio);
+ err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
+ if (didcpy < tocpy)
+ err = EIO;
+
if (err)
break;
size -= tocpy;
+ offset += didcpy;
+ err = 0;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
+#endif
+#ifdef HAVE_ZPL
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
+ dnode_t *dn;
+ int err;
+
if (size == 0)
return (0);
- return (dmu_write_uio_dnode(((dmu_buf_impl_t *)zdb)->db_dnode,
- uio, size, tx));
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+ err = dmu_write_uio_dnode(dn, uio, size, tx);
+ DB_DNODE_EXIT(db);
+
+ return (err);
}
int
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
- dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
+ spa_t *spa;
- return (arc_loan_buf(dn->dn_objset->os_spa, size));
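+ /* DB_GET_SPA() reads the spa pointer under the dnode handle pin */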
+ DB_GET_SPA(&spa, db);
+ return (arc_loan_buf(spa, size));
}
/*
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
- dnode_t *dn = ((dmu_buf_impl_t *)handle)->db_dnode;
+ dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
+ dnode_t *dn;
dmu_buf_impl_t *db;
uint32_t blksz = (uint32_t)arc_buf_size(buf);
uint64_t blkid;
+ DB_DNODE_ENTER(dbuf);
+ dn = DB_DNODE(dbuf);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, offset);
VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
rw_exit(&dn->dn_struct_rwlock);
+ DB_DNODE_EXIT(dbuf);
if (offset == db->db.db_offset && blksz == db->db.db_size) {
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
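+ /*
+ * The loaned arc buf doesn't match this dbuf's offset or size, so
+ * copy the data with dmu_write() and return the borrowed buffer.
+ */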
+ objset_t *os;
+ uint64_t object;
+
+ DB_DNODE_ENTER(dbuf);
+ dn = DB_DNODE(dbuf);
+ os = dn->dn_objset;
+ object = dn->dn_object;
+ DB_DNODE_EXIT(dbuf);
+
dbuf_rele(db, FTAG);
- dmu_write(dn->dn_objset, dn->dn_object, offset, blksz,
- buf->b_data, tx);
+ dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}
{
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
- dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
blkptr_t *bp = zio->io_bp;
if (zio->io_error == 0) {
*/
BP_SET_LSIZE(bp, db->db_size);
} else {
- ASSERT(BP_GET_TYPE(bp) == dn->dn_type);
ASSERT(BP_GET_LEVEL(bp) == 0);
bp->blk_fill = 1;
}
dmu_sync_arg_t *dsa;
zbookmark_t zb;
zio_prop_t zp;
+ dnode_t *dn;
ASSERT(pio != NULL);
ASSERT(BP_IS_HOLE(bp));
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
- dmu_write_policy(os, db->db_dnode, db->db_level, WP_DMU_SYNC, &zp);
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
+ dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
+ DB_DNODE_EXIT(db);
/*
* If we're frozen (running ziltest), we always need to generate a bp.
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
- boolean_t ismd = (level > 0 || dmu_ot[type].ot_metadata);
+ boolean_t ismd = (level > 0 || dmu_ot[type].ot_metadata ||
+ (wp & WP_SPILL)); /* spill blocks always get metadata write policy */
enum zio_checksum checksum = os->os_checksum;
enum zio_compress compress = os->os_compress;
enum zio_checksum dedup_checksum = os->os_dedup_checksum;
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
dnode_phys_t *dnp;
+ int i;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
mutex_enter(&dn->dn_mtx);
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
- for (int i = 0; i < dnp->dn_nblkptr; i++)
+ for (i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;
mutex_exit(&dn->dn_mtx);
* As above, but faster; can be used when you have a held dbuf in hand.
*/
void
-dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
+dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
- dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+
+ DB_DNODE_ENTER(db);
+ dmu_object_info_from_dnode(DB_DNODE(db), doi);
+ DB_DNODE_EXIT(db);
}
/*
* This is specifically optimized for zfs_getattr().
*/
void
-dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
+dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
+ u_longlong_t *nblk512)
{
- dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
+ dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
+ dnode_t *dn;
+
+ DB_DNODE_ENTER(db);
+ dn = DB_DNODE(db);
*blksize = dn->dn_datablksz;
/* add 1 for dnode space */
*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
SPA_MINBLOCKSHIFT) + 1;
+ DB_DNODE_EXIT(db);
}
void
dmu_init(void)
{
zfs_dbgmsg_init();
- dbuf_init();
+ sa_cache_init();
+ xuio_stat_init();
+ dmu_objset_init();
dnode_init();
+ dbuf_init();
zfetch_init();
arc_init();
l2arc_init();
- xuio_stat_init();
- sa_cache_init();
}
void
dmu_fini(void)
{
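+ /* Tear down in the reverse of the dmu_init() bring-up order. */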
+ l2arc_fini();
arc_fini();
zfetch_fini();
- dnode_fini();
dbuf_fini();
- l2arc_fini();
+ dnode_fini();
+ dmu_objset_fini();
xuio_stat_fini();
sa_cache_fini();
zfs_dbgmsg_fini();
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(dmu_bonus_hold);
+EXPORT_SYMBOL(dmu_free_range);
+EXPORT_SYMBOL(dmu_read);
+EXPORT_SYMBOL(dmu_write);
+
+/* Get information on a DMU object. */
+EXPORT_SYMBOL(dmu_object_info);
+EXPORT_SYMBOL(dmu_object_info_from_dnode);
+EXPORT_SYMBOL(dmu_object_info_from_db);
+EXPORT_SYMBOL(dmu_object_size_from_db);
+
+EXPORT_SYMBOL(dmu_object_set_blocksize);
+EXPORT_SYMBOL(dmu_object_set_checksum);
+EXPORT_SYMBOL(dmu_object_set_compress);
+
+EXPORT_SYMBOL(dmu_ot);
+#endif