/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
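
/*
 * A dmu_tx_t describes a transaction against one or more objects.  The
 * lifecycle implemented below: dmu_tx_create() allocates the tx; the
 * dmu_tx_hold_*() functions declare the intended modifications and
 * accumulate worst-case space and memory estimates; dmu_tx_assign()
 * reserves that space and assigns the tx to an open txg; the caller then
 * performs the modifications and calls dmu_tx_commit() (or dmu_tx_abort()
 * if the tx was never assigned).
 */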

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);

	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);

	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}
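
/*
 * A tx created by dmu_tx_create_assigned() has tx_anyobj set: it runs in
 * syncing context, is already assigned to a txg, and may touch any object
 * without declaring holds.  The two predicates below both report that flag.
 */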
int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os->os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_indirects(dmu_tx_hold_t *txh, dmu_buf_impl_t *db,
    boolean_t freeable, dmu_buf_impl_t **history)
{
	int i = db->db_level + 1;
	dnode_t *dn = db->db_dnode;

	if (i >= dn->dn_nlevels)
		return;

	db = db->db_parent;
	if (db == NULL) {
		uint64_t lvls = dn->dn_nlevels - i;

		txh->txh_space_towrite += lvls << dn->dn_indblkshift;
		return;
	}

	if (db != history[i]) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		uint64_t space = 1ULL << dn->dn_indblkshift;

		freeable = (db->db_blkptr && (freeable ||
		    dsl_dataset_block_freeable(ds, db->db_blkptr->blk_birth)));
		if (freeable)
			txh->txh_space_tooverwrite += space;
		else
			txh->txh_space_towrite += space;
		if (db->db_blkptr)
			txh->txh_space_tounref += space;
		history[i] = db;
		dmu_tx_count_indirects(txh, db, freeable, history);
	}
}
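
/*
 * Note on the accounting buckets, as used throughout this file:
 * txh_space_towrite counts bytes that will need newly allocated space;
 * txh_space_tooverwrite counts rewrites of blocks whose old copies are
 * freeable (no net growth); txh_space_tounref counts space that will
 * become unreferenced; txh_memory_tohold and txh_fudge cover memory
 * pinned during the operation and indirect blocks that may appear
 * before the tx is assigned to a txg.
 */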

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		dmu_buf_impl_t *last[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid)
			bzero(last, sizeof (dmu_buf_impl_t *) * DN_MAX_LEVELS);
		while (start <= dn->dn_maxblkid) {
			spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			db = dbuf_hold_level(dn, 0, start, FTAG);
			rw_exit(&dn->dn_struct_rwlock);
			if (db->db_blkptr && dsl_dataset_block_freeable(ds,
			    db->db_blkptr->blk_birth)) {
				dprintf_bp(db->db_blkptr, "can free old%s", "");
				txh->txh_space_tooverwrite += dn->dn_datablksz;
				txh->txh_space_tounref += dn->dn_datablksz;
				dmu_tx_count_indirects(txh, db, TRUE, last);
			} else {
				txh->txh_space_towrite += dn->dn_datablksz;
				if (db->db_blkptr)
					txh->txh_space_tounref +=
					    bp_get_dasize(spa, db->db_blkptr);
				dmu_tx_count_indirects(txh, db, FALSE, last);
			}
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

out:
	if (err)
		txh->txh_tx->tx_err = err;
}
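
/*
 * A rough worked example of the worst-case loop above, assuming the
 * contemporaneous constants SPA_MINBLOCKSHIFT == 9, DN_MIN_INDBLKSHIFT == 10,
 * DN_MAX_INDBLKSHIFT == 14 and SPA_BLKPTRSHIFT == 7: for a 128K write at
 * offset 0 to an object whose block size is not yet known, min_bs = 9 and
 * epbs = 3, so the loop runs for bits = 55, 52, ..., 1 (19 levels), charging
 * a full 16K indirect per level plus wider runs at the lowest levels, just
 * under 1M in total for 128K of data.  The estimate is deliberately
 * pessimistic; the unused reservation is returned when the tx commits.
 */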

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dasize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;
		if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
			txh->txh_tx->tx_err = E2BIG;
			dbuf_rele(dbuf, FTAG);
			break;
		}
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dasize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}
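
/*
 * Note the distinction above: txh_space_tofree/_tounref estimate on-disk
 * space that the free will release, while txh_memory_tohold counts the
 * level-1 indirect dbufs that must stay in memory while the free is
 * processed.  "skipped" covers ranges dnode_next_offset() could not
 * examine, hence the fudge accounting.
 */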

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_dnode(txh);
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		}
		if (dn->dn_phys->dn_blkptr[0].blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(&dn->dn_objset->os, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}
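
/*
 * Debug-only verification (ZFS_DEBUG builds): dmu_tx_dirty_buf() checks
 * that every dbuf dirtied under a tx is covered by one of the tx's holds,
 * and panics if not, which is how a missing dmu_tx_hold_*() call surfaces.
 */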
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
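			/*
			 * A dnode still assigned to the previous (quiescing)
			 * txg means this tx cannot join the open txg yet;
			 * record the hold and return ERESTART so the caller
			 * can wait in dmu_tx_wait() for that txg to quiesce.
			 */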
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
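 *
 * A minimal TXG_NOWAIT caller, sketched from the description above (the
 * names "os", "object", "off" and "len" are illustrative placeholders):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... reacquire locks and retry from dmu_tx_create() ...
 *	} else if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... modify the held object ...
 *	dmu_tx_commit(tx);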
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}
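
/*
 * Debug-only accounting: as dirty data actually consumes or frees space,
 * callers report it here so the ZFS_DEBUG refcounts can be checked against
 * the worst-case estimate (tx_space_towrite) captured at assign time.
 */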
void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}