/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>
typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);
dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_how",			KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_inflight",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_write_limit",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;
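
/*
 * The counters above are exported through the kstat framework by
 * dmu_tx_init() below; when built against the SPL they appear as
 * /proc/spl/kstat/zfs/dmu_tx.  DMU_TX_STAT_BUMP() increments a single
 * named counter.
 */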
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}
int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}
void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}
static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;
	int l;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
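
/*
 * Example usage (a sketch; "os", "object", "off", "len", and "buf" are
 * hypothetical): reserving write space before issuing the write itself.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */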
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    &dn->dn_phys->dn_blkptr[0],
		    dn->dn_phys->dn_blkptr[0].blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (dn->dn_phys->dn_blkptr[0].blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
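
/*
 * Example usage (a sketch; "os", "zapobj", "name", and "value" are
 * hypothetical): reserving space before adding an entry to an existing
 * ZAP object.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = zap_add(os, zapobj, name, 8, 1, &value, tx);
 *	dmu_tx_commit(tx);
 */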
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}
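
/*
 * Example usage (a sketch; "os" and "object" are hypothetical): dirtying
 * an object's bonus buffer under a bonus hold.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, object);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	VERIFY(dmu_bonus_hold(os, object, FTAG, &db) == 0);
 *	dmu_buf_will_dirty(db, tx);
 *	... update db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 *	dmu_tx_commit(tx);
 */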
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}
int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) {
		DMU_TX_STAT_BUMP(dmu_tx_how);
		return (ERESTART);
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}
/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
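
/*
 * Example usage (a sketch; "some_lock", "os", and "object" are
 * hypothetical): the TXG_NOWAIT retry pattern described in case (2)
 * above, for callers that must drop locks before waiting.
 *
 *	top:
 *		mutex_enter(&some_lock);
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_bonus(tx, object);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err == ERESTART) {
 *			mutex_exit(&some_lock);
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		} else if (err) {
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... make the changes, passing tx to each write ...
 *		dmu_tx_commit(tx);
 *		mutex_exit(&some_lock);
 */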
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}
void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}
void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds))) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}
void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list))) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
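
/*
 * Example usage (a sketch; "my_done_cb" and "my_state" are hypothetical):
 * a commit callback fires with error == 0 once the txg reaches stable
 * storage, or with an error such as ECANCELED when the transaction is
 * aborted via dmu_tx_abort() above.
 *
 *	static void
 *	my_done_cb(void *arg, int error)
 *	{
 *		struct my_state *ms = arg;
 *		ms->ms_error = error;
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done_cb, ms);
 *	dmu_tx_commit(tx);
 */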
/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	blkptr_t *bp;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	bp = &dn->dn_phys->dn_spill;
	if (BP_IS_HOLE(bp)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		txh->txh_space_tounref = 0;
	} else {
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (bp->blk_birth)
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}
void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the update may increase the total size of
 * the attributes, since growth can force the data out of the bonus
 * buffer and into a spill block, which must then be held as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
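
/*
 * Example usage (a sketch; "os", "hdl", "sa_attr", and "val" are
 * hypothetical): updating a single SA attribute through an existing
 * handle.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = sa_update(hdl, sa_attr, &val, sizeof (val), tx);
 *	dmu_tx_commit(tx);
 */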
void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif