X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fdmu_tx.c;h=47ec4c1091e8c467373fb837f06de978fbd10442;hb=4cec9b2dc79c8132ef9093921a0c14529cde775f;hp=5f333ab96c67232e5d3ab495d69099d2923da9b5;hpb=99ea23c583b272470a21e0ac7caa1f485f6b1125;p=zfs.git

diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 5f333ab..47ec4c1 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -20,6 +20,8 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/dmu.h>
@@ -60,7 +62,7 @@ static kstat_t *dmu_tx_ksp;
 dmu_tx_t *
 dmu_tx_create_dd(dsl_dir_t *dd)
 {
-	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
+	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
 	tx->tx_dir = dd;
 	if (dd)
 		tx->tx_pool = dd->dd_pool;
@@ -68,7 +70,7 @@ dmu_tx_create_dd(dsl_dir_t *dd)
 	    offsetof(dmu_tx_hold_t, txh_node));
 	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
 	    offsetof(dmu_tx_callback_t, dcb_node));
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	refcount_create(&tx->tx_space_written);
 	refcount_create(&tx->tx_space_freed);
 #endif
@@ -138,10 +140,10 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
 		}
 	}
 
-	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
+	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
 	txh->txh_tx = tx;
 	txh->txh_dnode = dn;
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	txh->txh_type = type;
 	txh->txh_arg1 = arg1;
 	txh->txh_arg2 = arg2;
@@ -690,9 +692,11 @@ dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
 		return;
 	}
 
-	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);
+	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
 
 	if (dn->dn_maxblkid == 0 && !add) {
+		blkptr_t *bp;
+
 		/*
 		 * If there is only one block (i.e. this is a micro-zap)
 		 * and we are not adding anything, the accounting is simple.
@@ -707,14 +711,13 @@ dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
 		 * Use max block size here, since we don't know how much
 		 * the size will change between now and the dbuf dirty call.
 		 */
+		bp = &dn->dn_phys->dn_blkptr[0];
 		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
-		    &dn->dn_phys->dn_blkptr[0],
-		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
+		    bp, bp->blk_birth))
 			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
-		} else {
+		else
 			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-		}
-		if (dn->dn_phys->dn_blkptr[0].blk_birth)
+		if (!BP_IS_HOLE(bp))
 			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
 		return;
 	}
@@ -798,7 +801,7 @@ dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
 	return (holds);
 }
 
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 void
 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
 {
@@ -1004,7 +1007,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 	/* calculate memory footprint estimate */
 	memory = towrite + tooverwrite + tohold;
 
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	/*
 	 * Add in 'tohold' to account for our dirty holds on this memory
 	 * XXX - the "fudge" factor is to account for skipped blocks that
@@ -1130,7 +1133,7 @@ dmu_tx_wait(dmu_tx_t *tx)
 void
 dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
 {
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	if (tx->tx_dir == NULL || delta == 0)
 		return;
 
@@ -1180,7 +1183,7 @@ dmu_tx_commit(dmu_tx_t *tx)
 
 	list_destroy(&tx->tx_callbacks);
 	list_destroy(&tx->tx_holds);
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
 	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
 	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
@@ -1216,7 +1219,7 @@ dmu_tx_abort(dmu_tx_t *tx)
 
 	list_destroy(&tx->tx_callbacks);
 	list_destroy(&tx->tx_holds);
-#ifdef ZFS_DEBUG
+#ifdef DEBUG_DMU_TX
 	refcount_destroy_many(&tx->tx_space_written,
 	    refcount_count(&tx->tx_space_written));
 	refcount_destroy_many(&tx->tx_space_freed,
@@ -1237,7 +1240,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
 {
 	dmu_tx_callback_t *dcb;
 
-	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
+	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);
 
 	dcb->dcb_func = func;
 	dcb->dcb_data = data;
@@ -1300,7 +1303,6 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
 {
 	dnode_t *dn;
 	dmu_tx_hold_t *txh;
-	blkptr_t *bp;
 
 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
 	    THT_SPILL, 0, 0);
@@ -1311,17 +1313,18 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
 		return;
 
 	/* If blkptr doesn't exist then add space to towrite */
-	bp = &dn->dn_phys->dn_spill;
-	if (BP_IS_HOLE(bp)) {
+	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
 		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-		txh->txh_space_tounref = 0;
 	} else {
+		blkptr_t *bp;
+
+		bp = &dn->dn_phys->dn_spill;
 		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
 		    bp, bp->blk_birth))
 			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
 		else
 			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-		if (bp->blk_birth)
+		if (!BP_IS_HOLE(bp))
 			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
 	}
 }
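
Note on the accounting change in the zap and spill hunks above: instead of testing blk_birth directly, the new code charges txh_space_tounref only when BP_IS_HOLE() says the block pointer refers to allocated data, and caches the pointer in a local bp. The standalone sketch below models that single-block accounting pattern; the fake_blkptr_t/fake_txh_t types and the 128K SPA_MAXBLOCKSIZE value are simplified stand-ins rather than the real ZFS definitions, and the hole test is reduced to a birth-txg check purely for illustration.

/*
 * Userland sketch of the hold-space accounting pattern (assumed,
 * simplified types -- not the actual ZFS structures).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	SPA_MAXBLOCKSIZE	(128ULL * 1024)	/* historical 128K worst case */

typedef struct {
	uint64_t blk_birth;		/* 0 models a hole here */
} fake_blkptr_t;

typedef struct {
	uint64_t txh_space_towrite;
	uint64_t txh_space_tooverwrite;
	uint64_t txh_space_tounref;
} fake_txh_t;

static bool
fake_bp_is_hole(const fake_blkptr_t *bp)
{
	/* Approximation of BP_IS_HOLE(): an unborn block was never written. */
	return (bp->blk_birth == 0);
}

/*
 * A freeable block is charged as an overwrite, anything else as a fresh
 * write; only a non-hole block can release ("unref") space when replaced.
 */
static void
fake_account_block(fake_txh_t *txh, const fake_blkptr_t *bp, bool freeable)
{
	if (freeable)
		txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
	else
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	if (!fake_bp_is_hole(bp))
		txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
}

int
main(void)
{
	fake_txh_t txh = { 0 };
	fake_blkptr_t allocated = { .blk_birth = 1234 };
	fake_blkptr_t hole = { .blk_birth = 0 };

	fake_account_block(&txh, &allocated, true);	/* overwrite + unref */
	fake_account_block(&txh, &hole, false);		/* plain write only */

	printf("towrite=%llu tooverwrite=%llu tounref=%llu\n",
	    (unsigned long long)txh.txh_space_towrite,
	    (unsigned long long)txh.txh_space_tooverwrite,
	    (unsigned long long)txh.txh_space_tounref);
	return (0);
}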