Update SAs when an inode is dirtied
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 50207bd..81c6dfe 100644
@@ -21,6 +21,9 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  */
+/*
+ * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
+ */
 
 #include <sys/dmu.h>
 #include <sys/dmu_impl.h>
@@ -60,7 +63,7 @@ static kstat_t *dmu_tx_ksp;
 dmu_tx_t *
 dmu_tx_create_dd(dsl_dir_t *dd)
 {
-       dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
+       dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
        tx->tx_dir = dd;
        if (dd)
                tx->tx_pool = dd->dd_pool;
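
Note: the kmem allocations touched in this diff (the tx itself here, the hold in dmu_tx_hold_object_impl below, and the commit callback in dmu_tx_callback_register) switch from KM_SLEEP to KM_PUSHPAGE. A minimal sketch of the rule being applied, assuming the SPL kmem interface where a KM_PUSHPAGE allocation may sleep but will not re-enter the filesystem to reclaim memory; the helper name is hypothetical and for illustration only:

	#include <sys/kmem.h>

	/*
	 * Hypothetical helper illustrating the allocation rule: code that
	 * can run in the txg sync / transaction commit path must not let
	 * the allocator recurse back into the filesystem, or it can
	 * deadlock waiting on the very txg it is servicing.  KM_PUSHPAGE
	 * sleeps if memory is tight but never triggers filesystem reclaim.
	 */
	static void *
	tx_path_zalloc(size_t size)
	{
		return (kmem_zalloc(size, KM_PUSHPAGE));
	}
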
@@ -138,7 +141,7 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
                }
        }
 
-       txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
+       txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
 #ifdef DEBUG_DMU_TX
@@ -693,6 +696,8 @@ dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
        ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);
 
        if (dn->dn_maxblkid == 0 && !add) {
+               blkptr_t *bp;
+
                /*
                 * If there is only one block  (i.e. this is a micro-zap)
                 * and we are not adding anything, the accounting is simple.
@@ -707,14 +712,13 @@ dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
                 * Use max block size here, since we don't know how much
                 * the size will change between now and the dbuf dirty call.
                 */
+               bp = &dn->dn_phys->dn_blkptr[0];
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
-                   &dn->dn_phys->dn_blkptr[0],
-                   dn->dn_phys->dn_blkptr[0].blk_birth)) {
+                   bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
-               } else {
+               else
                        txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-               }
-               if (dn->dn_phys->dn_blkptr[0].blk_birth)
+               if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
                return;
        }
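
Note: in the micro-zap branch the block pointer is cached in a local bp and the final test changes from checking blk_birth directly to BP_IS_HOLE(). At this point in the code the two tests agree, since a hole block pointer is one that has never been written and therefore has a birth txg of zero; the macro simply states the intent. A hedged sketch of that relationship, with a hypothetical function name:

	#include <sys/spa.h>

	/*
	 * Hypothetical illustration: a hole blkptr has never been written,
	 * so its birth txg is zero and it owns no space on disk.  Only a
	 * non-hole block contributes to txh_space_tounref, because only a
	 * block that actually exists can be unreferenced by this tx.
	 */
	static boolean_t
	zap_block_on_disk(const blkptr_t *bp)
	{
		return (!BP_IS_HOLE(bp));
	}
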
@@ -1237,7 +1241,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
 {
        dmu_tx_callback_t *dcb;
 
-       dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
+       dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);
 
        dcb->dcb_func = func;
        dcb->dcb_data = data;
@@ -1300,7 +1304,6 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
 {
        dnode_t *dn;
        dmu_tx_hold_t *txh;
-       blkptr_t *bp;
 
        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);
@@ -1311,17 +1314,18 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
                return;
 
        /* If blkptr doesn't exist then add space to towrite */
-       bp = &dn->dn_phys->dn_spill;
-       if (BP_IS_HOLE(bp)) {
+       if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
                txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-               txh->txh_space_tounref = 0;
        } else {
+               blkptr_t *bp;
+
+               bp = &dn->dn_phys->dn_spill;
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
                    bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
                else
                        txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
-               if (bp->blk_birth)
+               if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
        }
 }
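
Note: dmu_tx_hold_spill() now chooses between the two branches using the DNODE_FLAG_SPILL_BLKPTR flag instead of reading dn_spill unconditionally. The spill blkptr_t only overlays the tail of the dnode when that flag is set, so the flag is the authoritative test for whether a spill block exists; when one does exist, it is charged as an overwrite if it is still freeable in this dataset, otherwise as a new write, and it counts toward tounref only if it is not a hole. A hedged sketch of the flag check, with a hypothetical helper name:

	#include <sys/dnode.h>

	/*
	 * Hypothetical helper mirroring the test above: dn_spill is only a
	 * valid block pointer when DNODE_FLAG_SPILL_BLKPTR is set in
	 * dn_flags; otherwise those bytes belong to the bonus buffer and
	 * must not be interpreted as a blkptr_t.
	 */
	static blkptr_t *
	dnode_spill_bp(dnode_phys_t *dnp)
	{
		if (!(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			return (NULL);
		return (&dnp->dn_spill);
	}
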