*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
- dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
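+ /*
+ * KM_PUSHPAGE rather than KM_SLEEP: this can be reached from the
+ * txg sync path, where a KM_SLEEP allocation could recurse into
+ * the filesystem via direct reclaim and deadlock.
+ */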
+ dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_PUSHPAGE);
tx->tx_dir = dd;
if (dd)
tx->tx_pool = dd->dd_pool;
}
}
- txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
+ txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_PUSHPAGE);
txh->txh_tx = tx;
txh->txh_dnode = dn;
#ifdef DEBUG_DMU_TX
delta = P2NPHASE(off, dn->dn_datablksz);
}
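+ /*
+ * Initialize the indirect block shifts from the dnode up front;
+ * this makes the conditional initialization removed below
+ * unnecessary.
+ */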
+ min_ibs = max_ibs = dn->dn_indblkshift;
if (dn->dn_maxblkid > 0) {
/*
* The blocksize can't change,
* so we can make a more precise estimate.
*/
ASSERT(dn->dn_datablkshift != 0);
min_bs = max_bs = dn->dn_datablkshift;
- min_ibs = max_ibs = dn->dn_indblkshift;
- } else if (dn->dn_indblkshift > max_ibs) {
- /*
- * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
- * the code will still work correctly on older pools.
- */
- min_ibs = max_ibs = dn->dn_indblkshift;
}
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
int epbs;
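+ /*
+ * l0span tracks how many level-0 blocks the free spans; nl1blks
+ * counts the level-1 indirect blocks examined below. Both feed
+ * the higher-level overhead estimate at the end.
+ */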
+ uint64_t l0span = 0, nl1blks = 0;
if (dn->dn_nlevels == 0)
return;
nblks = dn->dn_maxblkid - blkid;
}
+ l0span = nblks; /* save for later use to calc level > 1 overhead */
if (dn->dn_nlevels == 1) {
int i;
for (i = 0; i < nblks; i++) {
unref += BP_GET_ASIZE(bp);
}
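+ /*
+ * A single-level dnode keeps all blkptrs in the dnode itself;
+ * count them as one L1's worth for the overhead estimate.
+ */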
+ nl1blks = 1;
nblks = 0;
}
- /*
- * Add in memory requirements of higher-level indirects.
- * This assumes a worst-possible scenario for dn_nlevels.
- */
- {
- uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
- int level = (dn->dn_nlevels > 1) ? 2 : 1;
-
- while (level++ < DN_MAX_LEVELS) {
- txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
- blkcnt = 1 + (blkcnt >> epbs);
- }
- ASSERT(blkcnt <= dn->dn_nblkptr);
- }
-
lastblk = blkid + nblks - 1;
while (nblks) {
dmu_buf_impl_t *dbuf;
}
dbuf_rele(dbuf, FTAG);
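+ /* one level-1 indirect block is examined per iteration */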
+ ++nl1blks;
blkid += tochk;
nblks -= tochk;
}
rw_exit(&dn->dn_struct_rwlock);
+ /*
+ * Add in memory requirements of higher-level indirects.
+ * This assumes a worst-possible scenario for dn_nlevels and a
+ * worst-possible distribution of l1-blocks over the region to free.
+ */
+ {
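+ /*
+ * blkcnt starts as the number of level-2 blocks spanned: each
+ * L1 covers 2^epbs level-0 blocks and each L2 covers 2^epbs
+ * L1 blocks.
+ */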
+ uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
+ int level = 2;
+ /*
+ * Instead of using DN_MAX_LEVELS here, compute the actual
+ * maximum number of levels from the given datablkshift and
+ * indblkshift. On large files this is the difference between
+ * 19 levels and 8.
+ */
+ int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
+ (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
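+ /*
+ * For example, with 128K data blocks (datablkshift = 17), 16K
+ * indirect blocks (indblkshift = 14) and SPA_BLKPTRSHIFT = 7:
+ * maxlevel = 2 + (64 - 17) / (14 - 7) = 8.
+ */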
+
+ while (level++ < maxlevel) {
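+ /*
+ * At each level no more than MIN(blkcnt, nl1blks) indirect
+ * blocks can be dirtied, but account for at least one.
+ */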
+ txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
+ << dn->dn_indblkshift;
+ blkcnt = 1 + (blkcnt >> epbs);
+ }
+ }
+
/* account for new level 1 indirect blocks that might show up */
if (skipped > 0) {
txh->txh_fudge += skipped << dn->dn_indblkshift;
return;
}
- ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);
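+ /*
+ * dn_type may now be a raw DMU_OT() type that cannot index
+ * dmu_ot[]; DMU_OT_BYTESWAP() extracts the byteswap class
+ * either way.
+ */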
+ ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
if (dn->dn_maxblkid == 0 && !add) {
+ blkptr_t *bp;
+
/*
* If there is only one block (i.e. this is a micro-zap)
* and we are not adding anything, the accounting is simple.
* Use max block size here, since we don't know how much
* the size will change between now and the dbuf dirty call.
*/
+ bp = &dn->dn_phys->dn_blkptr[0];
if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
- &dn->dn_phys->dn_blkptr[0],
- dn->dn_phys->dn_blkptr[0].blk_birth)) {
+ bp, bp->blk_birth))
txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
- } else {
+ else
txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
- }
- if (dn->dn_phys->dn_blkptr[0].blk_birth)
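+ /* BP_IS_HOLE() is the idiomatic test for an unallocated blkptr */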
+ if (!BP_IS_HOLE(bp))
txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
return;
}
{
dmu_tx_callback_t *dcb;
- dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
+ dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_PUSHPAGE);
dcb->dcb_func = func;
dcb->dcb_data = data;
{
dnode_t *dn;
dmu_tx_hold_t *txh;
- blkptr_t *bp;
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
THT_SPILL, 0, 0);
return;
/* If blkptr doesn't exist then add space to towrite */
- bp = &dn->dn_phys->dn_spill;
- if (BP_IS_HOLE(bp)) {
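+ /*
+ * dn_spill is only valid when DNODE_FLAG_SPILL_BLKPTR is set;
+ * otherwise that space may be in use by the bonus buffer.
+ */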
+ if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
- txh->txh_space_tounref = 0;
} else {
+ blkptr_t *bp;
+
+ bp = &dn->dn_phys->dn_spill;
if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
bp, bp->blk_birth))
txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
else
txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
- if (bp->blk_birth)
+ if (!BP_IS_HOLE(bp))
txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
}
}