* zfs_nocacheflush will cause corruption on power loss if a volatile
* out-of-order write cache is enabled.
*/
-boolean_t zfs_nocacheflush = B_FALSE;
+int zfs_nocacheflush = 0;
static kmem_cache_t *zil_lwb_cache;
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
- itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+ itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize,
+ KM_PUSHPAGE | KM_NODEBUG);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
- zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+ zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
- kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+ vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
dmu_objset_rele(os, FTAG);
return (error);
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zil_replay_disable, int, 0644);
+MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
+
+module_param(zfs_nocacheflush, int, 0644);
+MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+#endif
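
For reference, the module_param()/MODULE_PARM_DESC() pair added above is the standard Linux mechanism for exposing an ordinary C variable as a writable entry under /sys/module/<module>/parameters/<name>; since module_param() only accepts the kernel's basic types (int, bool, charp, ...), this is consistent with zfs_nocacheflush being switched from boolean_t to int earlier in the patch. Below is a minimal stand-alone sketch of that pattern; the module name and variable (example_param, example_flag) are hypothetical and not part of this patch.

/* example_param.c - minimal sketch of the module_param() pattern used above */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * Hypothetical tunable; once loaded it appears as
 * /sys/module/example_param/parameters/example_flag with mode 0644.
 */
static int example_flag = 0;
module_param(example_flag, int, 0644);
MODULE_PARM_DESC(example_flag, "Example tunable (0=off, 1=on)");

static int __init example_init(void)
{
	pr_info("example_param loaded, example_flag=%d\n", example_flag);
	return 0;
}

static void __exit example_exit(void)
{
	pr_info("example_param unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With a module built this way, the value can be given at load time (insmod example_param.ko example_flag=1) or changed at runtime by writing to the sysfs file; zil_replay_disable and zfs_nocacheflush become tunable in the same way once this change is applied.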