*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
/*
 * Tunable parameter for debugging or performance analysis.  Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
*/
-boolean_t zfs_nocacheflush = B_FALSE;
+int zfs_nocacheflush = 0;
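+
+/*
+ * Illustrative sketch, not part of this change: a flush path gated on
+ * the tunable would look roughly like the following.  zio_flush() is
+ * the real flush entry point, but the surrounding control flow here
+ * is an assumption, not the actual zio pipeline code:
+ *
+ *	if (zfs_nocacheflush)
+ *		return;
+ *	zio_flush(zio, vd);
+ */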
static kmem_cache_t *zil_lwb_cache;
char *lrbuf, *lrp;
int error = 0;
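+ /*
+ * Zero next_blk so it holds a defined value before the block walk
+ * below; this also quiets "may be used uninitialized" warnings.
+ */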
+ bzero(&next_blk, sizeof (blkptr_t));
+
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
- char *end;
+ char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
- ASSERT(!keep_first);
+ VERIFY(!keep_first);
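+ /*
+ * Unlike ASSERT, which compiles away in non-debug builds, VERIFY is
+ * always evaluated, so this invariant is now enforced unconditionally.
+ */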
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
- itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+ itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize,
+ KM_PUSHPAGE | KM_NODEBUG);
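+ /*
+ * KM_PUSHPAGE keeps this allocation safe to perform from the ZIL
+ * commit/writeback path (it must not re-enter the filesystem under
+ * memory pressure), and KM_NODEBUG suppresses the SPL's
+ * large-allocation warning, since lrsize can legitimately be large.
+ */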
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* increased later if write & WR_NEED_COPY */
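+ /*
+ * Standalone sketch (hypothetical struct, userland malloc) of the
+ * allocation pattern used above: one buffer holds a fixed header
+ * plus a variable-length record, sized with offsetof() so the
+ * record starts exactly at the trailing member, while
+ * P2ROUNDUP_TYPED() above keeps its length 8-byte aligned:
+ *
+ *	struct msg { size_t msg_len; char msg_body[1]; };
+ *	struct msg *m = malloc(offsetof(struct msg, msg_body) + len);
+ *	m->msg_len = len;
+ */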
void
zil_free(zilog_t *zilog)
{
- lwb_t *head_lwb;
int i;
zilog->zl_stop_sync = 1;
- /*
- * After zil_close() there should only be one lwb with a buffer.
- */
- head_lwb = list_head(&zilog->zl_lwb_list);
- if (head_lwb) {
- ASSERT(head_lwb == list_tail(&zilog->zl_lwb_list));
- list_remove(&zilog->zl_lwb_list, head_lwb);
- zio_buf_free(head_lwb->lwb_buf, head_lwb->lwb_sz);
- kmem_cache_free(zil_lwb_cache, head_lwb);
- }
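+ /*
+ * The last lwb and its buffer are now torn down in zil_close(), so
+ * by the time zil_free() runs the list must already be empty.
+ */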
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
avl_destroy(&zilog->zl_vdev_tree);
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
zilog_t *zilog = dmu_objset_zil(os);
+ ASSERT(zilog->zl_clean_taskq == NULL);
+ ASSERT(zilog->zl_get_data == NULL);
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
zilog->zl_get_data = get_data;
zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
2, 2, TASKQ_PREPOPULATE);
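+ /*
+ * taskq_create(name, nthreads, pri, minalloc, maxalloc, flags): a
+ * single-threaded queue at minclsyspri with two prepopulated entries.
+ */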
void
zil_close(zilog_t *zilog)
{
- lwb_t *tail_lwb;
+ lwb_t *lwb;
uint64_t txg = 0;
zil_commit(zilog, 0); /* commit all itx */
 * The lwb_max_txg for the stubby lwb will reflect the last activity
 * for the zil.  After a txg_wait_synced() on the txg we know all the
 * callbacks have occurred that may clean the zil.  Now we want to
 * destroy the zl_clean_taskq.
*/
mutex_enter(&zilog->zl_lock);
- tail_lwb = list_tail(&zilog->zl_lwb_list);
- if (tail_lwb != NULL)
- txg = tail_lwb->lwb_max_txg;
+ lwb = list_tail(&zilog->zl_lwb_list);
+ if (lwb != NULL)
+ txg = lwb->lwb_max_txg;
mutex_exit(&zilog->zl_lock);
if (txg)
txg_wait_synced(zilog->zl_dmu_pool, txg);
taskq_destroy(zilog->zl_clean_taskq);
zilog->zl_clean_taskq = NULL;
zilog->zl_get_data = NULL;
+
+ /*
+ * We should have only one LWB left on the list; remove it now.
+ */
+ mutex_enter(&zilog->zl_lock);
+ lwb = list_head(&zilog->zl_lwb_list);
+ if (lwb != NULL) {
+ ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
+ list_remove(&zilog->zl_lwb_list, lwb);
+ zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ kmem_cache_free(zil_lwb_cache, lwb);
+ }
+ mutex_exit(&zilog->zl_lock);
}
/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
- zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+ zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
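+ /*
+ * At 2 * SPA_MAXBLOCKSIZE this buffer is larger than the slab-backed
+ * kmem_alloc() can reliably satisfy on Linux, so it now comes from
+ * vmem_alloc() (virtually contiguous memory) and is released with
+ * the matching vmem_free() below.
+ */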
/*
 * Wait for in-progress removes to sync before starting replay.
 */
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
- kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+ vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
dmu_objset_rele(os, FTAG);
return (error);
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zil_replay_disable, int, 0644);
+MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
+
+module_param(zfs_nocacheflush, int, 0644);
+MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+#endif
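+
+/*
+ * Example usage, illustrative only: with 0644 permissions these module
+ * parameters can be set at load time or toggled at runtime, e.g.:
+ *
+ *	modprobe zfs zfs_nocacheflush=1
+ *	echo 1 >/sys/module/zfs/parameters/zfs_nocacheflush
+ */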