*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
+#include <sys/metaslab.h>
/*
* The zfs intent log (ZIL) saves transaction records of system calls
*/
/*
+ * See zil.h for more information about these fields.
+ */
+zil_stats_t zil_stats = {
+ { "zil_commit_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_writer_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
+};
+
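+/* kstat handle for zil_stats; created in zil_init() and deleted in zil_fini() */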
+static kstat_t *zil_ksp;
+
+/*
* This global ZIL switch affects all pools
*/
int zil_replay_disable = 0; /* disable intent logging replay */
* zfs_nocacheflush will cause corruption on power loss if a volatile
* out-of-order write cache is enabled.
*/
-boolean_t zfs_nocacheflush = B_FALSE;
+int zfs_nocacheflush = 0;
static kmem_cache_t *zil_lwb_cache;
if (avl_find(t, dva, &where) != NULL)
return (EEXIST);
- zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
+ zn = kmem_alloc(sizeof (zil_bp_node_t), KM_PUSHPAGE);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
- error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
+ error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
- error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
+ error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
char *lrbuf, *lrp;
int error = 0;
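+ /*
+ * next_blk is only assigned inside the loop below; zero it here,
+ * most likely to quiet an uninitialized-use warning.
+ */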
+ bzero(&next_blk, sizeof (blkptr_t));
+
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
- char *end;
+ char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
}
static lwb_t *
-zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg)
+zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, uint64_t txg, boolean_t fastwrite)
{
lwb_t *lwb;
- lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
+ lwb = kmem_cache_alloc(zil_lwb_cache, KM_PUSHPAGE);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
+ lwb->lwb_fastwrite = fastwrite;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
lwb->lwb_max_txg = txg;
lwb->lwb_zio = NULL;
}
/*
+ * Called when we create in-memory log transactions so that we know
+ * to clean up the itxs at the end of spa_sync().
+ */
+void
+zilog_dirty(zilog_t *zilog, uint64_t txg)
+{
+ dsl_pool_t *dp = zilog->zl_dmu_pool;
+ dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
+
+ if (dsl_dataset_is_snapshot(ds))
+ panic("dirtying snapshot!");
+
+ if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg) == 0) {
+ /* up the hold count until we can be written out */
+ dmu_buf_add_ref(ds->ds_dbuf, zilog);
+ }
+}
+
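+/*
+ * Determine whether the zil is dirty in any open txg, i.e. it still has
+ * itxs that have not yet been cleaned by zil_clean().
+ */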
+boolean_t
+zilog_is_dirty(zilog_t *zilog)
+{
+ dsl_pool_t *dp = zilog->zl_dmu_pool;
+ int t;
+
+ for (t = 0; t < TXG_SIZE; t++) {
+ if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+/*
* Create an on-disk intent log.
*/
static lwb_t *
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
+ boolean_t fastwrite = B_FALSE;
/*
* Wait for any previous destroy to complete.
BP_ZERO(&blk);
}
- error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
- ZIL_MIN_BLKSZ, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
+ error = zio_alloc_zil(zilog->zl_spa, txg, &blk,
+ ZIL_MIN_BLKSZ, B_TRUE);
+ fastwrite = B_TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
* Allocate a log write buffer (lwb) for the first log block.
*/
if (error == 0)
- lwb = zil_alloc_lwb(zilog, &blk, txg);
+ lwb = zil_alloc_lwb(zilog, &blk, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
- ASSERT(!keep_first);
+ VERIFY(!keep_first);
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
+ ASSERT(lwb->lwb_zio == NULL);
+ if (lwb->lwb_fastwrite)
+ metaslab_fastwrite_unmark(zilog->zl_spa,
+ &lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
kmem_cache_free(zil_lwb_cache, lwb);
}
} else if (!keep_first) {
- (void) zil_parse(zilog, zil_free_log_block,
- zil_free_log_record, tx, zh->zh_claim_txg);
+ zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
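+/*
+ * Walk the on-disk log chain and free its blocks. The caller must have
+ * already emptied the in-memory lwb list.
+ */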
+void
+zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
+{
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+ (void) zil_parse(zilog, zil_free_log_block,
+ zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
+}
+
int
zil_claim(const char *osname, void *txarg)
{
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
- zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
+ zv = kmem_alloc(sizeof (*zv), KM_PUSHPAGE);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
*/
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
mutex_enter(&zilog->zl_lock);
+ lwb->lwb_zio = NULL;
+ lwb->lwb_fastwrite = B_FALSE;
lwb->lwb_buf = NULL;
lwb->lwb_tx = NULL;
mutex_exit(&zilog->zl_lock);
zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
+
+ /* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
+ mutex_enter(&zilog->zl_lock);
if (lwb->lwb_zio == NULL) {
+ if (!lwb->lwb_fastwrite) {
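+ /* mark the block so the allocator accounts for this pending log write */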
+ metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
+ lwb->lwb_fastwrite = B_TRUE;
+ }
lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
- ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
+ ZIO_FLAG_FASTWRITE, &zb);
}
+ mutex_exit(&zilog->zl_lock);
}
/*
};
/*
- * Use the slog as long as the logbias is 'latency' and the current commit size
- * is less than the limit or the total list size is less than 2X the limit.
- * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
+ * Use the slog as long as the current commit size is less than the
+ * limit or the total list size is less than 2X the limit. Limit
+ * checking is disabled by setting zil_slog_limit to UINT64_MAX.
*/
-uint64_t zil_slog_limit = 1024 * 1024;
-#define USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
- (((zilog)->zl_cur_used < zil_slog_limit) || \
- ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
+unsigned long zil_slog_limit = 1024 * 1024;
+#define USE_SLOG(zilog) (((zilog)->zl_cur_used < zil_slog_limit) || \
+ ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1)))
/*
* Start a log block write and advance to the next log block.
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
+ boolean_t use_slog;
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
BP_ZERO(bp);
- /* pass the old blkptr in order to spread log blocks across devs */
- error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
- USE_SLOG(zilog));
+ use_slog = USE_SLOG(zilog);
+ error = zio_alloc_zil(spa, txg, bp, zil_blksz, use_slog);
+ if (use_slog) {
+ ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
+ ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
+ } else {
+ ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
+ ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
+ }
if (!error) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
/*
* Allocate a new log write buffer (lwb).
*/
- nlwb = zil_alloc_lwb(zilog, bp, txg);
+ nlwb = zil_alloc_lwb(zilog, bp, txg, B_TRUE);
/* Record the block for later vdev flushing */
zil_add_block(zilog, &lwb->lwb_blk);
return (NULL);
ASSERT(lwb->lwb_buf != NULL);
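+ /*
+ * Committing an itx implies the zil was dirtied, unless the pool
+ * is frozen for ziltest (spa_freeze_txg() is then finite).
+ */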
+ ASSERT(zilog_is_dirty(zilog) ||
+ spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
dlen = P2ROUNDUP_TYPED(
lrc = (lr_t *)lr_buf;
lrw = (lr_write_t *)lrc;
+ ZIL_STAT_BUMP(zil_itx_count);
+
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
- if (itx->itx_wr_state != WR_COPIED) {
+ if (itx->itx_wr_state == WR_COPIED) {
+ ZIL_STAT_BUMP(zil_itx_copied_count);
+ ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
+ } else {
char *dbuf;
int error;
ASSERT(itx->itx_wr_state == WR_NEED_COPY);
dbuf = lr_buf + reclen;
lrw->lr_common.lrc_reclen += dlen;
+ ZIL_STAT_BUMP(zil_itx_needcopy_count);
+ ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
} else {
ASSERT(itx->itx_wr_state == WR_INDIRECT);
dbuf = NULL;
+ ZIL_STAT_BUMP(zil_itx_indirect_count);
+ ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length);
}
error = zilog->zl_get_data(
itx->itx_private, lrw, dbuf, lwb->lwb_zio);
lwb->lwb_nused += reclen + dlen;
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
- ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
+ ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
return (lwb);
}
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
- itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+ itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize,
+ KM_PUSHPAGE | KM_NODEBUG);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
- if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
+ if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
}
ASSERT(itxg->itxg_sod == 0);
itxg->itxg_txg = txg;
- itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
+ itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_PUSHPAGE);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
- ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
+ ian = kmem_alloc(sizeof (itx_async_node_t), KM_PUSHPAGE);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
+ zilog_dirty(zilog, txg);
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
/*
* If there are any in-memory intent log transactions which have now been
- * synced then start up a taskq to free them.
+ * synced then start up a taskq to free them. We should only do this after we
+ * have written out the uberblocks (i.e. txg has been committed) so that we
+ * don't inadvertently clean out in-memory log records that would be required
+ * by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
* created a bad performance problem.
*/
if (taskq_dispatch(zilog->zl_clean_taskq,
- (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
+ (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
zil_itxg_clean(clean_me);
}
}
DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
- while (itx = list_head(&zilog->zl_itx_commit_list)) {
+ while ((itx = list_head(&zilog->zl_itx_commit_list))) {
txg = itx->itx_lr.lrc_txg;
ASSERT(txg);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
+ ZIL_STAT_BUMP(zil_commit_count);
+
/* move the async itxs for the foid to the sync queues */
zil_async_to_sync(zilog, foid);
zilog->zl_next_batch++;
zilog->zl_writer = B_TRUE;
+ ZIL_STAT_BUMP(zil_commit_writer_count);
zil_commit_writer(zilog);
zilog->zl_com_batch = mybatch;
zilog->zl_writer = B_FALSE;
- mutex_exit(&zilog->zl_lock);
/* wake up one thread to become the next writer */
cv_signal(&zilog->zl_cv_batch[(mybatch+1) & 1]);
/* wake up all threads waiting for this batch to be committed */
cv_broadcast(&zilog->zl_cv_batch[mybatch & 1]);
+
+ mutex_exit(&zilog->zl_lock);
}
/*
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
break;
+
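+ /* zil_lwb_write_done() has already cleared lwb_zio for this lwb */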
+ ASSERT(lwb->lwb_zio == NULL);
+
list_remove(&zilog->zl_lwb_list, lwb);
zio_free_zil(spa, txg, &lwb->lwb_blk);
kmem_cache_free(zil_lwb_cache, lwb);
if (list_head(&zilog->zl_lwb_list) == NULL)
BP_ZERO(&zh->zh_log);
}
+
+ /*
+ * Remove fastwrite on any blocks that have been pre-allocated for
+ * the next commit. This prevents fastwrite counter pollution by
+ * unused, long-lived LWBs.
+ */
+ for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
+ if (lwb->lwb_fastwrite && !lwb->lwb_zio) {
+ metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
+ lwb->lwb_fastwrite = B_FALSE;
+ }
+ }
+
mutex_exit(&zilog->zl_lock);
}
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
+
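+ /* export the counters in zil_stats as the "zfs"/"zil" kstat */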
+ zil_ksp = kstat_create("zfs", 0, "zil", "misc",
+ KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (zil_ksp != NULL) {
+ zil_ksp->ks_data = &zil_stats;
+ kstat_install(zil_ksp);
+ }
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_lwb_cache);
+
+ if (zil_ksp != NULL) {
+ kstat_delete(zil_ksp);
+ zil_ksp = NULL;
+ }
}
void
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
+ int i;
- zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
+ zilog = kmem_zalloc(sizeof (zilog_t), KM_PUSHPAGE);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
- for (int i = 0; i < TXG_SIZE; i++) {
+ for (i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
void
zil_free(zilog_t *zilog)
{
- lwb_t *head_lwb;
+ int i;
zilog->zl_stop_sync = 1;
- /*
- * After zil_close() there should only be one lwb with a buffer.
- */
- head_lwb = list_head(&zilog->zl_lwb_list);
- if (head_lwb) {
- ASSERT(head_lwb == list_tail(&zilog->zl_lwb_list));
- list_remove(&zilog->zl_lwb_list, head_lwb);
- zio_buf_free(head_lwb->lwb_buf, head_lwb->lwb_sz);
- kmem_cache_free(zil_lwb_cache, head_lwb);
- }
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
avl_destroy(&zilog->zl_vdev_tree);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
- for (int i = 0; i < TXG_SIZE; i++) {
+ for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
{
zilog_t *zilog = dmu_objset_zil(os);
+ ASSERT(zilog->zl_clean_taskq == NULL);
+ ASSERT(zilog->zl_get_data == NULL);
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
zilog->zl_get_data = get_data;
zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
2, 2, TASKQ_PREPOPULATE);
void
zil_close(zilog_t *zilog)
{
- lwb_t *tail_lwb;
+ lwb_t *lwb;
uint64_t txg = 0;
zil_commit(zilog, 0); /* commit all itx */
* destroy the zl_clean_taskq.
*/
mutex_enter(&zilog->zl_lock);
- tail_lwb = list_tail(&zilog->zl_lwb_list);
- if (tail_lwb != NULL)
- txg = tail_lwb->lwb_max_txg;
+ lwb = list_tail(&zilog->zl_lwb_list);
+ if (lwb != NULL)
+ txg = lwb->lwb_max_txg;
mutex_exit(&zilog->zl_lock);
if (txg)
txg_wait_synced(zilog->zl_dmu_pool, txg);
+ ASSERT(!zilog_is_dirty(zilog));
taskq_destroy(zilog->zl_clean_taskq);
zilog->zl_clean_taskq = NULL;
zilog->zl_get_data = NULL;
+
+ /*
+ * We should have only one LWB left on the list; remove it now.
+ */
+ mutex_enter(&zilog->zl_lock);
+ lwb = list_head(&zilog->zl_lwb_list);
+ if (lwb != NULL) {
+ ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
+ ASSERT(lwb->lwb_zio == NULL);
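+ /* drop the fastwrite accounting still held for this unwritten block */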
+ if (lwb->lwb_fastwrite)
+ metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
+ list_remove(&zilog->zl_lwb_list, lwb);
+ zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ kmem_cache_free(zil_lwb_cache, lwb);
+ }
+ mutex_exit(&zilog->zl_lock);
}
/*
}
typedef struct zil_replay_arg {
- zil_replay_func_t **zr_replay;
+ zil_replay_func_t *zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
-zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
+zil_replay(objset_t *os, void *arg, zil_replay_func_t replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
- zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+ zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_PUSHPAGE);
/*
* Wait for in-progress removes to sync before starting replay.
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg);
- kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+ vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
dmu_objset_rele(os, FTAG);
return (error);
}
+
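+/*
+ * Linux kernel module parameters (SPL builds only); these tunables are
+ * exposed under /sys/module/zfs/parameters/ at runtime.
+ */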
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zil_replay_disable, int, 0644);
+MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
+
+module_param(zfs_nocacheflush, int, 0644);
+MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+
+module_param(zil_slog_limit, ulong, 0644);
+MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
+#endif