4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
33 #include <sys/resource.h>
35 #include <sys/zil_impl.h>
36 #include <sys/dsl_dataset.h>
38 #include <sys/dmu_tx.h>
41 * The zfs intent log (ZIL) saves transaction records of system calls
42 * that change the file system in memory with enough information
43 * to be able to replay them. These are stored in memory until
44 * either the DMU transaction group (txg) commits them to the stable pool
45 * and they can be discarded, or they are flushed to the stable log
46 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
47 * requirement. In the event of a panic or power failure, those log
48 * records (transactions) are replayed.
50 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of a ZIL header, ZIL blocks and ZIL records.
57 * A log record holds a system call transaction. Log blocks can
58 * hold many log records and the blocks are chained together.
59 * Each ZIL block contains a block pointer (blkptr_t) to the next
60 * ZIL block in the chain. The ZIL header points to the first
61 * block in the chain. Note there is not a fixed place in the pool
62 * to hold blocks. They are dynamically allocated and freed as
63 * needed from the blocks available. A rough sketch of the ZIL structure:
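 *
 * (Illustrative sketch, reflecting the chaining described above.)
 *
 *	zil_header_t          ZIL block               ZIL block
 *	+--------+       +----------------+      +----------------+
 *	| zh_log |-----> | log records    |  +-> | log records    |--> ...
 *	+--------+       | trailer:       |  |   | trailer:       |
 *	                 |  zit_next_blk -+--+   |  zit_next_blk  |
 *	                 +----------------+      +----------------+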
67 * This global ZIL switch affects all pools
69 int zil_disable = 0; /* disable intent logging */
72 * Tunable parameter for debugging or performance analysis. Setting
73 * zfs_nocacheflush will cause corruption on power loss if a volatile
74 * out-of-order write cache is enabled.
76 boolean_t zfs_nocacheflush = B_FALSE;
78 static kmem_cache_t *zil_lwb_cache;
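/*
 * Comparison function for the zl_dva_tree: order DVAs by vdev, then offset.
 * The tree is used while claiming and freeing log blocks so that a block
 * referenced more than once is only processed once.
 */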
81 zil_dva_compare(const void *x1, const void *x2)
83 const dva_t *dva1 = x1;
84 const dva_t *dva2 = x2;
86 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
88 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
91 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
93 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
100 zil_dva_tree_init(avl_tree_t *t)
102 avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
103 offsetof(zil_dva_node_t, zn_node));
107 zil_dva_tree_fini(avl_tree_t *t)
112 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
113 kmem_free(zn, sizeof (zil_dva_node_t));
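/*
 * Add a DVA to the tree; returns 0 if it was inserted, or nonzero (EEXIST)
 * if it was already present.
 */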
119 zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
124 if (avl_find(t, dva, &where) != NULL)
127 zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
129 avl_insert(t, zn, where);
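/*
 * The zl_header field is const in open context; this returns a writable
 * pointer for use in syncing context (e.g. zil_claim() and zil_sync()).
 */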
134 static zil_header_t *
135 zil_header_in_syncing_context(zilog_t *zilog)
137 return ((zil_header_t *)zilog->zl_header);
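/*
 * Initialize the checksum "verifier" for a new log chain: two random GUID
 * words identify the chain, and the block sequence number starts at 1.
 */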
141 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
143 zio_cksum_t *zc = &bp->blk_cksum;
145 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
146 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
147 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
148 zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
152 * Read a log block, make sure it's valid, and byteswap it if necessary.
155 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
159 uint32_t aflags = ARC_WAIT;
162 zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
165 zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
170 * We shouldn't be doing any scrubbing while we're doing log
171 * replay, so it's OK not to lock.
173 error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
174 arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
175 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
178 char *data = (*abufpp)->b_data;
179 uint64_t blksz = BP_GET_LSIZE(bp);
180 zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
181 zio_cksum_t cksum = bp->blk_cksum;
184 * Validate the checksummed log block.
186 * Sequence numbers should be... sequential. The checksum
187 * verifier for the next block should be bp's checksum plus 1.
189 * Also check the log chain linkage and size used.
191 cksum.zc_word[ZIL_ZC_SEQ]++;
193 if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
194 sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
195 (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
200 VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
205 dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
211 * Parse the intent log, and call parse_func for each valid record within.
212 * Return the highest sequence number.
215 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
216 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
218 const zil_header_t *zh = zilog->zl_header;
219 uint64_t claim_seq = zh->zh_claim_seq;
221 uint64_t max_seq = 0;
222 blkptr_t blk = zh->zh_log;
228 if (BP_IS_HOLE(&blk))
232 * Starting at the block pointed to by zh_log we read the log chain.
233 * For each block in the chain we strongly check that block to
234 * ensure its validity. We stop when an invalid block is found.
235 * For each block pointer in the chain we call parse_blk_func().
236 * For each record in each valid block we call parse_lr_func().
237 * If the log has been claimed, stop if we encounter a sequence
238 * number greater than the highest claimed sequence number.
240 zil_dva_tree_init(&zilog->zl_dva_tree);
242 seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
244 if (claim_seq != 0 && seq > claim_seq)
247 ASSERT(max_seq < seq);
250 error = zil_read_log_block(zilog, &blk, &abuf);
252 if (parse_blk_func != NULL)
253 parse_blk_func(zilog, &blk, arg, txg);
258 lrbuf = abuf->b_data;
259 ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
260 blk = ztp->zit_next_blk;
262 if (parse_lr_func == NULL) {
263 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
267 for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
268 lr_t *lr = (lr_t *)lrp;
269 reclen = lr->lrc_reclen;
270 ASSERT3U(reclen, >=, sizeof (lr_t));
271 parse_lr_func(zilog, lr, arg, txg);
273 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
275 zil_dva_tree_fini(&zilog->zl_dva_tree);
282 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
284 spa_t *spa = zilog->zl_spa;
288 * Claim log block if not already committed and not already claimed.
290 if (bp->blk_birth >= first_txg &&
291 zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
292 err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
293 ZIO_FLAG_MUSTSUCCEED));
299 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
301 if (lrc->lrc_txtype == TX_WRITE) {
302 lr_write_t *lr = (lr_write_t *)lrc;
303 zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
309 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
311 zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
315 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
318 * If we previously claimed it, we need to free it.
320 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
321 lr_write_t *lr = (lr_write_t *)lrc;
322 blkptr_t *bp = &lr->lr_blkptr;
323 if (bp->blk_birth >= claim_txg &&
324 !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
325 (void) arc_free(NULL, zilog->zl_spa,
326 dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
332 * Create an on-disk intent log.
335 zil_create(zilog_t *zilog)
337 const zil_header_t *zh = zilog->zl_header;
345 * Wait for any previous destroy to complete.
347 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
349 ASSERT(zh->zh_claim_txg == 0);
350 ASSERT(zh->zh_replay_seq == 0);
355 * If we don't already have an initial log block, or we have one
356 * but it's the wrong endianness, then allocate one.
358 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
359 tx = dmu_tx_create(zilog->zl_os);
360 (void) dmu_tx_assign(tx, TXG_WAIT);
361 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
362 txg = dmu_tx_get_txg(tx);
364 if (!BP_IS_HOLE(&blk)) {
365 zio_free_blk(zilog->zl_spa, &blk, txg);
369 error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
373 zil_init_log_chain(zilog, &blk);
377 * Allocate a log write buffer (lwb) for the first log block.
380 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
381 lwb->lwb_zilog = zilog;
384 lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
385 lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
386 lwb->lwb_max_txg = txg;
389 mutex_enter(&zilog->zl_lock);
390 list_insert_tail(&zilog->zl_lwb_list, lwb);
391 mutex_exit(&zilog->zl_lock);
395 * If we just allocated the first log block, commit our transaction
396 * and wait for zil_sync() to stuff the block pointer into zh_log.
397 * (zh is part of the MOS, so we cannot modify it in open context.)
401 txg_wait_synced(zilog->zl_dmu_pool, txg);
404 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
408 * In one tx, free all log blocks and clear the log header.
409 * If keep_first is set, then we're replaying a log with no content.
410 * We want to keep the first block, however, so that the first
411 * synchronous transaction doesn't require a txg_wait_synced()
412 * in zil_create(). We don't need to txg_wait_synced() here either
413 * when keep_first is set, because both zil_create() and zil_destroy()
414 * will wait for any in-progress destroys to complete.
417 zil_destroy(zilog_t *zilog, boolean_t keep_first)
419 const zil_header_t *zh = zilog->zl_header;
425 * Wait for any previous destroy to complete.
427 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
429 if (BP_IS_HOLE(&zh->zh_log))
432 tx = dmu_tx_create(zilog->zl_os);
433 (void) dmu_tx_assign(tx, TXG_WAIT);
434 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
435 txg = dmu_tx_get_txg(tx);
437 mutex_enter(&zilog->zl_lock);
440 * It is possible for the ZIL to get the previously mounted zilog
441 * structure of the same dataset if it is quickly remounted and the
442 * dbuf eviction has not completed. In this case we can see a
443 * non-empty lwb list and keep_first will be set. We fix this by
444 * clearing keep_first. This will be slower, but it's very rare.
446 if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
447 keep_first = B_FALSE;
449 ASSERT3U(zilog->zl_destroy_txg, <, txg);
450 zilog->zl_destroy_txg = txg;
451 zilog->zl_keep_first = keep_first;
453 if (!list_is_empty(&zilog->zl_lwb_list)) {
454 ASSERT(zh->zh_claim_txg == 0);
456 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
457 list_remove(&zilog->zl_lwb_list, lwb);
458 if (lwb->lwb_buf != NULL)
459 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
460 zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
461 kmem_cache_free(zil_lwb_cache, lwb);
465 (void) zil_parse(zilog, zil_free_log_block,
466 zil_free_log_record, tx, zh->zh_claim_txg);
469 mutex_exit(&zilog->zl_lock);
475 * Return true if the initial log block is not valid.
478 zil_empty(zilog_t *zilog)
480 const zil_header_t *zh = zilog->zl_header;
481 arc_buf_t *abuf = NULL;
483 if (BP_IS_HOLE(&zh->zh_log))
486 if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
489 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
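/*
 * Claim the log blocks of a dataset so they are not reused before replay.
 * Typically invoked for each dataset when the pool is opened or imported.
 */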
494 zil_claim(char *osname, void *txarg)
496 dmu_tx_t *tx = txarg;
497 uint64_t first_txg = dmu_tx_get_txg(tx);
503 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
505 cmn_err(CE_WARN, "can't open objset for %s", osname);
509 zilog = dmu_objset_zil(os);
510 zh = zil_header_in_syncing_context(zilog);
512 if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
513 if (!BP_IS_HOLE(&zh->zh_log))
514 zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
515 BP_ZERO(&zh->zh_log);
516 dsl_dataset_dirty(dmu_objset_ds(os), tx);
520 * Record here whether the zil has any records to replay.
521 * If the header block pointer is null or the block points
522 * to the stubby then we know there are no valid log records.
523 * We use the header to store this state as the zilog gets
524 * freed later in dmu_objset_close().
525 * The flags (and the rest of the header fields) are cleared in
526 * zil_sync() as a result of a zil_destroy(), after replaying the log.
528 * Note, the intent log can be empty but still need the
529 * stubby to be claimed.
531 if (!zil_empty(zilog)) {
532 zh->zh_flags |= ZIL_REPLAY_NEEDED;
533 dsl_dataset_dirty(dmu_objset_ds(os), tx);
537 * Claim all log blocks if we haven't already done so, and remember
538 * the highest claimed sequence number. This ensures that if we can
539 * read only part of the log now (e.g. due to a missing device),
540 * but we can read the entire log later, we will not try to replay
541 * or destroy beyond the last block we successfully claimed.
543 ASSERT3U(zh->zh_claim_txg, <=, first_txg);
544 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
545 zh->zh_claim_txg = first_txg;
546 zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
547 zil_claim_log_record, tx, first_txg);
548 dsl_dataset_dirty(dmu_objset_ds(os), tx);
551 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
552 dmu_objset_close(os);
557 * Check the log by walking the log chain.
558 * Checksum errors are ok as they indicate the end of the chain.
559 * Any other error (no device or read failure) returns an error.
563 zil_check_log_chain(char *osname, void *txarg)
574 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
576 cmn_err(CE_WARN, "can't open objset for %s", osname);
580 zilog = dmu_objset_zil(os);
581 zh = zil_header_in_syncing_context(zilog);
583 if (BP_IS_HOLE(&blk)) {
584 dmu_objset_close(os);
585 return (0); /* no chain */
589 error = zil_read_log_block(zilog, &blk, &abuf);
592 lrbuf = abuf->b_data;
593 ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
594 blk = ztp->zit_next_blk;
595 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
597 dmu_objset_close(os);
599 return (0); /* normal end of chain */
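/*
 * Comparison function for the zl_vdev_tree: order entries by vdev id.
 */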
604 zil_vdev_compare(const void *x1, const void *x2)
606 uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
607 uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
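/*
 * Remember the vdevs that this log block was written to, so that
 * zil_flush_vdevs() can later flush just those devices' write caches.
 */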
618 zil_add_block(zilog_t *zilog, blkptr_t *bp)
620 avl_tree_t *t = &zilog->zl_vdev_tree;
622 zil_vdev_node_t *zv, zvsearch;
623 int ndvas = BP_GET_NDVAS(bp);
626 if (zfs_nocacheflush)
629 ASSERT(zilog->zl_writer);
632 * Even though we're zl_writer, we still need a lock because the
633 * zl_get_data() callbacks may have dmu_sync() completion callbacks
634 * that will run concurrently.
636 mutex_enter(&zilog->zl_vdev_lock);
637 for (i = 0; i < ndvas; i++) {
638 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
639 if (avl_find(t, &zvsearch, &where) == NULL) {
640 zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
641 zv->zv_vdev = zvsearch.zv_vdev;
642 avl_insert(t, zv, where);
645 mutex_exit(&zilog->zl_vdev_lock);
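/*
 * Flush the write caches of all vdevs recorded by zil_add_block() since the
 * last flush, and wait for the flushes to complete.
 */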
649 zil_flush_vdevs(zilog_t *zilog)
651 spa_t *spa = zilog->zl_spa;
652 avl_tree_t *t = &zilog->zl_vdev_tree;
657 ASSERT(zilog->zl_writer);
660 * We don't need zl_vdev_lock here because we're the zl_writer,
661 * and all zl_get_data() callbacks are done.
663 if (avl_numnodes(t) == 0)
666 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
668 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
670 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
671 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
674 kmem_free(zv, sizeof (*zv));
678 * Wait for all the flushes to complete. Not all devices actually
679 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
681 (void) zio_wait(zio);
683 spa_config_exit(spa, SCL_STATE, FTAG);
687 * Function called when a log block write completes
690 zil_lwb_write_done(zio_t *zio)
692 lwb_t *lwb = zio->io_private;
693 zilog_t *zilog = lwb->lwb_zilog;
695 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
696 ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
697 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
698 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
699 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
700 ASSERT(!BP_IS_GANG(zio->io_bp));
701 ASSERT(!BP_IS_HOLE(zio->io_bp));
702 ASSERT(zio->io_bp->blk_fill == 0);
705 * Ensure the lwb buffer pointer is cleared before releasing
706 * the txg. If we have had an allocation failure and
707 * the txg is waiting to sync then we want zil_sync()
708 * to remove the lwb so that it's not picked up as the next new
709 * one in zil_commit_writer(). zil_sync() will only remove
710 * the lwb if lwb_buf is null.
712 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
713 mutex_enter(&zilog->zl_lock);
716 zilog->zl_log_error = B_TRUE;
719 * Now that we've written this log block, we have a stable pointer
720 * to the next block in the chain, so it's OK to let the txg in
721 * which we allocated the next block sync. We still have the
722 * zl_lock to ensure zil_sync doesn't kmem free the lwb.
724 txg_rele_to_sync(&lwb->lwb_txgh);
725 mutex_exit(&zilog->zl_lock);
729 * Initialize the io for a log block.
732 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
736 zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
739 zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
741 if (zilog->zl_root_zio == NULL) {
742 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
745 if (lwb->lwb_zio == NULL) {
746 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
747 0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
748 zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
749 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb);
754 * Start a log block write and advance to the next log block.
755 * Calls are serialized.
758 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
761 zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
762 spa_t *spa = zilog->zl_spa;
763 blkptr_t *bp = &ztp->zit_next_blk;
768 ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
771 * Allocate the next block and save its address in this block
772 * before writing it in order to establish the log chain.
773 * Note that if the allocation of nlwb synced before we wrote
774 * the block that points at it (lwb), we'd leak it if we crashed.
775 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
777 txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
778 txg_rele_to_quiesce(&lwb->lwb_txgh);
781 * Pick a ZIL blocksize. We request a size that is the
782 * maximum of the previously used size, the current used size and
783 * the amount waiting in the queue.
785 zil_blksz = MAX(zilog->zl_prev_used,
786 zilog->zl_cur_used + sizeof (*ztp));
787 zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
788 zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
789 if (zil_blksz > ZIL_MAX_BLKSZ)
790 zil_blksz = ZIL_MAX_BLKSZ;
793 /* pass the old blkptr in order to spread log blocks across devs */
794 error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
796 dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
799 * We dirty the dataset to ensure that zil_sync() will
800 * be called to remove this lwb from our zl_lwb_list.
801 * Failing to do so may leave an lwb with a NULL lwb_buf
802 * hanging around on the zl_lwb_list.
804 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
808 * We've just experienced an allocation failure, so we
809 * terminate the current lwb and send it on its way.
812 ztp->zit_nused = lwb->lwb_nused;
813 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
814 zio_nowait(lwb->lwb_zio);
817 * By returning NULL the caller will call txg_wait_synced()
822 ASSERT3U(bp->blk_birth, ==, txg);
824 ztp->zit_nused = lwb->lwb_nused;
825 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
826 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
827 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
830 * Allocate a new log write buffer (lwb).
832 nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
834 nlwb->lwb_zilog = zilog;
837 nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
838 nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
839 nlwb->lwb_max_txg = txg;
840 nlwb->lwb_zio = NULL;
843 * Put the new lwb at the end of the log chain.
845 mutex_enter(&zilog->zl_lock);
846 list_insert_tail(&zilog->zl_lwb_list, nlwb);
847 mutex_exit(&zilog->zl_lock);
849 /* Record the block for later vdev flushing */
850 zil_add_block(zilog, &lwb->lwb_blk);
853 * Kick off the write for the old log block.
855 dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
856 ASSERT(lwb->lwb_zio);
857 zio_nowait(lwb->lwb_zio);
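/*
 * Copy an in-memory log record (itx) into the current log write buffer,
 * starting a new buffer via zil_lwb_write_start() if the record won't fit.
 * Returns the lwb to use for the next record, or NULL on allocation failure.
 */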
863 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
865 lr_t *lrc = &itx->itx_lr; /* common log record */
866 lr_write_t *lr = (lr_write_t *)lrc;
867 uint64_t txg = lrc->lrc_txg;
868 uint64_t reclen = lrc->lrc_reclen;
873 ASSERT(lwb->lwb_buf != NULL);
875 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
876 dlen = P2ROUNDUP_TYPED(
877 lr->lr_length, sizeof (uint64_t), uint64_t);
881 zilog->zl_cur_used += (reclen + dlen);
883 zil_lwb_write_init(zilog, lwb);
886 * If this record won't fit in the current log block, start a new one.
888 if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
889 lwb = zil_lwb_write_start(zilog, lwb);
892 zil_lwb_write_init(zilog, lwb);
893 ASSERT(lwb->lwb_nused == 0);
894 if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
895 txg_wait_synced(zilog->zl_dmu_pool, txg);
901 * Update lrc_seq to be the log record sequence number. See zil.h.
902 * Then copy the record to the log buffer.
904 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
905 bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
908 * If it's a write, fetch the data or get its blkptr as appropriate.
910 if (lrc->lrc_txtype == TX_WRITE) {
911 if (txg > spa_freeze_txg(zilog->zl_spa))
912 txg_wait_synced(zilog->zl_dmu_pool, txg);
913 if (itx->itx_wr_state != WR_COPIED) {
917 /* alignment is guaranteed */
918 lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
920 ASSERT(itx->itx_wr_state == WR_NEED_COPY);
921 dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
922 lr->lr_common.lrc_reclen += dlen;
924 ASSERT(itx->itx_wr_state == WR_INDIRECT);
927 error = zilog->zl_get_data(
928 itx->itx_private, lr, dbuf, lwb->lwb_zio);
930 txg_wait_synced(zilog->zl_dmu_pool, txg);
934 ASSERT(error == ENOENT || error == EEXIST ||
941 lwb->lwb_nused += reclen + dlen;
942 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
943 ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
944 ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
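/*
 * Allocate an in-memory intent log transaction (itx) whose log record is
 * lrsize bytes, rounded up to an 8-byte boundary.
 */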
950 zil_itx_create(uint64_t txtype, size_t lrsize)
954 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
956 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
957 itx->itx_lr.lrc_txtype = txtype;
958 itx->itx_lr.lrc_reclen = lrsize;
959 itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
960 itx->itx_lr.lrc_seq = 0; /* defensive */
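/*
 * Append the itx to the in-memory itx list, stamping it with the caller's
 * transaction group and the next log record sequence number.
 */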
966 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
970 ASSERT(itx->itx_lr.lrc_seq == 0);
972 mutex_enter(&zilog->zl_lock);
973 list_insert_tail(&zilog->zl_itx_list, itx);
974 zilog->zl_itx_list_sz += itx->itx_sod;
975 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
976 itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
977 mutex_exit(&zilog->zl_lock);
983 * Free up all in-memory intent log transactions that have now been synced.
986 zil_itx_clean(zilog_t *zilog)
988 uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
989 uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
993 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
995 mutex_enter(&zilog->zl_lock);
996 /* wait for a log writer to finish walking the list */
997 while (zilog->zl_writer) {
998 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1002 * Move the sync'd log transactions to a separate list so we can call
1003 * kmem_free without holding the zl_lock.
1005 * There is no need to set zl_writer as we don't drop zl_lock here
1007 while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
1008 itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
1009 list_remove(&zilog->zl_itx_list, itx);
1010 zilog->zl_itx_list_sz -= itx->itx_sod;
1011 list_insert_tail(&clean_list, itx);
1013 cv_broadcast(&zilog->zl_cv_writer);
1014 mutex_exit(&zilog->zl_lock);
1016 /* destroy sync'd log transactions */
1017 while ((itx = list_head(&clean_list)) != NULL) {
1018 list_remove(&clean_list, itx);
1019 kmem_free(itx, offsetof(itx_t, itx_lr)
1020 + itx->itx_lr.lrc_reclen);
1022 list_destroy(&clean_list);
1026 * If there are any in-memory intent log transactions which have now been
1027 * synced then start up a taskq to free them.
1030 zil_clean(zilog_t *zilog)
1034 mutex_enter(&zilog->zl_lock);
1035 itx = list_head(&zilog->zl_itx_list);
1036 if ((itx != NULL) &&
1037 (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
1038 (void) taskq_dispatch(zilog->zl_clean_taskq,
1039 (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
1041 mutex_exit(&zilog->zl_lock);
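/*
 * Called with zl_lock held (dropped and reacquired internally). Becomes the
 * zl_writer, fills log blocks with itxs up to the requested sequence number,
 * waits for the log writes and any needed cache flushes, and records the
 * resulting commit sequence number.
 */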
1045 zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
1048 uint64_t commit_seq = 0;
1049 itx_t *itx, *itx_next = (itx_t *)-1;
1053 zilog->zl_writer = B_TRUE;
1054 ASSERT(zilog->zl_root_zio == NULL);
1055 spa = zilog->zl_spa;
1057 if (zilog->zl_suspend) {
1060 lwb = list_tail(&zilog->zl_lwb_list);
1063 * Return if there's nothing to flush before we
1064 * dirty the fs by calling zil_create()
1066 if (list_is_empty(&zilog->zl_itx_list)) {
1067 zilog->zl_writer = B_FALSE;
1070 mutex_exit(&zilog->zl_lock);
1072 mutex_enter(&zilog->zl_lock);
1073 lwb = list_tail(&zilog->zl_lwb_list);
1077 /* Loop through in-memory log transactions filling log blocks. */
1078 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1081 * Find the next itx to push:
1082 * Push all transactions related to the specified foid and, for
1083 * all other files, all transactions except TX_WRITE,
1084 * TX_TRUNCATE, TX_SETATTR and TX_ACL.
1086 if (itx_next != (itx_t *)-1)
1089 itx = list_head(&zilog->zl_itx_list);
1090 for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
1091 if (foid == 0) /* push all foids? */
1093 if (itx->itx_sync) /* push all O_[D]SYNC */
1095 switch (itx->itx_lr.lrc_txtype) {
1100 /* lr_foid is same offset for these records */
1101 if (((lr_write_t *)&itx->itx_lr)->lr_foid
1103 continue; /* skip this record */
1111 if ((itx->itx_lr.lrc_seq > seq) &&
1112 ((lwb == NULL) || (lwb->lwb_nused == 0) ||
1113 (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
1118 * Save the next pointer. Even though we soon drop
1119 * zl_lock, all threads that may change the list
1120 * (another writer or zil_itx_clean) can't do so until
1121 * they have zl_writer.
1123 itx_next = list_next(&zilog->zl_itx_list, itx);
1124 list_remove(&zilog->zl_itx_list, itx);
1125 zilog->zl_itx_list_sz -= itx->itx_sod;
1126 mutex_exit(&zilog->zl_lock);
1127 txg = itx->itx_lr.lrc_txg;
1130 if (txg > spa_last_synced_txg(spa) ||
1131 txg > spa_freeze_txg(spa))
1132 lwb = zil_lwb_commit(zilog, itx, lwb);
1133 kmem_free(itx, offsetof(itx_t, itx_lr)
1134 + itx->itx_lr.lrc_reclen);
1135 mutex_enter(&zilog->zl_lock);
1137 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1138 /* determine commit sequence number */
1139 itx = list_head(&zilog->zl_itx_list);
1141 commit_seq = itx->itx_lr.lrc_seq;
1143 commit_seq = zilog->zl_itx_seq;
1144 mutex_exit(&zilog->zl_lock);
1146 /* write the last block out */
1147 if (lwb != NULL && lwb->lwb_zio != NULL)
1148 lwb = zil_lwb_write_start(zilog, lwb);
1150 zilog->zl_prev_used = zilog->zl_cur_used;
1151 zilog->zl_cur_used = 0;
1154 * Wait if necessary for the log blocks to be on stable storage.
1156 if (zilog->zl_root_zio) {
1157 DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
1158 (void) zio_wait(zilog->zl_root_zio);
1159 zilog->zl_root_zio = NULL;
1160 DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
1161 zil_flush_vdevs(zilog);
1164 if (zilog->zl_log_error || lwb == NULL) {
1165 zilog->zl_log_error = 0;
1166 txg_wait_synced(zilog->zl_dmu_pool, 0);
1169 mutex_enter(&zilog->zl_lock);
1170 zilog->zl_writer = B_FALSE;
1172 ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
1173 zilog->zl_commit_seq = commit_seq;
1177 * Push zfs transactions to stable storage up to the supplied sequence number.
1178 * If foid is 0, push out all transactions; otherwise push only those
1179 * for that file or that might have been used to create that file.
1182 zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
1184 if (zilog == NULL || seq == 0)
1187 mutex_enter(&zilog->zl_lock);
1189 seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */
1191 while (zilog->zl_writer) {
1192 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1193 if (seq < zilog->zl_commit_seq) {
1194 mutex_exit(&zilog->zl_lock);
1198 zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1199 /* wake up others waiting on the commit */
1200 cv_broadcast(&zilog->zl_cv_writer);
1201 mutex_exit(&zilog->zl_lock);
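/*
 * Illustrative use (hypothetical caller, not part of this file): a file
 * system's fsync path would typically build up itxs with zil_itx_create()
 * and zil_itx_assign() during the system call, then force them to stable
 * storage with a call such as zil_commit(zilog, seq, foid), where seq is
 * the sequence number returned by zil_itx_assign() and foid identifies
 * the file being synced.
 */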
1205 * Called in syncing context to free committed log blocks and update the log header.
1208 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1210 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1211 uint64_t txg = dmu_tx_get_txg(tx);
1212 spa_t *spa = zilog->zl_spa;
1216 * We don't zero out zl_destroy_txg, so make sure we don't try
1217 * to destroy it twice.
1219 if (spa_sync_pass(spa) != 1)
1222 mutex_enter(&zilog->zl_lock);
1224 ASSERT(zilog->zl_stop_sync == 0);
1226 zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];
1228 if (zilog->zl_destroy_txg == txg) {
1229 blkptr_t blk = zh->zh_log;
1231 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1233 bzero(zh, sizeof (zil_header_t));
1234 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1236 if (zilog->zl_keep_first) {
1238 * If this block was part of a log chain that couldn't
1239 * be claimed because a device was missing during
1240 * zil_claim(), but that device later returns,
1241 * then this block could erroneously appear valid.
1242 * To guard against this, assign a new GUID to the new
1243 * log chain so it doesn't matter what blk points to.
1245 zil_init_log_chain(zilog, &blk);
1250 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1251 zh->zh_log = lwb->lwb_blk;
1252 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1254 list_remove(&zilog->zl_lwb_list, lwb);
1255 zio_free_blk(spa, &lwb->lwb_blk, txg);
1256 kmem_cache_free(zil_lwb_cache, lwb);
1259 * If we don't have anything left in the lwb list then
1260 * we've had an allocation failure and we need to zero
1261 * out the zil_header blkptr so that we don't end
1262 * up freeing the same block twice.
1264 if (list_head(&zilog->zl_lwb_list) == NULL)
1265 BP_ZERO(&zh->zh_log);
1267 mutex_exit(&zilog->zl_lock);
1273 zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1274 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1280 kmem_cache_destroy(zil_lwb_cache);
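/*
 * Allocate and initialize the in-core zilog structure for an objset.
 */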
1284 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1288 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1290 zilog->zl_header = zh_phys;
1292 zilog->zl_spa = dmu_objset_spa(os);
1293 zilog->zl_dmu_pool = dmu_objset_pool(os);
1294 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1296 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1298 list_create(&zilog->zl_itx_list, sizeof (itx_t),
1299 offsetof(itx_t, itx_node));
1301 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1302 offsetof(lwb_t, lwb_node));
1304 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1306 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1307 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1309 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1310 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
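/*
 * Free the in-core zilog structure, releasing any lwbs still on its list.
 */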
1316 zil_free(zilog_t *zilog)
1320 zilog->zl_stop_sync = 1;
1322 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1323 list_remove(&zilog->zl_lwb_list, lwb);
1324 if (lwb->lwb_buf != NULL)
1325 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1326 kmem_cache_free(zil_lwb_cache, lwb);
1328 list_destroy(&zilog->zl_lwb_list);
1330 avl_destroy(&zilog->zl_vdev_tree);
1331 mutex_destroy(&zilog->zl_vdev_lock);
1333 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1334 list_destroy(&zilog->zl_itx_list);
1335 mutex_destroy(&zilog->zl_lock);
1337 cv_destroy(&zilog->zl_cv_writer);
1338 cv_destroy(&zilog->zl_cv_suspend);
1340 kmem_free(zilog, sizeof (zilog_t));
1344 * Open an intent log.
1347 zil_open(objset_t *os, zil_get_data_t *get_data)
1349 zilog_t *zilog = dmu_objset_zil(os);
1351 zilog->zl_get_data = get_data;
1352 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1353 2, 2, TASKQ_PREPOPULATE);
1359 * Close an intent log.
1362 zil_close(zilog_t *zilog)
1365 * If the log isn't already committed, mark the objset dirty
1366 * (so zil_sync() will be called) and wait for that txg to sync.
1368 if (!zil_is_committed(zilog)) {
1370 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1371 (void) dmu_tx_assign(tx, TXG_WAIT);
1372 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1373 txg = dmu_tx_get_txg(tx);
1375 txg_wait_synced(zilog->zl_dmu_pool, txg);
1378 taskq_destroy(zilog->zl_clean_taskq);
1379 zilog->zl_clean_taskq = NULL;
1380 zilog->zl_get_data = NULL;
1382 zil_itx_clean(zilog);
1383 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1387 * Suspend an intent log. While in suspended mode, we still honor
1388 * synchronous semantics, but we rely on txg_wait_synced() to do it.
1389 * We suspend the log briefly when taking a snapshot so that the snapshot
1390 * contains all the data it's supposed to, and has an empty intent log.
1393 zil_suspend(zilog_t *zilog)
1395 const zil_header_t *zh = zilog->zl_header;
1397 mutex_enter(&zilog->zl_lock);
1398 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
1399 mutex_exit(&zilog->zl_lock);
1402 if (zilog->zl_suspend++ != 0) {
1404 * Someone else already began a suspend.
1405 * Just wait for them to finish.
1407 while (zilog->zl_suspending)
1408 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1409 mutex_exit(&zilog->zl_lock);
1412 zilog->zl_suspending = B_TRUE;
1413 mutex_exit(&zilog->zl_lock);
1415 zil_commit(zilog, UINT64_MAX, 0);
1418 * Wait for any in-flight log writes to complete.
1420 mutex_enter(&zilog->zl_lock);
1421 while (zilog->zl_writer)
1422 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1423 mutex_exit(&zilog->zl_lock);
1425 zil_destroy(zilog, B_FALSE);
1427 mutex_enter(&zilog->zl_lock);
1428 zilog->zl_suspending = B_FALSE;
1429 cv_broadcast(&zilog->zl_cv_suspend);
1430 mutex_exit(&zilog->zl_lock);
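/*
 * Drop one zil_suspend() reference; the log may be used normally again once
 * the suspend count returns to zero.
 */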
1436 zil_resume(zilog_t *zilog)
1438 mutex_enter(&zilog->zl_lock);
1439 ASSERT(zilog->zl_suspend != 0);
1440 zilog->zl_suspend--;
1441 mutex_exit(&zilog->zl_lock);
1444 typedef struct zil_replay_arg {
1446 zil_replay_func_t **zr_replay;
1448 boolean_t zr_byteswap;
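/*
 * Replay one log record: skip records already committed or already replayed,
 * byteswap the copy if needed, read in TX_WRITE data referenced by blkptr,
 * and invoke the per-txtype replay vector.
 */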
1453 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1455 zil_replay_arg_t *zr = zra;
1456 const zil_header_t *zh = zilog->zl_header;
1457 uint64_t reclen = lr->lrc_reclen;
1458 uint64_t txtype = lr->lrc_txtype;
1462 if (!zilog->zl_replay) /* giving up */
1465 if (lr->lrc_txg < claim_txg) /* already committed */
1468 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
1471 /* Strip case-insensitive bit, still present in log record */
1474 if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1480 * Make a copy of the data so we can revise and extend it.
1482 bcopy(lr, zr->zr_lrbuf, reclen);
1485 * The log block containing this lr may have been byteswapped
1486 * so that we can easily examine common fields like lrc_txtype.
1487 * However, the log is a mix of different data types, and only the
1488 * replay vectors know how to byteswap their records. Therefore, if
1489 * the lr was byteswapped, undo it before invoking the replay vector.
1491 if (zr->zr_byteswap)
1492 byteswap_uint64_array(zr->zr_lrbuf, reclen);
1495 * If this is a TX_WRITE with a blkptr, suck in the data.
1497 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
1498 lr_write_t *lrw = (lr_write_t *)lr;
1499 blkptr_t *wbp = &lrw->lr_blkptr;
1500 uint64_t wlen = lrw->lr_length;
1501 char *wbuf = zr->zr_lrbuf + reclen;
1503 if (BP_IS_HOLE(wbp)) { /* compressed to a hole */
1507 * A subsequent write may have overwritten this block,
1508 * in which case wbp may have been freed and
1509 * reallocated, and our read of wbp may fail with a
1510 * checksum error. We can safely ignore this because
1511 * the later write will provide the correct data.
1515 zb.zb_objset = dmu_objset_id(zilog->zl_os);
1516 zb.zb_object = lrw->lr_foid;
1518 zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);
1520 (void) zio_wait(zio_read(NULL, zilog->zl_spa,
1521 wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
1522 ZIO_PRIORITY_SYNC_READ,
1523 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1524 (void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
1529 * We must now do two things atomically: replay this log record,
1530 * and update the log header sequence number to reflect the fact that
1531 * we did so. At the end of each replay function the sequence number
1532 * is updated if we are in replay mode.
1534 for (pass = 1; pass <= 2; pass++) {
1535 zilog->zl_replaying_seq = lr->lrc_seq;
1536 /* Only byteswap (if needed) on the 1st pass. */
1537 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1538 zr->zr_byteswap && pass == 1);
1544 * The DMU's dnode layer doesn't see removes until the txg
1545 * commits, so a subsequent claim can spuriously fail with
1546 * EEXIST. So if we receive any error we try syncing out
1547 * any removes, then retry the transaction.
1550 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1555 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1556 dmu_objset_name(zr->zr_os, name);
1557 cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1558 "dataset %s, seq 0x%llx, txtype %llu %s\n",
1559 error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
1560 (lr->lrc_txtype & TX_CI) ? "CI" : "");
1561 zilog->zl_replay = B_FALSE;
1562 kmem_free(name, MAXNAMELEN);
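/*
 * zil_parse() block callback used by zil_replay() to count log blocks walked.
 */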
1567 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1569 zilog->zl_replay_blks++;
1573 * If this dataset has a non-empty intent log, replay it and destroy it.
1576 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1578 zilog_t *zilog = dmu_objset_zil(os);
1579 const zil_header_t *zh = zilog->zl_header;
1580 zil_replay_arg_t zr;
1582 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
1583 zil_destroy(zilog, B_TRUE);
1588 zr.zr_replay = replay_func;
1590 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1591 zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1594 * Wait for in-progress removes to sync before starting replay.
1596 txg_wait_synced(zilog->zl_dmu_pool, 0);
1598 zilog->zl_replay = B_TRUE;
1599 zilog->zl_replay_time = lbolt;
1600 ASSERT(zilog->zl_replay_blks == 0);
1601 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1603 kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1605 zil_destroy(zilog, B_FALSE);
1606 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1607 zilog->zl_replay = B_FALSE;
1611 * Report whether all transactions are committed.
1614 zil_is_committed(zilog_t *zilog)
1619 mutex_enter(&zilog->zl_lock);
1620 while (zilog->zl_writer)
1621 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1623 /* recent unpushed intent log transactions? */
1624 if (!list_is_empty(&zilog->zl_itx_list)) {
1629 /* intent log never used? */
1630 lwb = list_head(&zilog->zl_lwb_list);
1637 * More than one log buffer means zil_sync() hasn't yet freed
1638 * entries after a txg has committed.
1640 if (list_next(&zilog->zl_lwb_list, lwb)) {
1645 ASSERT(zil_empty(zilog));
1648 cv_broadcast(&zilog->zl_cv_writer);
1649 mutex_exit(&zilog->zl_lock);
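/*
 * Suspend a dataset's intent log so that its log device can be taken
 * offline; fails if the log cannot be suspended.
 */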
1655 zil_vdev_offline(char *osname, void *arg)
1661 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
1665 zilog = dmu_objset_zil(os);
1666 if (zil_suspend(zilog) != 0)
1670 dmu_objset_close(os);