4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "@(#)zio.c 1.32 08/03/20 SMI"
28 #include <sys/zfs_context.h>
29 #include <sys/fm/fs/zfs.h>
32 #include <sys/spa_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/zio_impl.h>
35 #include <sys/zio_compress.h>
36 #include <sys/zio_checksum.h>
39 * ==========================================================================
40 * I/O priority table
41 * ==========================================================================
43 uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
44 0, /* ZIO_PRIORITY_NOW */
45 0, /* ZIO_PRIORITY_SYNC_READ */
46 0, /* ZIO_PRIORITY_SYNC_WRITE */
47 6, /* ZIO_PRIORITY_ASYNC_READ */
48 4, /* ZIO_PRIORITY_ASYNC_WRITE */
49 4, /* ZIO_PRIORITY_FREE */
50 0, /* ZIO_PRIORITY_CACHE_FILL */
51 0, /* ZIO_PRIORITY_LOG_WRITE */
52 10, /* ZIO_PRIORITY_RESILVER */
53 20, /* ZIO_PRIORITY_SCRUB */
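/*
 * Illustrative note: lower values here mean higher urgency when the vdev
 * queue computes I/O deadlines, so (for example) a sync read or write (0)
 * is serviced ahead of an async write (4), a resilver (10), or a scrub (20).
 */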
57 * ==========================================================================
58 * I/O type descriptions
59 * ==========================================================================
61 char *zio_type_name[ZIO_TYPES] = {
62 "null", "read", "write", "free", "claim", "ioctl" };
64 /* Force an allocation failure when non-zero */
65 uint16_t zio_zil_fail_shift = 0;
66 uint16_t zio_io_fail_shift = 0;
68 /* Enable/disable the write-retry logic */
69 int zio_write_retry = 1;
71 /* Taskq to handle reissuing of I/Os */
72 taskq_t *zio_taskq;
73 int zio_resume_threads = 4;
75 typedef struct zio_sync_pass {
76 int zp_defer_free; /* defer frees after this pass */
77 int zp_dontcompress; /* don't compress after this pass */
78 int zp_rewrite; /* rewrite new bps after this pass */
79 } zio_sync_pass_t;
81 zio_sync_pass_t zio_sync_pass = {
82 1, /* zp_defer_free */
83 4, /* zp_dontcompress */
84 1, /* zp_rewrite */
87 static boolean_t zio_io_should_fail(uint16_t);
88 static void zio_execute(zio_t *zio);
90 * ==========================================================================
91 * I/O kmem caches
92 * ==========================================================================
94 kmem_cache_t *zio_cache;
95 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
96 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
99 extern vmem_t *zio_alloc_arena;
103 * Determine if we are allowed to issue the I/O based on the
104 * pool state. If we must wait, block until we are told
105 * that we may continue.
107 #define ZIO_ENTER(spa) { \
108 if (spa->spa_state == POOL_STATE_IO_FAILURE) { \
109 mutex_enter(&spa->spa_zio_lock); \
110 while (spa->spa_state == POOL_STATE_IO_FAILURE) \
111 cv_wait(&spa->spa_zio_cv, &spa->spa_zio_lock); \
112 mutex_exit(&spa->spa_zio_lock); \
117 * An allocation zio is one that either currently has the DVA allocate
118 * stage set or will have it later in its lifetime.
120 #define IO_IS_ALLOCATING(zio) \
121 ((zio)->io_orig_pipeline & (1U << ZIO_STAGE_DVA_ALLOCATE))
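/*
 * For example, a zio created with ZIO_WRITE_PIPELINE counts as allocating
 * for its entire lifetime, even after the DVA_ALLOCATE stage has executed,
 * because the test is against io_orig_pipeline rather than io_pipeline.
 */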
124 zio_init(void)
126 size_t c;
127 vmem_t *data_alloc_arena = NULL;
130 data_alloc_arena = zio_alloc_arena;
133 zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0,
134 NULL, NULL, NULL, NULL, NULL, 0);
137 * For small buffers, we want a cache for each multiple of
138 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache
139 * for each quarter-power of 2. For large buffers, we want
140 * a cache for each multiple of PAGESIZE.
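* A worked example, assuming a 4K PAGESIZE: a 1.5K buffer gets a cache
* aligned on SPA_MINBLOCKSIZE (512 bytes); an 8K buffer is a multiple of
* PAGESIZE, so it is page-aligned; a 5K buffer is a quarter-power-of-2
* (4K + 1K), so it gets a 1K-aligned cache; a 4.5K buffer matches none of
* these rules, so no cache is created for it and the fixup loop below
* points it at the next larger cache (5K).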
142 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
143 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
147 while (p2 & (p2 - 1))
148 p2 &= p2 - 1;
150 if (size <= 4 * SPA_MINBLOCKSIZE) {
151 align = SPA_MINBLOCKSIZE;
152 } else if (P2PHASE(size, PAGESIZE) == 0) {
153 align = PAGESIZE;
154 } else if (P2PHASE(size, p2 >> 2) == 0) {
155 align = p2 >> 2;
158 if (align != 0) {
159 char name[36];
160 (void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
161 zio_buf_cache[c] = kmem_cache_create(name, size,
162 align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
164 (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
165 zio_data_buf_cache[c] = kmem_cache_create(name, size,
166 align, NULL, NULL, NULL, NULL, data_alloc_arena,
167 KMC_NODEBUG);
172 while (--c != 0) {
173 ASSERT(zio_buf_cache[c] != NULL);
174 if (zio_buf_cache[c - 1] == NULL)
175 zio_buf_cache[c - 1] = zio_buf_cache[c];
177 ASSERT(zio_data_buf_cache[c] != NULL);
178 if (zio_data_buf_cache[c - 1] == NULL)
179 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
182 zio_taskq = taskq_create("zio_taskq", zio_resume_threads,
183 maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
187 zio_fini(void)
189 size_t c;
192 kmem_cache_t *last_cache = NULL;
193 kmem_cache_t *last_data_cache = NULL;
195 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
196 if (zio_buf_cache[c] != last_cache) {
197 last_cache = zio_buf_cache[c];
198 kmem_cache_destroy(zio_buf_cache[c]);
200 zio_buf_cache[c] = NULL;
202 if (zio_data_buf_cache[c] != last_data_cache) {
203 last_data_cache = zio_data_buf_cache[c];
204 kmem_cache_destroy(zio_data_buf_cache[c]);
206 zio_data_buf_cache[c] = NULL;
209 taskq_destroy(zio_taskq);
211 kmem_cache_destroy(zio_cache);
217 * ==========================================================================
218 * Allocate and free I/O buffers
219 * ==========================================================================
223 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
224 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
225 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
226 * excess / transient data in-core during a crashdump.
229 zio_buf_alloc(size_t size)
231 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
233 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
235 return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
239 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
240 * crashdump if the kernel panics. This exists so that we can limit the amount
241 * of ZFS data that shows up in a kernel crashdump, thus reducing the amount
242 * of kernel heap dumped to disk when the kernel panics.
245 zio_data_buf_alloc(size_t size)
247 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
249 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
251 return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
255 zio_buf_free(void *buf, size_t size)
257 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
259 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
261 kmem_cache_free(zio_buf_cache[c], buf);
265 zio_data_buf_free(void *buf, size_t size)
267 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
269 ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
271 kmem_cache_free(zio_data_buf_cache[c], buf);
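/*
 * A minimal usage sketch (hypothetical caller): the size passed to the
 * free routine must equal the size passed to the matching alloc routine,
 * since it alone selects the kmem cache. For example, with a 512-byte
 * SPA_MINBLOCKSIZE, a 3K request maps to index c = (3072 - 1) >> 9 = 5:
 *
 * void *buf = zio_buf_alloc(3072);
 * ...
 * zio_buf_free(buf, 3072);
 */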
275 * ==========================================================================
276 * Push and pop I/O transform buffers
277 * ==========================================================================
280 zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize)
282 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
286 zt->zt_bufsize = bufsize;
288 zt->zt_next = zio->io_transform_stack;
289 zio->io_transform_stack = zt;
296 zio_pop_transform(zio_t *zio, void **data, uint64_t *size, uint64_t *bufsize)
298 zio_transform_t *zt = zio->io_transform_stack;
302 *bufsize = zt->zt_bufsize;
304 zio->io_transform_stack = zt->zt_next;
305 kmem_free(zt, sizeof (zio_transform_t));
307 if ((zt = zio->io_transform_stack) != NULL) {
308 zio->io_data = zt->zt_data;
309 zio->io_size = zt->zt_size;
314 zio_clear_transform_stack(zio_t *zio)
317 uint64_t size, bufsize;
319 ASSERT(zio->io_transform_stack != NULL);
321 zio_pop_transform(zio, &data, &size, &bufsize);
322 while (zio->io_transform_stack != NULL) {
323 zio_buf_free(data, bufsize);
324 zio_pop_transform(zio, &data, &size, &bufsize);
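/*
 * Illustrative sequence for a compressed read: zio_create() pushes the
 * caller's buffer, zio_read_init() pushes a psize-sized cbuf (so io_data
 * points at cbuf for the device I/O), and zio_read_decompress() pops the
 * cbuf, restoring io_data to the caller's buffer before inflating into it.
 */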
329 * ==========================================================================
330 * Create the various types of I/O (read, write, free)
331 * ==========================================================================
334 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
335 void *data, uint64_t size, zio_done_func_t *done, void *private,
336 zio_type_t type, int priority, int flags, uint8_t stage, uint32_t pipeline)
340 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
341 ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
343 zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
344 bzero(zio, sizeof (zio_t));
345 zio->io_parent = pio;
346 zio->io_spa = spa;
347 zio->io_txg = txg;
348 zio->io_flags = flags;
349 if (bp != NULL) {
350 zio->io_bp = bp;
351 zio->io_bp_copy = *bp;
352 zio->io_bp_orig = *bp;
354 zio->io_done = done;
355 zio->io_private = private;
356 zio->io_type = type;
357 zio->io_priority = priority;
358 zio->io_stage = stage;
359 zio->io_pipeline = pipeline;
360 zio->io_timestamp = lbolt64;
361 mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
362 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
363 zio_push_transform(zio, data, size, size);
366 * Note on config lock:
368 * If CONFIG_HELD is set, then the caller already has the config
369 * lock, so we don't need it for this io.
371 * We set CONFIG_GRABBED to indicate that we have grabbed the
372 * config lock on behalf of this io, so it should be released
373 * by zio_done.
375 * Unless CONFIG_HELD is set, we will grab the config lock for
376 * any top-level (parent-less) io, *except* NULL top-level ios.
377 * The NULL top-level ios rarely have any children, so we delay
378 * grabbing the lock until the first child is added (but it is
379 * still grabbed on behalf of the top-level i/o, so additional
380 * children don't need to also grab it). This greatly reduces
381 * contention on the config lock.
383 if (pio == NULL) {
384 if (type != ZIO_TYPE_NULL &&
385 !(flags & ZIO_FLAG_CONFIG_HELD)) {
386 spa_config_enter(spa, RW_READER, zio);
387 zio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
389 zio->io_root = zio;
390 } else {
391 zio->io_root = pio->io_root;
392 if (!(flags & ZIO_FLAG_NOBOOKMARK))
393 zio->io_logical = pio->io_logical;
394 mutex_enter(&pio->io_lock);
395 if (pio->io_parent == NULL &&
396 pio->io_type == ZIO_TYPE_NULL &&
397 !(pio->io_flags & ZIO_FLAG_CONFIG_GRABBED) &&
398 !(pio->io_flags & ZIO_FLAG_CONFIG_HELD)) {
399 pio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
400 spa_config_enter(spa, RW_READER, pio);
402 if (stage < ZIO_STAGE_READY)
403 pio->io_children_notready++;
404 pio->io_children_notdone++;
405 zio->io_sibling_next = pio->io_child;
406 zio->io_sibling_prev = NULL;
407 if (pio->io_child != NULL)
408 pio->io_child->io_sibling_prev = zio;
409 pio->io_child = zio;
410 zio->io_ndvas = pio->io_ndvas;
411 mutex_exit(&pio->io_lock);
415 * Save off the original state in case we need to retry later.
417 zio->io_orig_stage = zio->io_stage;
418 zio->io_orig_pipeline = zio->io_pipeline;
419 zio->io_orig_flags = zio->io_flags;
425 zio_reset(zio_t *zio)
427 zio_clear_transform_stack(zio);
429 zio->io_flags = zio->io_orig_flags;
430 zio->io_stage = zio->io_orig_stage;
431 zio->io_pipeline = zio->io_orig_pipeline;
432 zio_push_transform(zio, zio->io_data, zio->io_size, zio->io_size);
436 zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private,
441 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
442 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, ZIO_STAGE_OPEN,
443 ZIO_WAIT_FOR_CHILDREN_PIPELINE);
449 zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
451 return (zio_null(NULL, spa, done, private, flags));
455 zio_read(zio_t *pio, spa_t *spa, blkptr_t *bp, void *data,
456 uint64_t size, zio_done_func_t *done, void *private,
457 int priority, int flags, zbookmark_t *zb)
461 ASSERT3U(size, ==, BP_GET_LSIZE(bp));
464 * If the user has specified that we allow I/Os to continue
465 * then attempt to satisfy the read.
467 if (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
468 ZIO_ENTER(spa);
470 zio = zio_create(pio, spa, bp->blk_birth, bp, data, size, done, private,
471 ZIO_TYPE_READ, priority, flags | ZIO_FLAG_USER,
472 ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);
473 zio->io_bookmark = *zb;
475 zio->io_logical = zio;
478 * Work off our copy of the bp so the caller can free it.
480 zio->io_bp = &zio->io_bp_copy;
486 zio_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
487 uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
488 zio_done_func_t *ready, zio_done_func_t *done, void *private, int priority,
489 int flags, zbookmark_t *zb)
493 ASSERT(checksum >= ZIO_CHECKSUM_OFF &&
494 checksum < ZIO_CHECKSUM_FUNCTIONS);
496 ASSERT(compress >= ZIO_COMPRESS_OFF &&
497 compress < ZIO_COMPRESS_FUNCTIONS);
501 zio = zio_create(pio, spa, txg, bp, data, size, done, private,
502 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
503 ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);
505 zio->io_ready = ready;
507 zio->io_bookmark = *zb;
509 zio->io_logical = zio;
511 zio->io_checksum = checksum;
512 zio->io_compress = compress;
513 zio->io_ndvas = ncopies;
515 if (bp->blk_birth != txg) {
516 /* XXX the bp usually (always?) gets re-zeroed later */
517 BP_ZERO(bp);
518 BP_SET_LSIZE(bp, size);
519 BP_SET_PSIZE(bp, size);
521 /* Make sure someone doesn't change their mind on overwrites */
522 ASSERT(MIN(zio->io_ndvas + BP_IS_GANG(bp),
523 spa_max_replication(spa)) == BP_GET_NDVAS(bp));
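/*
 * The overwrite invariant above, illustrated: rewriting with ncopies == 2
 * on a pool whose max replication is 3 requires that the existing bp
 * already has exactly 2 DVAs; a gang bp is allowed one extra DVA because
 * its gang header is allocated with ndvas + 1 copies.
 */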
530 zio_rewrite(zio_t *pio, spa_t *spa, int checksum,
531 uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
532 zio_done_func_t *done, void *private, int priority, int flags,
533 zbookmark_t *zb)
537 zio = zio_create(pio, spa, txg, bp, data, size, done, private,
538 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
539 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE(bp));
541 zio->io_bookmark = *zb;
542 zio->io_checksum = checksum;
543 zio->io_compress = ZIO_COMPRESS_OFF;
545 if (pio != NULL)
546 ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));
552 zio_write_allocate_ready(zio_t *zio)
554 /* Free up the previous block */
555 if (!BP_IS_HOLE(&zio->io_bp_orig)) {
556 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
557 &zio->io_bp_orig, NULL, NULL));
562 zio_write_allocate(zio_t *pio, spa_t *spa, int checksum,
563 uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
564 zio_done_func_t *done, void *private, int priority, int flags)
569 BP_SET_LSIZE(bp, size);
570 BP_SET_PSIZE(bp, size);
571 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
573 zio = zio_create(pio, spa, txg, bp, data, size, done, private,
574 ZIO_TYPE_WRITE, priority, flags,
575 ZIO_STAGE_OPEN, ZIO_WRITE_ALLOCATE_PIPELINE);
577 zio->io_checksum = checksum;
578 zio->io_compress = ZIO_COMPRESS_OFF;
579 zio->io_ready = zio_write_allocate_ready;
585 zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
586 zio_done_func_t *done, void *private)
590 ASSERT(!BP_IS_HOLE(bp));
592 if (txg == spa->spa_syncing_txg &&
593 spa->spa_sync_pass > zio_sync_pass.zp_defer_free) {
594 bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
595 return (zio_null(pio, spa, NULL, NULL, 0));
598 zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
599 ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, ZIO_FLAG_USER,
600 ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE(bp));
602 zio->io_bp = &zio->io_bp_copy;
608 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
609 zio_done_func_t *done, void *private)
614 * A claim is an allocation of a specific block. Claims are needed
615 * to support immediate writes in the intent log. The issue is that
616 * immediate writes contain committed data, but in a txg that was
617 * *not* committed. Upon opening the pool after an unclean shutdown,
618 * the intent log claims all blocks that contain immediate write data
619 * so that the SPA knows they're in use.
621 * All claims *must* be resolved in the first txg -- before the SPA
622 * starts allocating blocks -- so that nothing is allocated twice.
624 ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
625 ASSERT3U(spa_first_txg(spa), <=, txg);
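/*
 * For example, a block written by an intent-log immediate write in txg
 * 100 just before a crash is claimed here during pool open, so the
 * allocator never hands that block out again before the log is replayed.
 */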
627 zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
628 ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 0,
629 ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE(bp));
631 zio->io_bp = &zio->io_bp_copy;
637 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
638 zio_done_func_t *done, void *private, int priority, int flags)
643 if (vd->vdev_children == 0) {
644 zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
645 ZIO_TYPE_IOCTL, priority, flags,
646 ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
648 zio->io_cmd = cmd;
651 zio = zio_null(pio, spa, NULL, NULL, flags);
653 for (c = 0; c < vd->vdev_children; c++)
654 zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
655 done, private, priority, flags));
662 zio_phys_bp_init(vdev_t *vd, blkptr_t *bp, uint64_t offset, uint64_t size,
663 int checksum, boolean_t labels)
665 ASSERT(vd->vdev_children == 0);
667 ASSERT(size <= SPA_MAXBLOCKSIZE);
668 ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
669 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
673 ASSERT(offset + size <= VDEV_LABEL_START_SIZE ||
674 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
677 ASSERT3U(offset + size, <=, vd->vdev_psize);
679 BP_ZERO(bp);
681 BP_SET_LSIZE(bp, size);
682 BP_SET_PSIZE(bp, size);
684 BP_SET_CHECKSUM(bp, checksum);
685 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
686 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
688 if (checksum != ZIO_CHECKSUM_OFF)
689 ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0);
693 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
694 void *data, int checksum, zio_done_func_t *done, void *private,
695 int priority, int flags, boolean_t labels)
700 ZIO_ENTER(vd->vdev_spa);
702 zio_phys_bp_init(vd, &blk, offset, size, checksum, labels);
704 zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
705 ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL,
706 ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
709 zio->io_offset = offset;
712 * Work off our copy of the bp so the caller can free it.
714 zio->io_bp = &zio->io_bp_copy;
720 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
721 void *data, int checksum, zio_done_func_t *done, void *private,
722 int priority, int flags, boolean_t labels)
724 zio_block_tail_t *zbt;
729 ZIO_ENTER(vd->vdev_spa);
731 zio_phys_bp_init(vd, &blk, offset, size, checksum, labels);
733 zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
734 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL,
735 ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
738 zio->io_offset = offset;
740 zio->io_bp = &zio->io_bp_copy;
741 zio->io_checksum = checksum;
743 if (zio_checksum_table[checksum].ci_zbt) {
745 * zbt checksums are necessarily destructive -- they modify
746 * one word of the write buffer to hold the verifier/checksum.
747 * Therefore, we must make a local copy in case the data is
748 * being written to multiple places.
750 wbuf = zio_buf_alloc(size);
751 bcopy(data, wbuf, size);
752 zio_push_transform(zio, wbuf, size, size);
754 zbt = (zio_block_tail_t *)((char *)wbuf + size) - 1;
755 zbt->zbt_cksum = blk.blk_cksum;
762 * Create a child I/O to do some work for us. It has no associated bp.
765 zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
766 void *data, uint64_t size, int type, int priority, int flags,
767 zio_done_func_t *done, void *private)
769 uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
772 if (type == ZIO_TYPE_READ && bp != NULL) {
774 * If we have the bp, then the child should perform the
775 * checksum and the parent need not. This pushes error
776 * detection as close to the leaves as possible and
777 * eliminates redundant checksums in the interior nodes.
779 pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
780 zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
783 cio = zio_create(zio, zio->io_spa, zio->io_txg, bp, data, size,
784 done, private, type, priority,
785 (zio->io_flags & ZIO_FLAG_VDEV_INHERIT) | ZIO_FLAG_CANFAIL | flags,
786 ZIO_STAGE_VDEV_IO_START - 1, pipeline);
789 cio->io_offset = offset;
795 * ==========================================================================
796 * Initiate I/O, either sync or async
797 * ==========================================================================
801 zio_wait(zio_t *zio)
804 ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
806 zio->io_waiter = curthread;
808 zio_execute(zio);
810 mutex_enter(&zio->io_lock);
811 while (zio->io_stalled != ZIO_STAGE_DONE)
812 cv_wait(&zio->io_cv, &zio->io_lock);
813 mutex_exit(&zio->io_lock);
815 error = zio->io_error;
816 mutex_destroy(&zio->io_lock);
817 cv_destroy(&zio->io_cv);
818 kmem_cache_free(zio_cache, zio);
820 return (error);
824 zio_nowait(zio_t *zio)
826 zio_execute(zio);
830 zio_interrupt(zio_t *zio)
832 (void) taskq_dispatch(zio->io_spa->spa_zio_intr_taskq[zio->io_type],
833 (task_func_t *)zio_execute, zio, TQ_SLEEP);
837 zio_issue_async(zio_t *zio)
839 (void) taskq_dispatch(zio->io_spa->spa_zio_issue_taskq[zio->io_type],
840 (task_func_t *)zio_execute, zio, TQ_SLEEP);
842 return (ZIO_PIPELINE_STOP);
846 * ==========================================================================
847 * I/O pipeline interlocks: parent/child dependency scoreboarding
848 * ==========================================================================
851 zio_wait_for_children(zio_t *zio, uint32_t stage, uint64_t *countp)
853 int rv = ZIO_PIPELINE_CONTINUE;
855 mutex_enter(&zio->io_lock);
856 ASSERT(zio->io_stalled == 0);
857 if (*countp != 0) {
858 zio->io_stalled = stage;
859 rv = ZIO_PIPELINE_STOP;
861 mutex_exit(&zio->io_lock);
867 zio_notify_parent(zio_t *zio, uint32_t stage, uint64_t *countp)
869 zio_t *pio = zio->io_parent;
871 mutex_enter(&pio->io_lock);
872 if (pio->io_error == 0 && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
873 pio->io_error = zio->io_error;
874 ASSERT3U(*countp, >, 0);
875 if (--*countp == 0 && pio->io_stalled == stage) {
876 pio->io_stalled = 0;
877 mutex_exit(&pio->io_lock);
878 zio_execute(pio);
879 } else {
880 mutex_exit(&pio->io_lock);
885 zio_wait_for_children_ready(zio_t *zio)
887 return (zio_wait_for_children(zio, ZIO_STAGE_WAIT_FOR_CHILDREN_READY,
888 &zio->io_children_notready));
892 zio_wait_for_children_done(zio_t *zio)
894 return (zio_wait_for_children(zio, ZIO_STAGE_WAIT_FOR_CHILDREN_DONE,
895 &zio->io_children_notdone));
899 zio_read_init(zio_t *zio)
901 blkptr_t *bp = zio->io_bp;
903 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
904 uint64_t csize = BP_GET_PSIZE(bp);
905 void *cbuf = zio_buf_alloc(csize);
907 zio_push_transform(zio, cbuf, csize, csize);
908 zio->io_pipeline |= 1U << ZIO_STAGE_READ_DECOMPRESS;
911 if (BP_IS_GANG(bp)) {
912 uint64_t gsize = SPA_GANGBLOCKSIZE;
913 void *gbuf = zio_buf_alloc(gsize);
915 zio_push_transform(zio, gbuf, gsize, gsize);
916 zio->io_pipeline |= 1U << ZIO_STAGE_READ_GANG_MEMBERS;
919 if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
920 zio->io_flags |= ZIO_FLAG_DONT_CACHE;
922 return (ZIO_PIPELINE_CONTINUE);
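/*
 * For example, reading a 4K logical block stored compressed as 2K pushes
 * a 2K cbuf here so the device read lands in cbuf; the READ_DECOMPRESS
 * stage later inflates it into the caller's 4K buffer.
 */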
926 zio_ready(zio_t *zio)
928 zio_t *pio = zio->io_parent;
930 if (zio->io_ready)
931 zio->io_ready(zio);
933 if (pio != NULL)
934 zio_notify_parent(zio, ZIO_STAGE_WAIT_FOR_CHILDREN_READY,
935 &pio->io_children_notready);
937 if (zio->io_bp)
938 zio->io_bp_copy = *zio->io_bp;
940 return (ZIO_PIPELINE_CONTINUE);
944 zio_vdev_retry_io(zio_t *zio)
946 zio_t *pio = zio->io_parent;
949 * Preserve the failed bp so that the io_ready() callback can
950 * update the accounting accordingly. The callback will also be
951 * responsible for freeing the previously allocated block, if one
952 * exists.
954 zio->io_bp_orig = *zio->io_bp;
957 * We must zero out the old DVA and blk_birth before reallocating
958 * the bp.
960 BP_ZERO_DVAS(zio->io_bp);
961 zio_reset(zio);
963 if (pio) {
965 * Let the parent know that we will
966 * re-alloc the write (=> new bp info).
968 mutex_enter(&pio->io_lock);
969 pio->io_children_notready++;
972 * If the parent I/O is still in the open stage, then
973 * don't bother telling it to retry since it hasn't
974 * progressed far enough for it to care.
976 if (pio->io_stage > ZIO_STAGE_OPEN && IO_IS_ALLOCATING(pio))
977 pio->io_flags |= ZIO_FLAG_WRITE_RETRY;
979 ASSERT(pio->io_stage <= ZIO_STAGE_WAIT_FOR_CHILDREN_DONE);
980 mutex_exit(&pio->io_lock);
984 * We are getting ready to process the retry request so clear
985 * the flag and the zio's current error status.
987 zio->io_flags &= ~ZIO_FLAG_WRITE_RETRY;
988 zio->io_error = 0;
990 return (ZIO_PIPELINE_CONTINUE);
994 zio_vdev_resume_io(spa_t *spa)
998 mutex_enter(&spa->spa_zio_lock);
1001 * Probe all of the vdevs that have experienced an I/O error.
1002 * If we are still unable to verify the integrity of a vdev,
1003 * then we prevent the resume from proceeding.
1005 for (zio = list_head(&spa->spa_zio_list); zio != NULL;
1006 zio = list_next(&spa->spa_zio_list, zio)) {
1009 /* We only care about I/Os that must succeed */
1010 if (zio->io_vd == NULL || zio->io_flags & ZIO_FLAG_CANFAIL)
1011 continue;
1012 error = vdev_probe(zio->io_vd);
1013 if (error) {
1014 mutex_exit(&spa->spa_zio_lock);
1015 return (error);
1020 * Clear the vdev stats so that I/O can flow.
1022 vdev_clear(spa, NULL, B_FALSE);
1024 spa->spa_state = POOL_STATE_ACTIVE;
1025 while ((zio = list_head(&spa->spa_zio_list)) != NULL) {
1026 list_remove(&spa->spa_zio_list, zio);
1030 * If we are resuming an allocating I/O then we force it
1031 * to retry and let it resume operation where it left off.
1032 * Otherwise, go back to the ready stage and pick up from
1033 * there.
1035 if (zio_write_retry && IO_IS_ALLOCATING(zio)) {
1036 zio->io_flags |= ZIO_FLAG_WRITE_RETRY;
1039 zio->io_stage = ZIO_STAGE_READY;
1042 (void) taskq_dispatch(zio_taskq, (task_func_t *)zio_execute,
1043 zio, TQ_SLEEP);
1045 mutex_exit(&spa->spa_zio_lock);
1048 * Wait for the taskqs to finish and recheck the pool state since
1049 * it's possible that a resumed I/O has failed again.
1051 taskq_wait(zio_taskq);
1052 if (spa_state(spa) == POOL_STATE_IO_FAILURE)
1053 return (EIO);
1055 mutex_enter(&spa->spa_zio_lock);
1056 cv_broadcast(&spa->spa_zio_cv);
1057 mutex_exit(&spa->spa_zio_lock);
1059 return (0);
1063 zio_vdev_suspend_io(zio_t *zio)
1065 spa_t *spa = zio->io_spa;
1068 * We've experienced an unrecoverable failure so
1069 * set the pool state accordingly and queue all
1070 * failed I/Os.
1072 spa->spa_state = POOL_STATE_IO_FAILURE;
1074 mutex_enter(&spa->spa_zio_lock);
1075 list_insert_tail(&spa->spa_zio_list, zio);
1078 /* Used to notify ztest that the pool has suspended */
1079 cv_broadcast(&spa->spa_zio_cv);
1081 mutex_exit(&spa->spa_zio_lock);
1083 return (ZIO_PIPELINE_STOP);
1087 zio_assess(zio_t *zio)
1089 spa_t *spa = zio->io_spa;
1090 blkptr_t *bp = zio->io_bp;
1091 vdev_t *vd = zio->io_vd;
1093 ASSERT(zio->io_children_notready == 0);
1094 ASSERT(zio->io_children_notdone == 0);
1096 if (bp != NULL) {
1097 ASSERT(bp->blk_pad[0] == 0);
1098 ASSERT(bp->blk_pad[1] == 0);
1099 ASSERT(bp->blk_pad[2] == 0);
1100 ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0);
1101 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
1102 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
1103 ASSERT(!BP_SHOULD_BYTESWAP(bp));
1104 if (zio->io_ndvas != 0)
1105 ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));
1106 ASSERT(BP_COUNT_GANG(bp) == 0 ||
1107 (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
1112 * Some child I/O has indicated that a retry is necessary, so
1113 * we set an error on the I/O and let the logic below do the
1114 * rest.
1116 if (zio->io_flags & ZIO_FLAG_WRITE_RETRY)
1117 zio->io_error = ERESTART;
1120 vdev_stat_update(zio);
1122 if (zio->io_error) {
1124 * If this I/O is attached to a particular vdev,
1125 * generate an error message describing the I/O failure
1126 * at the block level. We ignore these errors if the
1127 * device is currently unavailable.
1129 if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
1130 zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
1132 if ((zio->io_error == EIO ||
1133 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) &&
1134 zio->io_logical == zio) {
1136 * For root I/O requests, tell the SPA to log the error
1137 * appropriately. Also, generate a logical data
1138 * ereport.
1140 spa_log_error(spa, zio);
1142 zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
1143 0, 0);
1147 * If we are an allocating I/O then we attempt to reissue
1148 * the I/O on another vdev unless the pool is out of space.
1149 * We handle this condition based on the spa's failmode
1150 * property.
1152 if (zio_write_retry && zio->io_error != ENOSPC &&
1153 IO_IS_ALLOCATING(zio))
1154 return (zio_vdev_retry_io(zio));
1156 ASSERT(!(zio->io_flags & ZIO_FLAG_WRITE_RETRY));
1159 * For I/O requests that cannot fail, we carry out
1160 * the requested behavior based on the failmode pool
1161 * property.
1163 * XXX - Need to differentiate between an ENOSPC as
1164 * a result of vdev failures vs. a full pool.
1166 if (!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
1167 char *blkbuf;
1170 blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_NOSLEEP);
1171 if (blkbuf)
1172 sprintf_blkptr(blkbuf, BP_SPRINTF_LEN,
1173 bp ? bp : &zio->io_bp_copy);
1175 cmn_err(CE_WARN, "ZFS: %s (%s on %s off %llx: zio %p "
1176 "%s): error %d", zio->io_error == ECKSUM ?
1177 "bad checksum" : "I/O failure",
1178 zio_type_name[zio->io_type],
1179 vdev_description(vd),
1180 (u_longlong_t)zio->io_offset,
1181 (void *)zio, blkbuf ? blkbuf : "", zio->io_error);
1184 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) {
1185 fm_panic("Pool '%s' has encountered an "
1186 "uncorrectable I/O failure and the "
1187 "failure mode property for this pool "
1188 "is set to panic.", spa_name(spa));
1190 cmn_err(CE_WARN, "Pool '%s' has encountered "
1191 "an uncorrectable I/O error. "
1192 "Manual intervention is required.", spa_name(spa));
1193 return (zio_vdev_suspend_io(zio));
1196 ASSERT(!(zio->io_flags & ZIO_FLAG_WRITE_RETRY));
1197 ASSERT(zio->io_children_notready == 0);
1199 return (ZIO_PIPELINE_CONTINUE);
1203 zio_done(zio_t *zio)
1205 zio_t *pio = zio->io_parent;
1206 spa_t *spa = zio->io_spa;
1208 ASSERT(zio->io_children_notready == 0);
1209 ASSERT(zio->io_children_notdone == 0);
1211 zio_clear_transform_stack(zio);
1213 if (zio->io_done)
1214 zio->io_done(zio);
1216 ASSERT(zio->io_delegate_list == NULL);
1217 ASSERT(zio->io_delegate_next == NULL);
1219 if (pio != NULL) {
1220 zio_t *next, *prev;
1222 mutex_enter(&pio->io_lock);
1223 next = zio->io_sibling_next;
1224 prev = zio->io_sibling_prev;
1225 if (next != NULL)
1226 next->io_sibling_prev = prev;
1227 if (prev != NULL)
1228 prev->io_sibling_next = next;
1229 if (pio->io_child == zio)
1230 pio->io_child = next;
1231 mutex_exit(&pio->io_lock);
1233 zio_notify_parent(zio, ZIO_STAGE_WAIT_FOR_CHILDREN_DONE,
1234 &pio->io_children_notdone);
1238 * Note: this I/O is now done, and will shortly be freed, so there is no
1239 * need to clear this (or any other) flag.
1241 if (zio->io_flags & ZIO_FLAG_CONFIG_GRABBED)
1242 spa_config_exit(spa, zio);
1244 if (zio->io_waiter != NULL) {
1245 mutex_enter(&zio->io_lock);
1246 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
1247 zio->io_stalled = zio->io_stage;
1248 cv_broadcast(&zio->io_cv);
1249 mutex_exit(&zio->io_lock);
1250 } else {
1251 mutex_destroy(&zio->io_lock);
1252 cv_destroy(&zio->io_cv);
1253 kmem_cache_free(zio_cache, zio);
1256 return (ZIO_PIPELINE_STOP);
1260 * ==========================================================================
1261 * Compression support
1262 * ==========================================================================
1265 zio_write_compress(zio_t *zio)
1267 int compress = zio->io_compress;
1268 blkptr_t *bp = zio->io_bp;
1269 void *cbuf = NULL;
1270 uint64_t lsize = zio->io_size;
1271 uint64_t csize = lsize;
1272 uint64_t cbufsize = 0;
1273 int pass;
1275 if (bp->blk_birth == zio->io_txg) {
1277 * We're rewriting an existing block, which means we're
1278 * working on behalf of spa_sync(). For spa_sync() to
1279 * converge, it must eventually be the case that we don't
1280 * have to allocate new blocks. But compression changes
1281 * the blocksize, which forces a reallocate, and makes
1282 * convergence take longer. Therefore, after the first
1283 * few passes, stop compressing to ensure convergence.
1285 pass = spa_sync_pass(zio->io_spa);
1286 if (pass > zio_sync_pass.zp_dontcompress)
1287 compress = ZIO_COMPRESS_OFF;
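/*
 * For example, with the settings in zio_sync_pass above
 * (zp_dontcompress == 4), sync passes 5 and beyond write this
 * block uncompressed, so its physical size, and therefore its
 * allocation, stops changing and spa_sync() can converge.
 */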
1289 ASSERT(BP_IS_HOLE(bp));
1293 if (compress != ZIO_COMPRESS_OFF)
1294 if (!zio_compress_data(compress, zio->io_data, zio->io_size,
1295 &cbuf, &csize, &cbufsize))
1296 compress = ZIO_COMPRESS_OFF;
1298 if (compress != ZIO_COMPRESS_OFF && csize != 0)
1299 zio_push_transform(zio, cbuf, csize, cbufsize);
1302 * The final pass of spa_sync() must be all rewrites, but the first
1303 * few passes offer a trade-off: allocating blocks defers convergence,
1304 * but newly allocated blocks are sequential, so they can be written
1305 * to disk faster. Therefore, we allow the first few passes of
1306 * spa_sync() to reallocate new blocks, but force rewrites after that.
1307 * There should only be a handful of blocks after pass 1 in any case.
1309 if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
1310 pass > zio_sync_pass.zp_rewrite) {
1312 BP_SET_LSIZE(bp, lsize);
1313 BP_SET_COMPRESS(bp, compress);
1314 zio->io_pipeline = ZIO_REWRITE_PIPELINE(bp);
1316 if (bp->blk_birth == zio->io_txg)
1317 BP_ZERO(bp);
1318 if (csize == 0) {
1319 BP_ZERO(bp);
1320 zio->io_pipeline = ZIO_WAIT_FOR_CHILDREN_PIPELINE;
1322 ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
1323 BP_SET_LSIZE(bp, lsize);
1324 BP_SET_PSIZE(bp, csize);
1325 BP_SET_COMPRESS(bp, compress);
1329 return (ZIO_PIPELINE_CONTINUE);
1333 zio_read_decompress(zio_t *zio)
1335 blkptr_t *bp = zio->io_bp;
1336 void *data;
1337 uint64_t size;
1338 uint64_t bufsize;
1339 int compress = BP_GET_COMPRESS(bp);
1341 ASSERT(compress != ZIO_COMPRESS_OFF);
1343 zio_pop_transform(zio, &data, &size, &bufsize);
1345 if (zio_decompress_data(compress, data, size,
1346 zio->io_data, zio->io_size))
1347 zio->io_error = EIO;
1349 zio_buf_free(data, bufsize);
1351 return (ZIO_PIPELINE_CONTINUE);
1355 * ==========================================================================
1356 * Gang block support
1357 * ==========================================================================
1360 zio_gang_byteswap(zio_t *zio)
1362 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
1364 if (BP_SHOULD_BYTESWAP(zio->io_bp))
1365 byteswap_uint64_array(zio->io_data, zio->io_size);
1369 zio_get_gang_header(zio_t *zio)
1371 blkptr_t *bp = zio->io_bp;
1372 uint64_t gsize = SPA_GANGBLOCKSIZE;
1373 void *gbuf = zio_buf_alloc(gsize);
1375 ASSERT(BP_IS_GANG(bp));
1377 zio_push_transform(zio, gbuf, gsize, gsize);
1379 zio_nowait(zio_create(zio, zio->io_spa, bp->blk_birth, bp, gbuf, gsize,
1380 NULL, NULL, ZIO_TYPE_READ, zio->io_priority,
1381 zio->io_flags & ZIO_FLAG_GANG_INHERIT,
1382 ZIO_STAGE_OPEN, ZIO_READ_GANG_PIPELINE));
1384 return (zio_wait_for_children_done(zio));
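/*
 * A gang header occupies a single SPA_GANGBLOCKSIZE block (one 512-byte
 * sector when SPA_MINBLOCKSIZE is 512) holding up to SPA_GBH_NBLKPTRS
 * child block pointers plus an embedded tail checksum, which is verified
 * against the external gang verifier rather than a parent checksum.
 */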
1388 zio_read_gang_members(zio_t *zio)
1390 zio_gbh_phys_t *gbh;
1391 uint64_t gsize, gbufsize, loff, lsize;
1394 ASSERT(BP_IS_GANG(zio->io_bp));
1396 zio_gang_byteswap(zio);
1397 zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);
1399 for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
1400 blkptr_t *gbp = &gbh->zg_blkptr[i];
1401 lsize = BP_GET_PSIZE(gbp);
1403 ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
1404 ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
1405 ASSERT3U(loff + lsize, <=, zio->io_size);
1406 ASSERT(i < SPA_GBH_NBLKPTRS);
1407 ASSERT(!BP_IS_HOLE(gbp));
1409 zio_nowait(zio_read(zio, zio->io_spa, gbp,
1410 (char *)zio->io_data + loff, lsize,
1411 NULL, NULL, zio->io_priority,
1412 zio->io_flags & ZIO_FLAG_GANG_INHERIT, &zio->io_bookmark));
1415 zio_buf_free(gbh, gbufsize);
1417 return (zio_wait_for_children_done(zio));
1421 zio_rewrite_gang_members(zio_t *zio)
1423 zio_gbh_phys_t *gbh;
1424 uint64_t gsize, gbufsize, loff, lsize;
1427 ASSERT(BP_IS_GANG(zio->io_bp));
1428 ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);
1430 zio_gang_byteswap(zio);
1431 zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);
1433 ASSERT(gsize == gbufsize);
1435 for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
1436 blkptr_t *gbp = &gbh->zg_blkptr[i];
1437 lsize = BP_GET_PSIZE(gbp);
1439 ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
1440 ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
1441 ASSERT3U(loff + lsize, <=, zio->io_size);
1442 ASSERT(i < SPA_GBH_NBLKPTRS);
1443 ASSERT(!BP_IS_HOLE(gbp));
1445 zio_nowait(zio_rewrite(zio, zio->io_spa, zio->io_checksum,
1446 zio->io_txg, gbp, (char *)zio->io_data + loff, lsize,
1447 NULL, NULL, zio->io_priority,
1448 zio->io_flags & ZIO_FLAG_GANG_INHERIT, &zio->io_bookmark));
1451 zio_push_transform(zio, gbh, gsize, gbufsize);
1453 return (zio_wait_for_children_ready(zio));
1457 zio_free_gang_members(zio_t *zio)
1459 zio_gbh_phys_t *gbh;
1460 uint64_t gsize, gbufsize;
1463 ASSERT(BP_IS_GANG(zio->io_bp));
1465 zio_gang_byteswap(zio);
1466 zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);
1468 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
1469 blkptr_t *gbp = &gbh->zg_blkptr[i];
1471 if (BP_IS_HOLE(gbp))
1472 continue;
1473 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
1474 gbp, NULL, NULL));
1477 zio_buf_free(gbh, gbufsize);
1479 return (ZIO_PIPELINE_CONTINUE);
1483 zio_claim_gang_members(zio_t *zio)
1485 zio_gbh_phys_t *gbh;
1486 uint64_t gsize, gbufsize;
1489 ASSERT(BP_IS_GANG(zio->io_bp));
1491 zio_gang_byteswap(zio);
1492 zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);
1494 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
1495 blkptr_t *gbp = &gbh->zg_blkptr[i];
1496 if (BP_IS_HOLE(gbp))
1497 continue;
1498 zio_nowait(zio_claim(zio, zio->io_spa, zio->io_txg,
1499 gbp, NULL, NULL));
1502 zio_buf_free(gbh, gbufsize);
1504 return (ZIO_PIPELINE_CONTINUE);
1508 zio_write_allocate_gang_member_done(zio_t *zio)
1510 zio_t *pio = zio->io_parent;
1511 dva_t *cdva = zio->io_bp->blk_dva;
1512 dva_t *pdva = pio->io_bp->blk_dva;
1513 uint64_t asize;
1514 int d;
1516 ASSERT3U(pio->io_ndvas, ==, zio->io_ndvas);
1517 ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
1518 ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
1519 ASSERT3U(pio->io_ndvas, <=, BP_GET_NDVAS(pio->io_bp));
1521 mutex_enter(&pio->io_lock);
1522 for (d = 0; d < BP_GET_NDVAS(pio->io_bp); d++) {
1523 ASSERT(DVA_GET_GANG(&pdva[d]));
1524 asize = DVA_GET_ASIZE(&pdva[d]);
1525 asize += DVA_GET_ASIZE(&cdva[d]);
1526 DVA_SET_ASIZE(&pdva[d], asize);
1528 mutex_exit(&pio->io_lock);
1532 zio_write_allocate_gang_members(zio_t *zio, metaslab_class_t *mc)
1534 blkptr_t *bp = zio->io_bp;
1535 dva_t *dva = bp->blk_dva;
1536 spa_t *spa = zio->io_spa;
1537 zio_gbh_phys_t *gbh;
1538 uint64_t txg = zio->io_txg;
1539 uint64_t resid = zio->io_size;
1540 uint64_t maxalloc = P2ROUNDUP(zio->io_size >> 1, SPA_MINBLOCKSIZE);
1541 uint64_t gsize, loff, lsize;
1543 int ndvas = zio->io_ndvas;
1544 int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
1548 gsize = SPA_GANGBLOCKSIZE;
1549 gbps_left = SPA_GBH_NBLKPTRS;
1551 error = metaslab_alloc(spa, mc, gsize, bp, gbh_ndvas, txg, NULL,
1552 B_FALSE);
1553 if (error) {
1554 zio->io_error = error;
1555 return (ZIO_PIPELINE_CONTINUE);
1558 for (d = 0; d < gbh_ndvas; d++)
1559 DVA_SET_GANG(&dva[d], 1);
1561 bp->blk_birth = txg;
1563 gbh = zio_buf_alloc(gsize);
1566 for (loff = 0, i = 0; loff != zio->io_size;
1567 loff += lsize, resid -= lsize, gbps_left--, i++) {
1568 blkptr_t *gbp = &gbh->zg_blkptr[i];
1571 ASSERT(gbps_left != 0);
1572 maxalloc = MIN(maxalloc, resid);
1574 while (resid <= maxalloc * gbps_left) {
1575 error = metaslab_alloc(spa, mc, maxalloc, gbp, ndvas,
1576 txg, bp, B_FALSE);
1577 if (error == 0)
1578 break;
1579 ASSERT3U(error, ==, ENOSPC);
1580 /* XXX - free up previous allocations? */
1581 if (maxalloc == SPA_MINBLOCKSIZE) {
1582 zio->io_error = error;
1583 return (ZIO_PIPELINE_CONTINUE);
1585 maxalloc = P2ROUNDUP(maxalloc >> 1, SPA_MINBLOCKSIZE);
1588 if (resid <= maxalloc * gbps_left) {
1589 lsize = maxalloc;
1590 BP_SET_LSIZE(gbp, lsize);
1591 BP_SET_PSIZE(gbp, lsize);
1592 BP_SET_COMPRESS(gbp, ZIO_COMPRESS_OFF);
1593 gbp->blk_birth = txg;
1594 zio_nowait(zio_rewrite(zio, spa,
1595 zio->io_checksum, txg, gbp,
1596 (char *)zio->io_data + loff, lsize,
1597 zio_write_allocate_gang_member_done, NULL,
1599 zio->io_flags & ZIO_FLAG_GANG_INHERIT,
1600 &zio->io_bookmark));
1602 lsize = P2ROUNDUP(resid / gbps_left, SPA_MINBLOCKSIZE);
1603 ASSERT(lsize != SPA_MINBLOCKSIZE);
1604 zio_nowait(zio_write_allocate(zio, spa,
1605 zio->io_checksum, txg, gbp,
1606 (char *)zio->io_data + loff, lsize,
1607 zio_write_allocate_gang_member_done, NULL,
1609 zio->io_flags & ZIO_FLAG_GANG_INHERIT));
1613 ASSERT(resid == 0 && loff == zio->io_size);
1615 zio->io_pipeline |= 1U << ZIO_STAGE_GANG_CHECKSUM_GENERATE;
1617 zio_push_transform(zio, gbh, gsize, gsize);
1620 * As much as we'd like this to be 'ready' instead of 'done',
1621 * updating our ASIZE doesn't happen until the io_done callback,
1622 * so we have to wait for that to finish in order for our BP
1625 return (zio_wait_for_children_done(zio));
1629 * ==========================================================================
1630 * Allocate and free blocks
1631 * ==========================================================================
1634 zio_dva_allocate(zio_t *zio)
1636 spa_t *spa = zio->io_spa;
1637 metaslab_class_t *mc = spa->spa_normal_class;
1638 blkptr_t *bp = zio->io_bp;
1641 ASSERT(BP_IS_HOLE(bp));
1642 ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
1643 ASSERT3U(zio->io_ndvas, >, 0);
1644 ASSERT3U(zio->io_ndvas, <=, spa_max_replication(spa));
1647 * For testing purposes, we force I/Os to retry. We don't allow
1648 * retries beyond the first pass since those I/Os are non-allocating
1649 * and therefore will never fail.
1651 if (zio_io_fail_shift &&
1652 spa_sync_pass(zio->io_spa) <= zio_sync_pass.zp_rewrite &&
1653 zio_io_should_fail(zio_io_fail_shift))
1654 zio->io_flags |= ZIO_FLAG_WRITE_RETRY;
1656 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
1658 error = metaslab_alloc(spa, mc, zio->io_size, bp, zio->io_ndvas,
1659 zio->io_txg, NULL, B_FALSE);
1661 if (error == 0) {
1662 bp->blk_birth = zio->io_txg;
1663 } else if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
1664 return (zio_write_allocate_gang_members(zio, mc));
1666 zio->io_error = error;
1669 return (ZIO_PIPELINE_CONTINUE);
1673 zio_dva_free(zio_t *zio)
1675 blkptr_t *bp = zio->io_bp;
1677 metaslab_free(zio->io_spa, bp, zio->io_txg, B_FALSE);
1679 BP_ZERO(bp);
1681 return (ZIO_PIPELINE_CONTINUE);
1685 zio_dva_claim(zio_t *zio)
1687 zio->io_error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
1689 return (ZIO_PIPELINE_CONTINUE);
1693 * ==========================================================================
1694 * Read and write to physical devices
1695 * ==========================================================================
1699 zio_vdev_io_start(zio_t *zio)
1701 vdev_t *vd = zio->io_vd;
1702 vdev_t *tvd = vd ? vd->vdev_top : NULL;
1703 blkptr_t *bp = zio->io_bp;
1705 spa_t *spa = zio->io_spa;
1708 * If the pool is already in a failure state then just suspend
1709 * this I/O until the problem is resolved. We will reissue it
1710 * at that time.
1712 if (spa_state(spa) == POOL_STATE_IO_FAILURE &&
1713 zio->io_type == ZIO_TYPE_WRITE)
1714 return (zio_vdev_suspend_io(zio));
1717 * The mirror_ops handle multiple DVAs in a single BP
1719 if (vd == NULL)
1720 return (vdev_mirror_ops.vdev_op_io_start(zio));
1722 align = 1ULL << tvd->vdev_ashift;
1724 if (zio->io_retries == 0 && vd == tvd)
1725 zio->io_flags |= ZIO_FLAG_FAILFAST;
1727 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && vd->vdev_children == 0) {
1728 zio->io_flags |= ZIO_FLAG_PHYSICAL;
1729 zio->io_offset += VDEV_LABEL_START_SIZE;
1732 if (P2PHASE(zio->io_size, align) != 0) {
1733 uint64_t asize = P2ROUNDUP(zio->io_size, align);
1734 char *abuf = zio_buf_alloc(asize);
1736 if (zio->io_type == ZIO_TYPE_WRITE) {
1737 bcopy(zio->io_data, abuf, zio->io_size);
1738 bzero(abuf + zio->io_size, asize - zio->io_size);
1740 zio_push_transform(zio, abuf, asize, asize);
1741 ASSERT(!(zio->io_flags & ZIO_FLAG_SUBBLOCK));
1742 zio->io_flags |= ZIO_FLAG_SUBBLOCK;
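/*
 * For example (hypothetical numbers), a 1K read of a vdev whose
 * top-level ashift is 12 is widened here into a 4K read into abuf;
 * ZIO_FLAG_SUBBLOCK tells zio_vdev_io_assess() to copy the result
 * back into the caller's buffer and free abuf.
 */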
1745 ASSERT(P2PHASE(zio->io_offset, align) == 0);
1746 ASSERT(P2PHASE(zio->io_size, align) == 0);
1747 ASSERT(bp == NULL ||
1748 P2ROUNDUP(ZIO_GET_IOSIZE(zio), align) == zio->io_size);
1749 ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE));
1751 return (vd->vdev_ops->vdev_op_io_start(zio));
1755 zio_vdev_io_done(zio_t *zio)
1757 if (zio->io_vd == NULL)
1758 return (vdev_mirror_ops.vdev_op_io_done(zio));
1760 return (zio->io_vd->vdev_ops->vdev_op_io_done(zio));
1765 zio_should_retry(zio_t *zio)
1767 vdev_t *vd = zio->io_vd;
1769 if (zio->io_error == 0)
1770 return (B_FALSE);
1771 if (zio->io_delegate_list != NULL)
1772 return (B_FALSE);
1773 if (vd && vd != vd->vdev_top)
1774 return (B_FALSE);
1775 if (zio->io_flags & ZIO_FLAG_DONT_RETRY)
1776 return (B_FALSE);
1777 if (zio->io_retries > 0)
1778 return (B_FALSE);
1780 return (B_TRUE);
1784 zio_vdev_io_assess(zio_t *zio)
1786 vdev_t *vd = zio->io_vd;
1787 vdev_t *tvd = vd ? vd->vdev_top : NULL;
1789 ASSERT(zio->io_vsd == NULL);
1791 if (zio->io_flags & ZIO_FLAG_SUBBLOCK) {
1792 void *abuf;
1793 uint64_t asize;
1795 zio_pop_transform(zio, &abuf, &asize, &asize);
1796 if (zio->io_type == ZIO_TYPE_READ)
1797 bcopy(abuf, zio->io_data, zio->io_size);
1798 zio_buf_free(abuf, asize);
1799 zio->io_flags &= ~ZIO_FLAG_SUBBLOCK;
1802 if (zio_injection_enabled && !zio->io_error)
1803 zio->io_error = zio_handle_fault_injection(zio, EIO);
1806 * If the I/O failed, determine whether we should attempt to retry it.
1809 if (zio_should_retry(zio)) {
1814 zio->io_flags &= ZIO_FLAG_RETRY_INHERIT;
1816 zio->io_flags &= ~ZIO_FLAG_FAILFAST;
1817 zio->io_flags |= ZIO_FLAG_DONT_CACHE;
1818 zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1;
1820 return (ZIO_PIPELINE_CONTINUE);
1823 return (ZIO_PIPELINE_CONTINUE);
1827 zio_vdev_io_reissue(zio_t *zio)
1829 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
1830 ASSERT(zio->io_error == 0);
1832 zio->io_stage--;
1836 zio_vdev_io_redone(zio_t *zio)
1838 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
1840 zio->io_stage--;
1844 zio_vdev_io_bypass(zio_t *zio)
1846 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
1847 ASSERT(zio->io_error == 0);
1849 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
1850 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1;
1854 * ==========================================================================
1855 * Generate and verify checksums
1856 * ==========================================================================
1859 zio_checksum_generate(zio_t *zio)
1861 int checksum = zio->io_checksum;
1862 blkptr_t *bp = zio->io_bp;
1864 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
1866 BP_SET_CHECKSUM(bp, checksum);
1867 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
1869 zio_checksum(checksum, &bp->blk_cksum, zio->io_data, zio->io_size);
1871 return (ZIO_PIPELINE_CONTINUE);
1875 zio_gang_checksum_generate(zio_t *zio)
1877 zio_cksum_t zc;
1878 zio_gbh_phys_t *gbh = zio->io_data;
1880 ASSERT(BP_IS_GANG(zio->io_bp));
1881 ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);
1883 zio_set_gang_verifier(zio, &gbh->zg_tail.zbt_cksum);
1885 zio_checksum(ZIO_CHECKSUM_GANG_HEADER, &zc, zio->io_data, zio->io_size);
1887 return (ZIO_PIPELINE_CONTINUE);
1891 zio_checksum_verify(zio_t *zio)
1893 if (zio->io_bp != NULL) {
1894 zio->io_error = zio_checksum_error(zio);
1895 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE))
1896 zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
1897 zio->io_spa, zio->io_vd, zio, 0, 0);
1900 return (ZIO_PIPELINE_CONTINUE);
1904 * Called by RAID-Z to ensure we don't compute the checksum twice.
1907 zio_checksum_verified(zio_t *zio)
1909 zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
1913 * Set the external verifier for a gang block based on the identity (vdev, offset) and birth txg in the bp.
1916 zio_set_gang_verifier(zio_t *zio, zio_cksum_t *zcp)
1918 blkptr_t *bp = zio->io_bp;
1920 zcp->zc_word[0] = DVA_GET_VDEV(BP_IDENTITY(bp));
1921 zcp->zc_word[1] = DVA_GET_OFFSET(BP_IDENTITY(bp));
1922 zcp->zc_word[2] = bp->blk_birth;
1923 zcp->zc_word[3] = 0;
1927 * ==========================================================================
1928 * Define the pipeline
1929 * ==========================================================================
1931 typedef int zio_pipe_stage_t(zio_t *zio);
1933 zio_pipe_stage_t *zio_pipeline[ZIO_STAGE_DONE + 2] = {
1934 NULL,
1935 zio_wait_for_children_ready,
1936 zio_read_init,
1937 zio_issue_async,
1938 zio_write_compress,
1939 zio_checksum_generate,
1940 zio_get_gang_header,
1941 zio_rewrite_gang_members,
1942 zio_free_gang_members,
1943 zio_claim_gang_members,
1944 zio_dva_allocate,
1945 zio_dva_free,
1946 zio_dva_claim,
1947 zio_gang_checksum_generate,
1948 zio_ready,
1949 zio_vdev_io_start,
1950 zio_vdev_io_done,
1951 zio_vdev_io_assess,
1952 zio_wait_for_children_done,
1953 zio_checksum_verify,
1954 zio_read_gang_members,
1955 zio_read_decompress,
1956 zio_assess,
1957 zio_done,
1958 NULL
1962 * Execute the I/O pipeline until one of the following occurs:
1963 * (1) the I/O completes; (2) the pipeline stalls waiting for
1964 * dependent child I/Os; (3) the I/O issues, so we're waiting
1965 * for an I/O completion interrupt; (4) the I/O is delegated by
1966 * vdev-level caching or aggregation; (5) the I/O is deferred
1967 * due to vdev-level queueing; (6) the I/O is handed off to
1968 * another thread. In all cases, the pipeline stops whenever
1969 * there's no CPU work; it never burns a thread in cv_wait().
1971 * There's no locking on io_stage because there's no legitimate way
1972 * for multiple threads to be attempting to process the same I/O.
1975 zio_execute(zio_t *zio)
1977 while (zio->io_stage < ZIO_STAGE_DONE) {
1978 uint32_t pipeline = zio->io_pipeline;
1979 int rv;
1981 ASSERT(!MUTEX_HELD(&zio->io_lock));
1984 * If an error occurred outside the vdev stack,
1985 * just execute the interlock stages to clean up.
1987 if (zio->io_error &&
1988 ((1U << zio->io_stage) & ZIO_VDEV_IO_STAGES) == 0)
1989 pipeline &= ZIO_ERROR_PIPELINE_MASK;
1991 while (((1U << ++zio->io_stage) & pipeline) == 0)
1992 continue;
1994 ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
1995 ASSERT(zio->io_stalled == 0);
1997 rv = zio_pipeline[zio->io_stage](zio);
1999 if (rv == ZIO_PIPELINE_STOP)
2002 ASSERT(rv == ZIO_PIPELINE_CONTINUE);
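/*
 * For example, if a checksum error is recorded before the vdev stages
 * run, the pipeline above collapses to ZIO_ERROR_PIPELINE_MASK and,
 * roughly, only the interlock and bookkeeping stages (wait-for-children,
 * ready, assess, done) still execute.
 */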
2007 zio_io_should_fail(uint16_t range)
2009 static uint16_t allocs = 0;
2011 return (P2PHASE(allocs++, 1U<<range) == 0);
2015 * Try to allocate an intent log block. Return 0 on success, errno on failure.
2018 zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp,
2023 spa_config_enter(spa, RW_READER, FTAG);
2025 if (zio_zil_fail_shift && zio_io_should_fail(zio_zil_fail_shift)) {
2026 spa_config_exit(spa, FTAG);
2027 return (ENOSPC);
2031 * We were passed the previous log block's DVA in bp->blk_dva[0].
2032 * We use that as a hint for which vdev to allocate from next.
2034 error = metaslab_alloc(spa, spa->spa_log_class, size,
2035 new_bp, 1, txg, old_bp, B_TRUE);
2037 if (error)
2038 error = metaslab_alloc(spa, spa->spa_normal_class, size,
2039 new_bp, 1, txg, old_bp, B_TRUE);
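/*
 * For example, on a pool with a dedicated log device the first attempt
 * above allocates from spa_log_class; only if that fails (say, the slog
 * is full) does the second attempt fall back to spa_normal_class.
 */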
2041 if (error == 0) {
2042 BP_SET_LSIZE(new_bp, size);
2043 BP_SET_PSIZE(new_bp, size);
2044 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
2045 BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG);
2046 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
2047 BP_SET_LEVEL(new_bp, 0);
2048 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
2049 new_bp->blk_birth = txg;
2052 spa_config_exit(spa, FTAG);
2054 return (error);
2058 * Free an intent log block. We know it can't be a gang block, so there's
2059 * nothing to do except metaslab_free() it.
2062 zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
2064 ASSERT(!BP_IS_GANG(bp));
2066 spa_config_enter(spa, RW_READER, FTAG);
2068 metaslab_free(spa, bp, txg, B_FALSE);
2070 spa_config_exit(spa, FTAG);
2074 * Start an async flush of the write cache for this vdev.
2077 zio_flush(zio_t *zio, vdev_t *vd)
2079 zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
2080 NULL, NULL, ZIO_PRIORITY_NOW,
2081 ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));