/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"@(#)zvol.c	1.31	08/04/09 SMI"
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the ZFS-specific devfsadm link generator.
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
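/*
 * For example (hypothetical pool and volume names): a volume "vol0" in
 * pool "tank" appears as /dev/zvol/dsk/tank/vol0 (block interface) and
 * /dev/zvol/rdsk/tank/vol0 (raw/character interface).
 */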
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>

#include "zfs_namecheck.h"
static void *zvol_state;

#define	ZVOL_DUMPSIZE	"dumpsize"
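/*
 * ZVOL_DUMPSIZE names the ZAP attribute (on ZVOL_ZAP_OBJ) that records
 * the volume size at the time the zvol was last dumpified;
 * zvol_dumpify() compares it against zv_volsize to decide whether the
 * dump area needs to be (re)initialized.
 */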
/*
 * This lock protects the zvol_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static kmutex_t zvol_state_lock;
static uint32_t zvol_minors;

#define	NUM_EXTENTS	((SPA_MAXBLOCKSIZE) / sizeof (zvol_extent_t))
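/*
 * NUM_EXTENTS is chosen so that one zvol_ext_list_t's extent array
 * spans SPA_MAXBLOCKSIZE bytes; mappings that need more extents chain
 * additional nodes through zl_next (see zvol_ext_list below).
 */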
typedef struct zvol_extent {
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_stride;	/* extent stride */
	uint64_t	ze_size;	/* number of blocks in extent */
} zvol_extent_t;
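/*
 * An extent describes ze_size consecutive volume blocks that live on a
 * single vdev, each a constant ze_stride apart on disk starting at
 * ze_dva; zvol_map_block() below grows the current extent while those
 * invariants hold and starts a new one when they break.
 */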
/*
 * The list of extents associated with the dump device
 */
typedef struct zvol_ext_list {
	zvol_extent_t	zl_extents[NUM_EXTENTS];
	struct zvol_ext_list *zl_next;
} zvol_ext_list_t;
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly; dumpified */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_mode;	/* DS_MODE_* flags at open time */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	zvol_ext_list_t	*zv_list;	/* list of extents for dump */
	uint64_t	zv_txg_assign;	/* txg to assign during ZIL replay */
	znode_t		zv_znode;	/* for range locking */
} zvol_state_t;
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
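/*
 * ZVOL_RDONLY mirrors the dataset's readonly property (kept in sync by
 * zvol_readonly_changed_cb()); ZVOL_DUMPIFIED is set once the volume's
 * blocks have been preallocated and LBA-mapped for use as a dump device.
 */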
/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;
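/*
 * zvol_maxphys bounds the size of each chunk moved by zvol_strategy()
 * and is enforced for physio() callers by zvol_minphys() below.
 */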
extern int zfs_set_prop_nvlist(const char *, nvlist_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
static void
zvol_size_changed(zvol_state_t *zv, major_t maj)
{
	dev_t dev = makedevice(maj, zv->zv_minor);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", zv->zv_volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}
static void
zvol_readonly_changed_cb(void *arg, uint64_t newval)
{
	zvol_state_t *zv = arg;

	if (newval)
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
}
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}
/*
 * Find a free minor number.
 */
static minor_t
zvol_minor_alloc(void)
{
	minor_t minor;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++)
		if (ddi_get_soft_state(zvol_state, minor) == NULL)
			return (minor);

	return (0);
}
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {
		zv = ddi_get_soft_state(zvol_state, minor);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}
static void
zvol_init_extent(zvol_extent_t *ze, blkptr_t *bp)
{
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_stride = 0;
	ze->ze_size = 1;
}
/* extent mapping arg */
struct maparg {
	zvol_ext_list_t	*ma_list;
	zvol_extent_t	*ma_extent;
	int		ma_gang;
};
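/*
 * ma_gang counts gang blocks seen during traversal; zvol_get_lbas()
 * uses it to tell an EINTR caused by gang blocks (pool fragmentation)
 * apart from other early terminations.
 */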
/*ARGSUSED*/
static int
zvol_map_block(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
{
	zbookmark_t *zb = &bc->bc_bookmark;
	blkptr_t *bp = &bc->bc_blkptr;
	void *data = bc->bc_data;
	dnode_phys_t *dnp = bc->bc_dnode;
	struct maparg *ma = (struct maparg *)arg;
	uint64_t stride;

	/* If there is an error, then keep trying to make progress */
	if (bc->bc_errno)
		return (ERESTART);

	if (zb->zb_level == -1) {
		ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
		ASSERT3U(BP_GET_LEVEL(bp), ==, 0);
	} else {
		ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
		ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
	}

	if (zb->zb_level > 0) {
		uint64_t fill = 0;
		blkptr_t *bpx, *bpend;

		for (bpx = data, bpend = bpx + BP_GET_LSIZE(bp) / sizeof (*bpx);
		    bpx < bpend; bpx++) {
			if (bpx->blk_birth != 0) {
				fill += bpx->blk_fill;
			} else {
				ASSERT(bpx->blk_fill == 0);
			}
		}
		ASSERT3U(fill, ==, bp->blk_fill);
	}

	if (zb->zb_level == 0 && dnp->dn_type == DMU_OT_DNODE) {
		uint64_t fill = 0;
		dnode_phys_t *dnx, *dnend;

		for (dnx = data, dnend = dnx + (BP_GET_LSIZE(bp)>>DNODE_SHIFT);
		    dnx < dnend; dnx++) {
			if (dnx->dn_type != DMU_OT_NONE)
				fill++;
		}
		ASSERT3U(fill, ==, bp->blk_fill);
	}

	if (zb->zb_level || dnp->dn_type == DMU_OT_DNODE)
		return (0);

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp)) {
		ma->ma_gang++;
		return (EINTR);
	}

	if (ma->ma_extent->ze_size == 0) {
		zvol_init_extent(ma->ma_extent, bp);
		return (0);
	}

	stride = (DVA_GET_OFFSET(&bp->blk_dva[0])) -
	    ((DVA_GET_OFFSET(&ma->ma_extent->ze_dva)) +
	    (ma->ma_extent->ze_size - 1) * (ma->ma_extent->ze_stride));
	if (DVA_GET_VDEV(BP_IDENTITY(bp)) ==
	    DVA_GET_VDEV(&ma->ma_extent->ze_dva)) {
		if (ma->ma_extent->ze_stride == 0) {
			/* second block in this extent */
			ma->ma_extent->ze_stride = stride;
			ma->ma_extent->ze_size++;
			return (0);
		} else if (ma->ma_extent->ze_stride == stride) {
			/*
			 * the block we allocated has the same
			 * stride
			 */
			ma->ma_extent->ze_size++;
			return (0);
		}
	}

	/*
	 * dtrace -n 'zfs-dprintf
	 * /stringof(arg0) == "zvol.c"/
	 * {
	 *	printf("%s: %s", stringof(arg1), stringof(arg3))
	 * }'
	 */
	dprintf("ma_extent 0x%lx mrstride 0x%lx stride %lx\n",
	    ma->ma_extent->ze_size, ma->ma_extent->ze_stride, stride);
	dprintf_bp(bp, "%s", "next blkptr:");
	/* start a new extent */
	if (ma->ma_extent == &ma->ma_list->zl_extents[NUM_EXTENTS - 1]) {
		ma->ma_list->zl_next = kmem_zalloc(sizeof (zvol_ext_list_t),
		    KM_SLEEP);
		ma->ma_list = ma->ma_list->zl_next;
		ma->ma_extent = &ma->ma_list->zl_extents[0];
	} else {
		ma->ma_extent++;
	}

	zvol_init_extent(ma->ma_extent, bp);
	return (0);
}
/*ARGSUSED*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, zv->zv_txg_assign);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
/*ARGSUSED*/
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
};
/*
 * reconstruct dva that gets us to the desired offset (offset
 * is in bytes)
 */
int
zvol_get_dva(zvol_state_t *zv, uint64_t offset, dva_t *dva)
{
	zvol_ext_list_t	*zl;
	zvol_extent_t	*ze;
	int		idx;
	uint64_t	tmp;

	if ((zl = zv->zv_list) == NULL)
		return (EIO);
	idx = 0;
	ze = &zl->zl_extents[0];
	while (offset >= ze->ze_size * zv->zv_volblocksize) {
		offset -= ze->ze_size * zv->zv_volblocksize;

		if (idx == NUM_EXTENTS - 1) {
			/* we've reached the end of this array */
			ASSERT(zl->zl_next != NULL);
			if (zl->zl_next == NULL)
				return (-1);
			zl = zl->zl_next;
			ze = &zl->zl_extents[0];
			idx = 0;
		} else {
			ze++;
			idx++;
		}
	}
	DVA_SET_VDEV(dva, DVA_GET_VDEV(&ze->ze_dva));
	tmp = DVA_GET_OFFSET((&ze->ze_dva));
	tmp += (ze->ze_stride * (offset / zv->zv_volblocksize));
	DVA_SET_OFFSET(dva, tmp);
	return (0);
}
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_ext_list_t *zl;
	zvol_ext_list_t *tmp;

	if (zv->zv_list != NULL) {
		zl = zv->zv_list;
		while (zl != NULL) {
			tmp = zl->zl_next;
			kmem_free(zl, sizeof (zvol_ext_list_t));
			zl = tmp;
		}
		zv->zv_list = NULL;
	}
}
int
zvol_get_lbas(zvol_state_t *zv)
{
	struct maparg	ma;
	zvol_ext_list_t	*zl;
	zvol_extent_t	*ze;
	uint64_t	blocks = 0;
	int		err;

	ma.ma_list = zl = kmem_zalloc(sizeof (zvol_ext_list_t), KM_SLEEP);
	ma.ma_extent = &ma.ma_list->zl_extents[0];
	ma.ma_gang = 0;
	zv->zv_list = ma.ma_list;

	err = traverse_zvol(zv->zv_objset, ADVANCE_PRE, zvol_map_block, &ma);
	if (err == EINTR && ma.ma_gang) {
		/*
		 * We currently don't support dump devices when the pool
		 * is so fragmented that our allocation has resulted in
		 * gang blocks.
		 */
		zvol_free_extents(zv);
		return (err);
	}
	ASSERT3U(err, ==, 0);

	ze = &zl->zl_extents[0];
	while (ze->ze_size) {
		blocks += ze->ze_size;

		if (ze == &zl->zl_extents[NUM_EXTENTS - 1]) {
			zl = zl->zl_next;
			ze = &zl->zl_extents[0];
		} else {
			ze++;
		}
	}
	if (blocks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (EIO);
	}

	return (0);
}
/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name, major_t maj)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	uint64_t volsize;
	minor_t minor = 0;
	struct pathname linkpath;
	int ds_mode = DS_MODE_PRIMARY;
	vnode_t *vp = NULL;
	char *devpath;
	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(name) + 1;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) != NULL) {
		mutex_exit(&zvol_state_lock);
		return (EEXIST);
	}

	if (strchr(name, '@') != 0)
		ds_mode |= DS_MODE_READONLY;

	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);
	if (error) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	/*
	 * If there's an existing /dev/zvol symlink, try to use the
	 * same minor number we used last time.
	 */
	devpath = kmem_alloc(devpathlen, KM_SLEEP);

	(void) sprintf(devpath, "%s%s", ZVOL_FULL_DEV_DIR, name);

	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);

	kmem_free(devpath, devpathlen);

	if (error == 0 && vp->v_type != VLNK)
		error = EINVAL;

	if (error == 0) {
		pn_alloc(&linkpath);
		error = pn_getsymlink(vp, &linkpath, kcred);
		if (error == 0) {
			char *ms = strstr(linkpath.pn_path, ZVOL_PSEUDO_DEV);
			if (ms != NULL) {
				ms += strlen(ZVOL_PSEUDO_DEV);
				minor = stoi(&ms);
			}
		}
		pn_free(&linkpath);
	}

	if (vp != NULL)
		VN_RELE(vp);

	/*
	 * If we found a minor but it's already in use, we must pick a new one.
	 */
	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
		minor = 0;

	if (minor == 0)
		minor = zvol_minor_alloc();

	if (minor == 0) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) sprintf(chrbuf, "%uc,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	(void) sprintf(blkbuf, "%uc", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	zv = ddi_get_soft_state(zvol_state, minor);

	(void) strcpy(zv->zv_name, name);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;
	zv->zv_mode = ds_mode;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector);
	zvol_size_changed(zv, maj);

	/* XXX this should handle the possible i/o error */
	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zvol_minors++;

	mutex_exit(&zvol_state_lock);

	return (0);
}
/*
 * Remove minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	char namebuf[30];

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens != 0) {
		mutex_exit(&zvol_state_lock);
		return (EBUSY);
	}

	(void) sprintf(namebuf, "%uc,raw", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	(void) sprintf(namebuf, "%uc", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_objset_close(zv->zv_objset);
	zv->zv_objset = NULL;
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	ddi_soft_state_free(zvol_state, zv->zv_minor);

	zvol_minors--;

	mutex_exit(&zvol_state_lock);

	return (0);
}
static int
zvol_truncate(zvol_state_t *zv, uint64_t offset, uint64_t size)
{
	dmu_tx_t *tx;
	int error;

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_free(tx, ZVOL_OBJ, offset, size);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	error = dmu_free_range(zv->zv_objset, ZVOL_OBJ, offset, size, tx);
	dmu_tx_commit(tx);

	return (error);
}
static int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	void *data;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	/* allocate the blocks by writing each one */
	data = kmem_zalloc(SPA_MAXBLOCKSIZE, KM_SLEEP);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			kmem_free(data, SPA_MAXBLOCKSIZE);
			(void) zvol_truncate(zv, 0, off);
			return (error);
		}
		dmu_write(os, ZVOL_OBJ, off, bytes, data, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	kmem_free(data, SPA_MAXBLOCKSIZE);
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
static int
zvol_update_volsize(zvol_state_t *zv, major_t maj, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_free(tx, ZVOL_OBJ, volsize, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = zvol_truncate(zv, volsize, DMU_OBJECT_END);

	if (error == 0) {
		zv->zv_volsize = volsize;
		zvol_size_changed(zv, maj);
	}
	return (error);
}
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}
	old_volsize = zv->zv_volsize;

	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	error = zvol_update_volsize(zv, maj, volsize);

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (error == 0 && zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			(void) zvol_update_volsize(zv, maj, old_volsize);
			error = zvol_dumpify(zv);
		}
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}
	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = EBUSY;
		dmu_tx_commit(tx);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}
/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(*devp);
	zvol_state_t *zv;

	if (minor == 0)			/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	ASSERT(zv->zv_objset != NULL);

	if ((flag & FWRITE) &&
	    (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY))) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}

	mutex_exit(&zvol_state_lock);

	return (0);
}
/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;

	if (minor == 0)		/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/*
	 * The next statement is a workaround for the following DDI bug:
	 * 6343604 specfs race: multiple "last-close" of the same device
	 */
	if (zv->zv_total_opens == 0) {
		mutex_exit(&zvol_state_lock);
		return (0);
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	mutex_exit(&zvol_state_lock);

	return (0);
}
static void
zvol_get_done(dmu_buf_t *db, void *vzgd)
{
	zgd_t *zgd = (zgd_t *)vzgd;
	rl_t *rl = zgd->zgd_rl;

	dmu_buf_rele(db, vzgd);
	zfs_range_unlock(rl);
	zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	dmu_buf_t *db;
	rl_t *rl;
	zgd_t *zgd;
	uint64_t boff;			/* block starting offset */
	int dlen = lr->lr_length;	/* length of user data */
	int error;

	ASSERT(zio);
	ASSERT(dlen != 0);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) /* immediate write */
		return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf));

	zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_bp = &lr->lr_blkptr;

	/*
	 * Lock the range of the block to ensure that when the data is
	 * written out and its checksum is being calculated that no other
	 * thread can change the block.
	 */
	boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t);
	rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize,
	    RL_READER);
	zgd->zgd_rl = rl;

	VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db));
	error = dmu_sync(zio, db, &lr->lr_blkptr,
	    lr->lr_common.lrc_txg, zvol_get_done, zgd);
	if (error == 0)
		zil_add_block(zv->zv_zilog, &lr->lr_blkptr);
	/*
	 * If we get EINPROGRESS, then we need to wait for a
	 * write IO initiated by dmu_sync() to complete before
	 * we can release this dbuf.  We will finish everything
	 * up in the zvol_get_done() callback.
	 */
	if (error == EINPROGRESS)
		return (0);
	dmu_buf_rele(db, zgd);
	zfs_range_unlock(rl);
	kmem_free(zgd, sizeof (zgd_t));

	return (error);
}
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
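/*
 * With the default 32K threshold, a small write (say 8K) is copied into
 * the log record itself (WR_NEED_COPY), while a larger one (say 128K)
 * is logged as WR_INDIRECT and the data is later synced in place via
 * dmu_sync(); see zvol_get_data() above.
 */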
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len)
{
	uint32_t blocksize = zv->zv_volblocksize;
	lr_write_t *lr;

	while (len) {
		ssize_t nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (*lr));

		itx->itx_wr_state =
		    len > zvol_immediate_write_sz ? WR_INDIRECT : WR_NEED_COPY;
		itx->itx_private = zv;
		lr = (lr_write_t *)&itx->itx_lr;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = nbytes;
		lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t);
		BP_ZERO(&lr->lr_blkptr);

		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
		len -= nbytes;
		off += nbytes;
	}
}
static int
zvol_dumpio(vdev_t *vd, uint64_t size, uint64_t offset, void *addr,
    int bflags, int isdump)
{
	vdev_disk_t *dvd;
	int direction;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		if (zvol_dumpio(vd->vdev_child[c], size, offset,
		    addr, bflags, isdump) != 0) {
			numerrors++;
		} else if (bflags & B_READ) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (!vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	direction = bflags & (B_WRITE | B_READ);
	ASSERT(ISP2(direction));
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		if (direction & B_READ)
			return (EIO);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	}
	return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
	    direction));
}
static int
zvol_physio(zvol_state_t *zv, int bflags, uint64_t off,
    uint64_t size, void *addr, int isdump)
{
	dva_t dva;
	vdev_t *vd;
	int error;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	ASSERT(size <= zv->zv_volblocksize);

	/* restrict requests to multiples of the system block size */
	if (P2PHASE(off, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE))
		return (EINVAL);

	if (zvol_get_dva(zv, off, &dva) != 0)
		return (EIO);

	spa_config_enter(spa, RW_READER, FTAG);
	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));

	error = zvol_dumpio(vd, size,
	    DVA_GET_OFFSET(&dva) + (off % zv->zv_volblocksize),
	    addr, bflags & (B_READ | B_WRITE | B_PHYS), isdump);

	spa_config_exit(spa, FTAG);
	return (error);
}
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t size, resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t reading, is_dump;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}
	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}
	/* only dereference zv once we know it is valid */
	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
	if (!(bp->b_flags & B_READ) &&
	    (zv->zv_flags & ZVOL_RDONLY ||
	    zv->zv_mode & DS_MODE_READONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;
	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	reading = bp->b_flags & B_READ;
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    reading ? RL_READER : RL_WRITER);

	if (resid > volsize - off)	/* don't write past the end */
		resid = volsize - off;

	while (resid != 0 && off < volsize) {
		size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			/* can't straddle a block boundary */
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_physio(zv, bp->b_flags, off, size,
			    addr, 0);
		} else if (reading) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}

	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (!(bp->b_flags & B_ASYNC) && !reading && !zil_disable && !is_dump)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
	biodone(bp);

	return (0);
}
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}
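/*
 * zvol_minphys is the minphys routine handed to physio() together with
 * zvol_strategy, e.g. on the ZVOL_DUMPIFIED write path in zvol_write().
 */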
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	if (minor == 0)		/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);
	if (boff + resid > zv->zv_volsize) {
		/* dump should know better than to write here */
		ASSERT(boff + resid <= zv->zv_volsize);
		return (EIO);
	}
	while (resid) {
		/* can't straddle a block boundary */
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);

		error = zvol_physio(zv, B_WRITE, boff, size, addr, 1);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}
/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	rl_t *rl;
	int error = 0;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error)
			break;
	}
	zfs_range_unlock(rl);
	return (error);
}
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	rl_t *rl;
	int error = 0;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;

		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	return (error);
}
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	dk_efi_t efi;
	struct dk_callback *dkc;
	struct uuid uuid = EFI_RESERVED;
	uint32_t crc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));

	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
			mutex_exit(&zvol_state_lock);
			return (EFAULT);
		}
		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;

		/*
		 * Some clients may attempt to request a PMBR for the
		 * zvol.  Currently this interface will return ENOTTY to
		 * such requests.  These requests could be supported by
		 * adding a check for lba == 0 and consing up an appropriate
		 * PMBR.
		 */
		if (efi.dki_lba == 1) {
			efi_gpt_t gpt;
			efi_gpe_t gpe;

			bzero(&gpt, sizeof (gpt));
			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpt)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
			gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
			gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
			gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
			gpt.efi_gpt_LastUsableLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
			gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
			gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
			gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
			gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;

			CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
			gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);

			CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
			gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag))
				error = EFAULT;
		} else if (efi.dki_lba == 2) {
			efi_gpe_t gpe;

			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpe)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = LE_64(34ULL);
			gpe.efi_gpe_EndingLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpe, efi.dki_data, sizeof (gpe), flag))
				error = EFAULT;
		} else {
			mutex_exit(&zvol_state_lock);
			error = EINVAL;
		}
		return (error);

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = DDI_SUCCESS;
		}
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}
void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
}
void
zvol_fini(void)
{
	mutex_destroy(&zvol_state_lock);
	ddi_soft_state_fini(&zvol_state);
}
static boolean_t
zvol_is_swap(zvol_state_t *zv)
{
	vnode_t *vp;
	boolean_t ret = B_FALSE;
	char *devpath;
	size_t devpathlen;
	int error;

	devpathlen = strlen(ZVOL_FULL_DEV_DIR) + strlen(zv->zv_name) + 1;
	devpath = kmem_alloc(devpathlen, KM_SLEEP);
	(void) sprintf(devpath, "%s%s", ZVOL_FULL_DEV_DIR, zv->zv_name);
	error = lookupname(devpath, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
	kmem_free(devpath, devpathlen);

	ret = !error && IS_SWAPVP(common_specvp(vp));

	if (!error)
		VN_RELE(vp);

	return (ret);
}
static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t checksum, compress, refresrv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, ZVOL_OBJ, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvol size.  Otherwise, we save off the original properties of
	 * the zvol so that we can restore them if the zvol is ever
	 * undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	}
	dmu_tx_commit(tx);

	/* Truncate the file */
	if (!error)
		error = zvol_truncate(zv, 0, DMU_OBJECT_END);

	if (error)
		return (error);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);

		error = zfs_set_prop_nvlist(zv->zv_name, nv);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}
static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY || (zv->zv_mode & DS_MODE_READONLY))
		return (EROFS);

	/*
	 * We do not support swap devices acting as dump devices.
	 */
	if (zvol_is_swap(zv))
		return (ENOTSUP);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv;

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);

	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	dmu_tx_commit(tx);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	(void) zfs_set_prop_nvlist(zv->zv_name, nv);
	nvlist_free(nv);

	return (0);
}