 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/<pool_name>/<dataset_name>
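 *
 * (For example, a volume created as "tank/vol0", a hypothetical pool and
 * dataset name, appears as /dev/tank/vol0.)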
 *
 * Volumes are persistent through reboot and module load.  No user command
 * needs to be run before opening and using a device.
 */
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/zil_impl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <linux/blkdev_compat.h>
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_threads = 0;

static taskq_t *zvol_taskq;
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
static char *zvol_tag = "zvol_tag";
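
/*
 * A zvol_threads value of 0 is a sentinel meaning "auto"; zvol_init()
 * replaces it with num_online_cpus() before creating the taskq.
 */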
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
    char zv_name[DISK_NAME_LEN];    /* name */
    uint64_t zv_volsize;            /* advertised space */
    uint64_t zv_volblocksize;       /* volume block size */
    objset_t *zv_objset;            /* objset handle */
    uint32_t zv_flags;              /* ZVOL_* flags */
    uint32_t zv_open_count;         /* open count */
    uint32_t zv_changed;            /* disk changed */
    zilog_t *zv_zilog;              /* ZIL handle */
    znode_t zv_znode;               /* for range locking */
    dmu_buf_t *zv_dbuf;             /* bonus handle */
    dev_t zv_dev;                   /* device id */
    struct gendisk *zv_disk;        /* generic disk */
    struct request_queue *zv_queue; /* request queue */
    spinlock_t zv_lock;             /* request queue lock */
    list_node_t zv_next;            /* next zvol_state_t linkage */
} zvol_state_t;

#define ZVOL_RDONLY 0x1
/*
 * Find the next available range of ZVOL_MINORS minor numbers.  The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence.  This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
    zvol_state_t *zv;

    *minor = 0;
    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
        if (MINOR(zv->zv_dev) != MINOR(*minor))
            break;
    }

    /* All minors are in use */
    if (*minor >= (1 << MINORBITS))
        return (ENXIO);

    return (0);
}
/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (zv->zv_dev == dev)
            return (zv);
    }

    return (NULL);
}
/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (!strncmp(zv->zv_name, name, DISK_NAME_LEN))
            return (zv);
    }

    return (NULL);
}
/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
    zfs_creat_t *zct = arg;
    nvlist_t *nvprops = zct->zct_props;
    int error;
    uint64_t volblocksize, volsize;

    VERIFY(nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
    if (nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
        volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

    /*
     * These properties must be removed from the list so the generic
     * property setting step won't apply to them.
     */
    VERIFY(nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
    (void) nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

    error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
    ASSERT(error == 0);
}
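
/*
 * The advertised volume size is persisted as the "size" entry in the
 * volume's ZAP object; zvol_get_stats() and zvol_first_open() read it
 * back from there.
 */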
/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
    int error;
    dmu_object_info_t *doi;
    uint64_t val;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
    if (error)
        return (error);

    dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
    error = dmu_object_info(os, ZVOL_OBJ, doi);

    if (error == 0) {
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
            doi->doi_data_block_size);
    }

    kmem_free(doi, sizeof (dmu_object_info_t));

    return (error);
}
/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
    if (volsize == 0)
        return (EINVAL);

    if (volsize % blocksize != 0)
        return (EINVAL);

#ifdef _ILP32
    if (volsize - 1 > MAXOFFSET_T)
        return (EOVERFLOW);
#endif
    return (0);
}
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
    struct block_device *bdev;
    dmu_tx_t *tx;
    int error;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
        &volsize, tx);
    dmu_tx_commit(tx);

    if (error)
        return (error);

    error = dmu_free_long_range(zv->zv_objset,
        ZVOL_OBJ, volsize, DMU_OBJECT_END);
    if (error)
        return (error);

    zv->zv_volsize = volsize;
    zv->zv_changed = 1;

    bdev = bdget_disk(zv->zv_disk, 0);
    if (bdev == NULL)
        return (EIO);
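    /*
     * Since zv_changed was just set, check_disk_change() is expected to
     * report a change and invoke our zvol_media_changed() and
     * zvol_revalidate_disk() callbacks, which publish the new capacity
     * to the kernel.
     */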
    error = check_disk_change(bdev);
    ASSERT3U(error, !=, 0);
    bdput(bdev);

    return (0);
}
/*
 * ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
    zvol_state_t *zv;
    dmu_object_info_t *doi;
    objset_t *os = NULL;
    uint64_t readonly;
    int error;

    mutex_enter(&zvol_state_lock);

    zv = zvol_find_by_name(name);
    if (zv == NULL) {
        error = ENXIO;
        goto out;
    }

    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

    error = dmu_objset_hold(name, FTAG, &os);
    if (error)
        goto out_doi;

    if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
        (error = zvol_check_volsize(volsize,
        doi->doi_data_block_size)) != 0)
        goto out_doi;

    VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
    if (readonly) {
        error = EROFS;
        goto out_doi;
    }

    if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
        error = EROFS;
        goto out_doi;
    }

    error = zvol_update_volsize(zv, volsize);
out_doi:
    kmem_free(doi, sizeof (dmu_object_info_t));
out:
    if (os)
        dmu_objset_rele(os, FTAG);

    mutex_exit(&zvol_state_lock);

    return (error);
}
/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(uint64_t volblocksize)
{
    if (volblocksize < SPA_MINBLOCKSIZE ||
        volblocksize > SPA_MAXBLOCKSIZE ||
        !ISP2(volblocksize))
        return (EDOM);

    return (0);
}
/*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
    zvol_state_t *zv;
    dmu_tx_t *tx;
    int error;

    mutex_enter(&zvol_state_lock);

    zv = zvol_find_by_name(name);
    if (zv == NULL) {
        error = ENXIO;
        goto out;
    }

    if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
        error = EROFS;
        goto out;
    }

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
            volblocksize, 0, tx);
        if (error == ENOTSUP)
            error = EBUSY;
        dmu_tx_commit(tx);
        if (error == 0)
            zv->zv_volblocksize = volblocksize;
    }
out:
    mutex_exit(&zvol_state_lock);

    return (error);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
    objset_t *os = zv->zv_objset;
    char *data = (char *)(lr + 1);  /* data follows lr_write_t */
    uint64_t off = lr->lr_offset;
    uint64_t len = lr->lr_length;
    dmu_tx_t *tx;
    int error;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        dmu_write(os, ZVOL_OBJ, off, len, data, tx);
        dmu_tx_commit(tx);
    }

    return (error);
}
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
    return (ENOTSUP);
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
    (zil_replay_func_t *)zvol_replay_err,   /* no such transaction type */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_CREATE */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_MKDIR */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_MKXATTR */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_SYMLINK */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_REMOVE */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_RMDIR */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_LINK */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_RENAME */
    (zil_replay_func_t *)zvol_replay_write, /* TX_WRITE */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_TRUNCATE */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_SETATTR */
    (zil_replay_func_t *)zvol_replay_err,   /* TX_ACL */
};
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
    uint64_t offset, uint64_t size, int sync)
{
    uint32_t blocksize = zv->zv_volblocksize;
    zilog_t *zilog = zv->zv_zilog;
    boolean_t slogging;

    if (zil_replaying(zilog, tx))
        return;

    slogging = spa_has_slogs(zilog->zl_spa);

    while (size) {
        itx_t *itx;
        lr_write_t *lr;
        ssize_t len;
        itx_wr_state_t write_state;

        /*
         * Unlike zfs_log_write() we can be called with
         * up to DMU_MAX_ACCESS/2 (5MB) writes.
         */
        if (blocksize > zvol_immediate_write_sz && !slogging &&
            size >= blocksize && offset % blocksize == 0) {
            write_state = WR_INDIRECT; /* uses dmu_sync */
            len = blocksize;
        } else if (sync) {
            write_state = WR_COPIED;
            len = MIN(ZIL_MAX_LOG_DATA, size);
        } else {
            write_state = WR_NEED_COPY;
            len = MIN(ZIL_MAX_LOG_DATA, size);
        }
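
        /*
         * To summarize the three itx write states chosen above (a
         * reading of this logic, not authoritative ZIL documentation):
         * WR_INDIRECT syncs the data via dmu_sync() and logs only a
         * block pointer, WR_COPIED embeds a copy of the data in the
         * itx itself, and WR_NEED_COPY defers the copy until the log
         * record is actually committed.
         */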
        itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
            (write_state == WR_COPIED ? len : 0));
        lr = (lr_write_t *)&itx->itx_lr;
        if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
            ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
            zil_itx_destroy(itx);
            itx = zil_itx_create(TX_WRITE, sizeof (*lr));
            lr = (lr_write_t *)&itx->itx_lr;
            write_state = WR_NEED_COPY;
        }

        itx->itx_wr_state = write_state;
        if (write_state == WR_NEED_COPY)
            itx->itx_sod += len;
        lr->lr_foid = ZVOL_OBJ;
        lr->lr_offset = offset;
        lr->lr_length = len;
        lr->lr_blkoff = 0;
        BP_ZERO(&lr->lr_blkptr);

        itx->itx_private = zv;
        itx->itx_sync = sync;

        (void) zil_itx_assign(zilog, itx, tx);

        offset += len;
        size -= len;
    }
}
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error = 0;
    dmu_tx_t *tx;
    rl_t *rl;

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

    /* This will only fail for ENOSPC */
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_range_unlock(rl);
        blk_end_request(req, -error, size);
        return;
    }

    error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
    if (error == 0)
        zvol_log_write(zv, tx, offset, size, rq_is_sync(req));

    dmu_tx_commit(tx);
    zfs_range_unlock(rl);

    if (rq_is_sync(req))
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    blk_end_request(req, -error, size);
}
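
/*
 * Note: dmu_write_req() and dmu_read_req() are Linux-port helpers which
 * walk the bio's attached to a struct request and copy data between the
 * request pages and the DMU.
 */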
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = EIO;

    blk_end_request(req, -error, size);
}
/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling.
 */
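/*
 * Note that the dispatch must not sleep (TQ_NOSLEEP): zvol_request()
 * calls this function while holding the request queue's spin lock.
 */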
static void
zvol_dispatch(task_func_t func, struct request *req)
{
    if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
        blk_requeue_request(req->q, req);
}
/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it
 * allows us to easily merge read requests which would otherwise be
 * performed synchronously by the DMU.  This is less critical in the write
 * case where the DMU will perform the correct merging within a transaction
 * group.  Using the generic make_request() also lets us leverage the fact
 * that the elevator will ensure correct ordering with regard to barrier
 * IOs.  On the downside it means that in the write case we end up doing
 * request merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
    zvol_state_t *zv = q->queuedata;
    struct request *req;
    unsigned int size;

    while ((req = blk_fetch_request(q)) != NULL) {
        size = blk_rq_bytes(req);

        if (blk_rq_pos(req) + blk_rq_sectors(req) >
            get_capacity(zv->zv_disk)) {
            printk(KERN_INFO
                "%s: bad access: block=%llu, count=%lu\n",
                req->rq_disk->disk_name,
                (long long unsigned)blk_rq_pos(req),
                (long unsigned)blk_rq_sectors(req));
            __blk_end_request(req, -EIO, size);
            continue;
        }

        if (!blk_fs_request(req)) {
            printk(KERN_INFO "%s: non-fs cmd\n",
                req->rq_disk->disk_name);
            __blk_end_request(req, -EIO, size);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            zvol_dispatch(zvol_read, req);
            break;
        case WRITE:
            if (unlikely(get_disk_ro(zv->zv_disk)) ||
                unlikely(zv->zv_flags & ZVOL_RDONLY)) {
                __blk_end_request(req, -EROFS, size);
                break;
            }

            zvol_dispatch(zvol_write, req);
            break;
        default:
            printk(KERN_INFO "%s: unknown cmd: %d\n",
                req->rq_disk->disk_name, (int)rq_data_dir(req));
            __blk_end_request(req, -EIO, size);
            break;
        }
    }
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
    if (zgd->zgd_db)
        dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
    zvol_state_t *zv = arg;
    objset_t *os = zv->zv_objset;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;
    dmu_buf_t *db;
    zgd_t *zgd;
    int error;

    ASSERT(zio != NULL);
    ASSERT(size != 0);

    zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zv->zv_zilog;
    zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    /*
     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
     */
    if (buf != NULL) { /* immediate write */
        error = dmu_read(os, ZVOL_OBJ, offset, size, buf,
            DMU_READ_NO_PREFETCH);
    } else {
        size = zv->zv_volblocksize;
        offset = P2ALIGN_TYPED(offset, size, uint64_t);
        error = dmu_buf_hold(os, ZVOL_OBJ, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);
        if (error == 0) {
            zgd->zgd_db = db;
            zgd->zgd_bp = &lr->lr_blkptr;

            ASSERT(db != NULL);
            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,
                zvol_get_done, zgd);

            if (error == 0)
                return (0);
        }
    }

    zvol_get_done(zgd, error);

    return (error);
}
/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
    zvol_state_t *zv = NULL;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
            break;
    }

    list_insert_before(&zvol_state_list, zv, zv_insert);
}
/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
    ASSERT(MUTEX_HELD(&zvol_state_lock));
    list_remove(&zvol_state_list, zv_remove);
}
static int
zvol_first_open(zvol_state_t *zv)
{
    objset_t *os;
    uint64_t volsize;
    int error;
    uint64_t ro;

    /* lie and say we're read-only */
    error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
    if (error)
        return (-error);

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        return (-error);
    }

    zv->zv_objset = os;
    error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        return (-error);
    }

    set_capacity(zv->zv_disk, volsize >> 9);
    zv->zv_volsize = volsize;
    zv->zv_zilog = zil_open(os, zvol_get_data);

    VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
    if (ro || dmu_objset_is_snapshot(os)) {
        set_disk_ro(zv->zv_disk, 1);
        zv->zv_flags |= ZVOL_RDONLY;
    } else {
        set_disk_ro(zv->zv_disk, 0);
        zv->zv_flags &= ~ZVOL_RDONLY;
    }

    return (-error);
}
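
/*
 * zvol_first_open() and zvol_last_close() bracket the objset, bonus
 * buffer, and ZIL holds; they run on the 0 -> 1 and 1 -> 0 transitions
 * of zv_open_count respectively.
 */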
static void
zvol_last_close(zvol_state_t *zv)
{
    zil_close(zv->zv_zilog);
    zv->zv_zilog = NULL;

    dmu_buf_rele(zv->zv_dbuf, zvol_tag);
    zv->zv_dbuf = NULL;

    dmu_objset_disown(zv->zv_objset, zvol_tag);
    zv->zv_objset = NULL;
}
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    int error = 0, drop_mutex = 0;

    /*
     * If the caller is already holding the mutex do not take it
     * again, this will happen as part of zvol_create_minor().
     * Once add_disk() is called the device is live and the kernel
     * will attempt to open it to read the partition information.
     */
    if (!mutex_owned(&zvol_state_lock)) {
        mutex_enter(&zvol_state_lock);
        drop_mutex = 1;
    }

    ASSERT3P(zv, !=, NULL);

    if (zv->zv_open_count == 0) {
        error = zvol_first_open(zv);
        if (error)
            goto out_mutex;
    }

    if ((flag & FMODE_WRITE) &&
        (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY))) {
        error = -EROFS;
        goto out_open_count;
    }

    zv->zv_open_count++;

out_open_count:
    if (zv->zv_open_count == 0)
        zvol_last_close(zv);

out_mutex:
    if (drop_mutex)
        mutex_exit(&zvol_state_lock);

    check_disk_change(bdev);

    return (error);
}
static int
zvol_release(struct gendisk *disk, fmode_t mode)
{
    zvol_state_t *zv = disk->private_data;
    int drop_mutex = 0;

    if (!mutex_owned(&zvol_state_lock)) {
        mutex_enter(&zvol_state_lock);
        drop_mutex = 1;
    }

    ASSERT3P(zv, !=, NULL);
    ASSERT3U(zv->zv_open_count, >, 0);
    zv->zv_open_count--;
    if (zv->zv_open_count == 0)
        zvol_last_close(zv);

    if (drop_mutex)
        mutex_exit(&zvol_state_lock);

    return (0);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    int error = 0;

    if (zv == NULL)
        return (-ENXIO);

    switch (cmd) {
    case BLKFLSBUF:
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
        break;

    default:
        error = -ENOTTY;
        break;
    }

    return (error);
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
    return zvol_ioctl(bdev, mode, cmd, arg);
}
#else
#define zvol_compat_ioctl NULL
#endif
static int zvol_media_changed(struct gendisk *disk)
{
    zvol_state_t *zv = disk->private_data;

    return zv->zv_changed;
}

static int zvol_revalidate_disk(struct gendisk *disk)
{
    zvol_state_t *zv = disk->private_data;

    zv->zv_changed = 0;
    set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

    return 0;
}
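
/*
 * Together these callbacks implement the resize handshake: a volume
 * size change sets zv_changed, check_disk_change() then sees the change
 * via zvol_media_changed() and calls zvol_revalidate_disk() to publish
 * the new capacity.
 */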
/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinder count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    sector_t sectors = get_capacity(zv->zv_disk);

    if (sectors > 2048) {
        geo->heads = 16;
        geo->sectors = 63;
    } else {
        geo->heads = 2;
        geo->sectors = 4;
    }

    geo->start = 0;
    geo->cylinders = sectors / (geo->heads * geo->sectors);

    return 0;
}
static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
    zvol_state_t *zv;
    struct kobject *kobj;

    mutex_enter(&zvol_state_lock);
    zv = zvol_find_by_dev(dev);
    kobj = zv ? get_disk(zv->zv_disk) : ERR_PTR(-ENOENT);
    mutex_exit(&zvol_state_lock);

    return kobj;
}
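
/*
 * Note: get_disk() takes a reference on the gendisk and its owning
 * module; the block layer drops it with put_disk() when it is done
 * with the device.
 */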
#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
    .open            = zvol_open,
    .release         = zvol_release,
    .ioctl           = zvol_ioctl,
    .compat_ioctl    = zvol_compat_ioctl,
    .media_changed   = zvol_media_changed,
    .revalidate_disk = zvol_revalidate_disk,
    .getgeo          = zvol_getgeo,
    .owner           = THIS_MODULE,
};

#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
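
/*
 * Older kernels pass an inode/file pair to the block device methods
 * rather than a block_device/mode pair.  The *_by_inode() wrappers
 * below adapt the modern-style functions above to that interface.
 */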
static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
    return zvol_open(inode->i_bdev, file->f_mode);
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
    return zvol_release(inode->i_bdev->bd_disk, file->f_mode);
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
    if (file == NULL || inode == NULL)
        return -EINVAL;

    return zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg);
}

# ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
    if (file == NULL)
        return -EINVAL;

    return zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
        file->f_mode, cmd, arg);
}
# else
# define zvol_compat_ioctl_by_inode NULL
# endif
static struct block_device_operations zvol_ops = {
    .open            = zvol_open_by_inode,
    .release         = zvol_release_by_inode,
    .ioctl           = zvol_ioctl_by_inode,
    .compat_ioctl    = zvol_compat_ioctl_by_inode,
    .media_changed   = zvol_media_changed,
    .revalidate_disk = zvol_revalidate_disk,
    .getgeo          = zvol_getgeo,
    .owner           = THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
    zvol_state_t *zv;

    zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
    if (zv == NULL)
        goto out;

    zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
    if (zv->zv_queue == NULL)
        goto out_kmem;

    zv->zv_disk = alloc_disk(ZVOL_MINORS);
    if (zv->zv_disk == NULL)
        goto out_queue;

    zv->zv_queue->queuedata = zv;
    zv->zv_dev = dev;
    zv->zv_open_count = 0;
    strlcpy(zv->zv_name, name, DISK_NAME_LEN);

    mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
        sizeof (rl_t), offsetof(rl_t, r_node));
    zv->zv_znode.z_is_zvol = TRUE;

    spin_lock_init(&zv->zv_lock);
    list_link_init(&zv->zv_next);

    zv->zv_disk->major = zvol_major;
    zv->zv_disk->first_minor = (dev & MINORMASK);
    zv->zv_disk->fops = &zvol_ops;
    zv->zv_disk->private_data = zv;
    zv->zv_disk->queue = zv->zv_queue;
    snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s", name);

    return zv;

out_queue:
    blk_cleanup_queue(zv->zv_queue);
out_kmem:
    kmem_free(zv, sizeof (zvol_state_t));
out:
    return NULL;
}
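
/*
 * The dataset name may itself contain '/' characters; the kernel
 * replaces these with '!' in the device node name, and the
 * /dev/<pool_name>/<dataset_name> links advertised above are expected
 * to be created by userspace (udev) policy.
 */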
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
    avl_destroy(&zv->zv_znode.z_range_avl);
    mutex_destroy(&zv->zv_znode.z_range_lock);

    del_gendisk(zv->zv_disk);
    blk_cleanup_queue(zv->zv_queue);
    put_disk(zv->zv_disk);

    kmem_free(zv, sizeof (zvol_state_t));
}
static int
__zvol_create_minor(const char *name)
{
    zvol_state_t *zv;
    objset_t *os;
    dmu_object_info_t *doi;
    uint64_t volsize;
    unsigned minor = 0;
    int error = 0;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    zv = zvol_find_by_name(name);
    if (zv) {
        error = EEXIST;
        goto out;
    }

    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

    error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
    if (error)
        goto out_doi;

    error = dmu_object_info(os, ZVOL_OBJ, doi);
    if (error)
        goto out_dmu_objset_disown;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error)
        goto out_dmu_objset_disown;

    error = zvol_find_minor(&minor);
    if (error)
        goto out_dmu_objset_disown;

    zv = zvol_alloc(MKDEV(zvol_major, minor), name);
    if (zv == NULL) {
        error = EAGAIN;
        goto out_dmu_objset_disown;
    }

    if (dmu_objset_is_snapshot(os))
        zv->zv_flags |= ZVOL_RDONLY;

    zv->zv_volblocksize = doi->doi_data_block_size;
    zv->zv_volsize = volsize;
    zv->zv_objset = os;

    set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

    if (zil_replay_disable)
        zil_destroy(dmu_objset_zil(os), B_FALSE);
    else
        zil_replay(os, zv, zvol_replay_vector);

out_dmu_objset_disown:
    dmu_objset_disown(os, zvol_tag);
    zv->zv_objset = NULL;
out_doi:
    kmem_free(doi, sizeof (dmu_object_info_t));
out:

    if (error == 0) {
        zvol_insert(zv);
        add_disk(zv->zv_disk);
    }

    return (error);
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_create_minor(const char *name)
{
    int error;

    mutex_enter(&zvol_state_lock);
    error = __zvol_create_minor(name);
    mutex_exit(&zvol_state_lock);

    return (error);
}
static int
__zvol_remove_minor(const char *name)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    zv = zvol_find_by_name(name);
    if (zv == NULL)
        return (ENXIO);

    if (zv->zv_open_count > 0)
        return (EBUSY);

    zvol_remove(zv);
    zvol_free(zv);

    return (0);
}
/*
 * Remove a block device minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
    int error;

    mutex_enter(&zvol_state_lock);
    error = __zvol_remove_minor(name);
    mutex_exit(&zvol_state_lock);

    return (error);
}
static int
zvol_create_minors_cb(spa_t *spa, uint64_t dsobj,
    const char *dsname, void *arg)
{
    if (strchr(dsname, '/') == NULL)
        return (0);

    (void) __zvol_create_minor(dsname);
    return (0);
}
/*
 * Create minors for specified pool, if pool is NULL create minors
 * for all available pools.
 */
int
zvol_create_minors(const char *pool)
{
    spa_t *spa = NULL;
    int error = 0;

    mutex_enter(&zvol_state_lock);
    if (pool) {
        error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
            NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
    } else {
        mutex_enter(&spa_namespace_lock);
        while ((spa = spa_next(spa)) != NULL) {
            error = dmu_objset_find_spa(NULL,
                spa_name(spa), zvol_create_minors_cb, NULL,
                DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
            if (error)
                break;
        }
        mutex_exit(&spa_namespace_lock);
    }
    mutex_exit(&zvol_state_lock);

    return (error);
}
/*
 * Remove minors for specified pool, if pool is NULL remove all minors.
 */
void
zvol_remove_minors(const char *pool)
{
    zvol_state_t *zv, *zv_next;
    char *str;

    str = kmem_zalloc(DISK_NAME_LEN, KM_SLEEP);
    if (pool) {
        (void) strncpy(str, pool, strlen(pool));
        (void) strcat(str, "/");
    }

    mutex_enter(&zvol_state_lock);
    for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
        zv_next = list_next(&zvol_state_list, zv);

        if (pool == NULL || !strncmp(str, zv->zv_name, strlen(str))) {
            zvol_remove(zv);
            zvol_free(zv);
        }
    }
    mutex_exit(&zvol_state_lock);
    kmem_free(str, DISK_NAME_LEN);
}
int
zvol_init(void)
{
    int error;

    if (zvol_threads == 0)
        zvol_threads = num_online_cpus();

    zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
        zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
    if (zvol_taskq == NULL) {
        printk(KERN_INFO "ZFS: taskq_create() failed\n");
        return -ENOMEM;
    }

    error = register_blkdev(zvol_major, ZVOL_DRIVER);
    if (error) {
        printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
        taskq_destroy(zvol_taskq);
        return error;
    }

    blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
        THIS_MODULE, zvol_probe, NULL, NULL);

    mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
    list_create(&zvol_state_list, sizeof (zvol_state_t),
        offsetof(zvol_state_t, zv_next));

    (void) zvol_create_minors(NULL);

    return 0;
}
void
zvol_fini(void)
{
    zvol_remove_minors(NULL);
    blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
    unregister_blkdev(zvol_major, ZVOL_DRIVER);
    taskq_destroy(zvol_taskq);
    mutex_destroy(&zvol_state_lock);
    list_destroy(&zvol_state_list);
}
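
/*
 * Example (hypothetical values): the module parameters below may be set
 * at module load time, e.g.:
 *
 *   modprobe zfs zvol_major=230 zvol_threads=32
 */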
module_param(zvol_major, uint, 0);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0);
MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");