4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
28 #include <sys/zfs_context.h>
30 #include <sys/vdev_disk.h>
31 #include <sys/vdev_impl.h>
32 #include <sys/fs/zfs.h>
34 #include <sys/sunldi.h>
36 char *zfs_vdev_scheduler = VDEV_SCHEDULER;
39 * Virtual device vector for disks.
41 typedef struct dio_request {
42 struct completion dr_comp; /* Completion for sync IO */
43 atomic_t dr_ref; /* References */
44 zio_t *dr_zio; /* Parent ZIO */
45 int dr_rw; /* Read/Write */
46 int dr_error; /* Bio error */
47 int dr_bio_count; /* Count of bio's */
48 struct bio *dr_bio[0]; /* Attached bio's */
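/*
 * Note: dr_bio[] is a zero-length (flexible) array; the actual number of
 * attached bio slots is fixed at allocation time in vdev_disk_dio_alloc(),
 * which over-allocates the structure by bio_count pointers.
 */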
52 #ifdef HAVE_OPEN_BDEV_EXCLUSIVE
54 vdev_bdev_mode(int smode)
58 ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
70 vdev_bdev_mode(int smode)
74 ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
76 if ((smode & FREAD) && !(smode & FWRITE))
81 #endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
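/*
 * Return the usable capacity in bytes.  Both part->nr_sects and
 * get_capacity() report 512-byte sectors, hence the << 9 conversion below.
 */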
84 bdev_capacity(struct block_device *bdev)
86 struct hd_struct *part = bdev->bd_part;
88 /* The partition capacity referenced by the block device */
90 return (part->nr_sects << 9);
92 /* Otherwise assume the full device capacity */
93 return (get_capacity(bdev->bd_disk) << 9);
97 vdev_disk_error(zio_t *zio)
100 printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
101 "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
102 (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
103 zio->io_flags, (u_longlong_t)zio->io_delay);
108 * Use the Linux 'noop' elevator for zfs managed block devices. This
109 * strikes the ideal balance by allowing the zfs elevator to do all
110 * request ordering and prioritization, while allowing the Linux
111 * elevator to do the maximum front/back merging allowed by the
112 * physical device. This yields the largest possible requests for
113 * the device with the lowest total overhead.
115 * Unfortunately we cannot directly call the elevator_switch() function
116 * because it is not exported from the block layer. This means we have
117 * to use the sysfs interface and a user space upcall. Pools will be
118 * automatically imported on module load so we must do this at device
119 * open time from the kernel.
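 *
 * The upcall is roughly equivalent to running, for example (device and
 * elevator names here are purely illustrative):
 *
 *   echo noop >/sys/block/sda/queue/scheduler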
121 #define SET_SCHEDULER_CMD \
122 "exec 0</dev/null " \
123 " 1>/sys/block/%s/queue/scheduler " \
128 vdev_elevator_switch(vdev_t *v, char *elevator)
130 vdev_disk_t *vd = v->vdev_tsd;
131 struct block_device *bdev = vd->vd_bdev;
132 struct request_queue *q = bdev_get_queue(bdev);
133 char *device = bdev->bd_disk->disk_name;
134 char *argv[] = { "/bin/sh", "-c", NULL, NULL };
135 char *envp[] = { NULL };
138 /* Skip devices which are not whole disks (partitions) */
139 if (!v->vdev_wholedisk)
142 /* Skip devices without schedulers (loop, ram, dm, etc) */
143 if (!q->elevator || !blk_queue_stackable(q))
146 /* Leave existing scheduler when set to "none" */
147 if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
150 argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
151 error = call_usermodehelper(argv[0], argv, envp, 1);
153 printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
154 elevator, v->vdev_path, device, error);
162 * Expanding a whole disk vdev involves invoking BLKRRPART on the
163 * whole disk device. This poses a problem, because BLKRRPART will
164 * return EBUSY if one of the disk's partitions is open. That's why
165 * we have to do it here, just before opening the data partition.
166 * Unfortunately, BLKRRPART works by dropping all partitions and
167 * recreating them, which means that for a short time window, all
168 * /dev/sdxN device files disappear (until udev recreates them).
169 * This means two things:
170 * - When we open the data partition just after a BLKRRPART, we
171 * can't do it using the normal device file path because of the
172 * obvious race condition with udev. Instead, we use reliable
173 * kernel APIs to get a handle to the new partition device from
174 * the whole disk device.
175 * - Because vdev_disk_open() initially needs to find the device
176 * using its path, multiple vdev_disk_open() invocations in
177 * short succession on the same disk with BLKRRPARTs in the
178 * middle have a high probability of failure (because of the
179 * race condition with udev). A typical situation where this
180 * might happen is when the zpool userspace tool does a
181 * TRYIMPORT immediately followed by an IMPORT. For this
182 * reason, we only invoke BLKRRPART in the module when strictly
183 * necessary (zpool online -e case), and rely on userspace to
184 * do it when possible.
186 static struct block_device *
187 vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
189 #if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
190 struct block_device *bdev, *result = ERR_PTR(-ENXIO);
191 struct gendisk *disk;
194 bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), vd);
198 disk = get_gendisk(bdev->bd_dev, &partno);
199 vdev_bdev_close(bdev, vdev_bdev_mode(mode));
202 bdev = bdget(disk_devt(disk));
204 error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
206 error = ioctl_by_bdev(bdev, BLKRRPART, 0);
207 vdev_bdev_close(bdev, vdev_bdev_mode(mode));
210 bdev = bdget_disk(disk, partno);
212 error = blkdev_get(bdev,
213 vdev_bdev_mode(mode) | FMODE_EXCL, vd);
222 return ERR_PTR(-EOPNOTSUPP);
223 #endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
227 vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
230 struct block_device *bdev = ERR_PTR(-ENXIO);
232 int mode, block_size;
234 /* Must have a pathname and it must be absolute. */
235 if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
236 v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
240 vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
245 * Devices are always opened by the path provided at configuration
246 * time. This means that if the provided path is a udev by-id path
247 * then drives may be recabled without an issue. If the provided
248 * path is a udev by-path path then the physical location information
249 * will be preserved. This can be critical for more complicated
250 * configurations where drives are located in specific physical
251 * locations to maximize the system's tolerance to component failure.
252 * Alternately you can provide your own udev rule to flexibly map
253 * the drives as you see fit. It is not advised that you use the
254 * /dev/[hd]d devices, which may be reordered due to probing order.
255 * Devices in the wrong locations will be detected by the higher
256 * level vdev validation.
258 mode = spa_mode(v->vdev_spa);
259 if (v->vdev_wholedisk && v->vdev_expanding)
260 bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
262 bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
264 kmem_free(vd, sizeof(vdev_disk_t));
265 return -PTR_ERR(bdev);
270 block_size = vdev_bdev_block_size(bdev);
272 /* We think the wholedisk property should always be set when this
273 * function is called. ASSERT here so if any legitimate cases exist
274 * where it's not set, we'll find them during debugging. If we never
275 * hit the ASSERT, this and the following conditional statement can be
277 ASSERT3S(v->vdev_wholedisk, !=, -1ULL);
279 /* The wholedisk property was initialized to -1 in vdev_alloc() if it
280 * was unspecified. In that case, check if this is a whole device.
281 * When bdev->bd_contains == bdev we have a whole device and not simply
283 if (v->vdev_wholedisk == -1ULL)
284 v->vdev_wholedisk = (bdev->bd_contains == bdev);
286 /* Clear the nowritecache bit, which causes vdev_reopen() to try again. */
287 v->vdev_nowritecache = B_FALSE;
289 /* Physical volume size in bytes */
290 *psize = bdev_capacity(bdev);
292 /* TODO: report possible expansion size */
295 /* Set ashift based on the device's minimum sector size */
296 *ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
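	/*
	 * For example, a 512-byte minimum sector size yields ashift = 9 and a
	 * 4096-byte sector size yields ashift = 12.
	 */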
298 /* Try to set the io scheduler elevator algorithm */
299 (void) vdev_elevator_switch(v, zfs_vdev_scheduler);
305 vdev_disk_close(vdev_t *v)
307 vdev_disk_t *vd = v->vdev_tsd;
312 if (vd->vd_bdev != NULL)
313 vdev_bdev_close(vd->vd_bdev,
314 vdev_bdev_mode(spa_mode(v->vdev_spa)));
316 kmem_free(vd, sizeof(vdev_disk_t));
320 static dio_request_t *
321 vdev_disk_dio_alloc(int bio_count)
326 dr = kmem_zalloc(sizeof(dio_request_t) +
327 sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
329 init_completion(&dr->dr_comp);
330 atomic_set(&dr->dr_ref, 0);
331 dr->dr_bio_count = bio_count;
334 for (i = 0; i < dr->dr_bio_count; i++)
335 dr->dr_bio[i] = NULL;
342 vdev_disk_dio_free(dio_request_t *dr)
346 for (i = 0; i < dr->dr_bio_count; i++)
348 bio_put(dr->dr_bio[i]);
350 kmem_free(dr, sizeof(dio_request_t) +
351 sizeof(struct bio *) * dr->dr_bio_count);
355 vdev_disk_dio_is_sync(dio_request_t *dr)
357 #ifdef HAVE_BIO_RW_SYNC
358 /* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
359 return (dr->dr_rw & (1 << BIO_RW_SYNC));
361 # ifdef HAVE_BIO_RW_SYNCIO
362 /* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
363 return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
365 # ifdef HAVE_REQ_SYNC
366 /* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
367 return (dr->dr_rw & REQ_SYNC);
369 # error "Unable to determine bio sync flag"
370 # endif /* HAVE_REQ_SYNC */
371 # endif /* HAVE_BIO_RW_SYNC */
372 #endif /* HAVE_BIO_RW_SYNCIO */
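/*
 * The HAVE_* flags above are assumed to come from the build's configure-time
 * kernel interface checks; exactly one of the branches is compiled in for
 * the running kernel.
 */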
376 vdev_disk_dio_get(dio_request_t *dr)
378 atomic_inc(&dr->dr_ref);
382 vdev_disk_dio_put(dio_request_t *dr)
384 int rc = atomic_dec_return(&dr->dr_ref);
387 * Free the dio_request when the last reference is dropped and
388 * ensure zio_interpret is called only once with the correct zio
391 zio_t *zio = dr->dr_zio;
392 int error = dr->dr_error;
394 vdev_disk_dio_free(dr);
397 zio->io_delay = jiffies_to_msecs(
398 jiffies_64 - zio->io_delay);
399 zio->io_error = error;
400 ASSERT3S(zio->io_error, >=, 0);
402 vdev_disk_error(zio);
410 BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
412 dio_request_t *dr = bio->bi_private;
415 /* Fatal error but print some useful debugging before asserting */
417 PANIC("dr == NULL, bio->bi_private == NULL\n"
418 "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
419 "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
420 bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
421 bio->bi_idx, bio->bi_size, bio->bi_end_io,
422 atomic_read(&bio->bi_cnt));
424 #ifndef HAVE_2ARGS_BIO_END_IO_T
427 #endif /* HAVE_2ARGS_BIO_END_IO_T */
429 if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
432 if (dr->dr_error == 0)
433 dr->dr_error = -error;
435 /* Drop reference acquired by __vdev_disk_physio */
436 rc = vdev_disk_dio_put(dr);
438 /* Wake up the synchronous waiter if this is the last outstanding bio */
439 if ((rc == 1) && vdev_disk_dio_is_sync(dr))
440 complete(&dr->dr_comp);
442 BIO_END_IO_RETURN(0);
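/*
 * Number of pages spanned by a buffer, accounting for the buffer's offset
 * into its first page.  For example (illustrative numbers, 4 KiB pages):
 * a 6000-byte buffer starting 1024 bytes into a page touches two pages,
 * since ((1024 + 6000 + 4095) >> 12) - (1024 >> 12) == 2.
 */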
445 static inline unsigned long
446 bio_nr_pages(void *bio_ptr, unsigned int bio_size)
448 return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
449 PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
453 bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
455 unsigned int offset, size, i;
458 offset = offset_in_page(bio_ptr);
459 for (i = 0; i < bio->bi_max_vecs; i++) {
460 size = PAGE_SIZE - offset;
468 if (kmem_virt(bio_ptr))
469 page = vmalloc_to_page(bio_ptr);
471 page = virt_to_page(bio_ptr);
473 if (bio_add_page(bio, page, size, offset) != size)
485 __vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
486 size_t kbuf_size, uint64_t kbuf_offset, int flags)
491 int bio_size, bio_count = 16;
492 int i = 0, error = 0;
494 ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);
497 dr = vdev_disk_dio_alloc(bio_count);
501 if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
502 bio_set_flags_failfast(bdev, &flags);
508 * When the IO size exceeds the maximum bio size for the request
509 * queue, we are forced to break the IO into multiple bio's and wait
510 * for them all to complete. Ideally, all pool users will set
511 * their volume block size to match the maximum request size and
512 * the common case will be one bio per vdev IO request.
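 *
 * For example (illustrative), a 1 MiB zio issued to a queue whose maximum
 * bio size is 128 KiB would be split into eight bio's here.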
515 bio_offset = kbuf_offset;
516 bio_size = kbuf_size;
517 for (i = 0; i <= dr->dr_bio_count; i++) {
519 /* Finished constructing bio's for given buffer */
524 * By default only 'bio_count' bio's per dio are allowed.
525 * However, if we find ourselves in a situation where more
526 * are needed we allocate a larger dio and warn the user.
528 if (dr->dr_bio_count == i) {
529 vdev_disk_dio_free(dr);
531 printk("WARNING: Resized bio's/dio to %d\n",bio_count);
535 dr->dr_bio[i] = bio_alloc(GFP_NOIO,
536 bio_nr_pages(bio_ptr, bio_size));
537 if (dr->dr_bio[i] == NULL) {
538 vdev_disk_dio_free(dr);
542 /* Matching put called by vdev_disk_physio_completion */
543 vdev_disk_dio_get(dr);
545 dr->dr_bio[i]->bi_bdev = bdev;
546 dr->dr_bio[i]->bi_sector = bio_offset >> 9;
547 dr->dr_bio[i]->bi_rw = dr->dr_rw;
548 dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
549 dr->dr_bio[i]->bi_private = dr;
551 /* Remaining size is returned to become the new size */
552 bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);
554 /* Advance in buffer and construct another bio if needed */
555 bio_ptr += dr->dr_bio[i]->bi_size;
556 bio_offset += dr->dr_bio[i]->bi_size;
559 /* Extra reference to protect dio_request during submit_bio */
560 vdev_disk_dio_get(dr);
562 zio->io_delay = jiffies_64;
564 /* Submit all bio's associated with this dio */
565 for (i = 0; i < dr->dr_bio_count; i++)
567 submit_bio(dr->dr_rw, dr->dr_bio[i]);
570 * On synchronous blocking requests we wait for all bio completion
571 * callbacks to run. We will be woken when the last callback runs
572 * for this dio. We are responsible for putting the last dio_request
573 * reference, which will in turn put back the last bio references. The
574 * only synchronous consumer is vdev_disk_read_rootlabel(); all other
575 * IO originating from vdev_disk_io_start() is asynchronous.
577 if (vdev_disk_dio_is_sync(dr)) {
578 wait_for_completion(&dr->dr_comp);
579 error = dr->dr_error;
580 ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
583 (void)vdev_disk_dio_put(dr);
589 vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
590 size_t size, uint64_t offset, int flags)
592 bio_set_flags_failfast(bdev, &flags);
593 return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
596 /* 2.6.24 API change */
597 #ifdef HAVE_BIO_EMPTY_BARRIER
598 BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
600 zio_t *zio = bio->bi_private;
602 zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);
604 if (rc && (rc == -EOPNOTSUPP))
605 zio->io_vd->vdev_nowritecache = B_TRUE;
608 ASSERT3S(zio->io_error, >=, 0);
610 vdev_disk_error(zio);
613 BIO_END_IO_RETURN(0);
617 vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
619 struct request_queue *q;
622 q = bdev_get_queue(bdev);
626 bio = bio_alloc(GFP_KERNEL, 0);
630 bio->bi_end_io = vdev_disk_io_flush_completion;
631 bio->bi_private = zio;
633 zio->io_delay = jiffies_64;
634 submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
640 vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
644 #endif /* HAVE_BIO_EMPTY_BARRIER */
647 vdev_disk_io_start(zio_t *zio)
649 vdev_t *v = zio->io_vd;
650 vdev_disk_t *vd = v->vdev_tsd;
653 switch (zio->io_type) {
656 if (!vdev_readable(v)) {
657 zio->io_error = ENXIO;
658 return ZIO_PIPELINE_CONTINUE;
661 switch (zio->io_cmd) {
662 case DKIOCFLUSHWRITECACHE:
664 if (zfs_nocacheflush)
667 if (v->vdev_nowritecache) {
668 zio->io_error = ENOTSUP;
672 error = vdev_disk_io_flush(vd->vd_bdev, zio);
674 return ZIO_PIPELINE_STOP;
676 zio->io_error = error;
677 if (error == ENOTSUP)
678 v->vdev_nowritecache = B_TRUE;
683 zio->io_error = ENOTSUP;
686 return ZIO_PIPELINE_CONTINUE;
697 zio->io_error = ENOTSUP;
698 return ZIO_PIPELINE_CONTINUE;
701 error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
702 zio->io_size, zio->io_offset, flags);
704 zio->io_error = error;
705 return ZIO_PIPELINE_CONTINUE;
708 return ZIO_PIPELINE_STOP;
712 vdev_disk_io_done(zio_t *zio)
715 * If the device returned EIO, we revalidate the media. If it is
716 * determined the media has changed, this triggers the asynchronous
717 * removal of the device from the configuration.
719 if (zio->io_error == EIO) {
720 vdev_t *v = zio->io_vd;
721 vdev_disk_t *vd = v->vdev_tsd;
723 if (check_disk_change(vd->vd_bdev)) {
724 vdev_bdev_invalidate(vd->vd_bdev);
725 v->vdev_remove_wanted = B_TRUE;
726 spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
732 vdev_disk_hold(vdev_t *vd)
734 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
736 /* We must have a pathname, and it must be absolute. */
737 if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
741 * Only prefetch path and devid info if the device has
744 if (vd->vdev_tsd != NULL)
747 /* XXX: Implement me as a vnode lookup for the device */
748 vd->vdev_name_vp = NULL;
749 vd->vdev_devid_vp = NULL;
753 vdev_disk_rele(vdev_t *vd)
755 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
757 /* XXX: Implement me as a vnode rele for the device */
760 vdev_ops_t vdev_disk_ops = {
769 VDEV_TYPE_DISK, /* name of this vdev type */
770 B_TRUE /* leaf vdev */
774 * Given the root disk device devid or pathname, read the label from
775 * the device, and construct a configuration nvlist.
778 vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
780 struct block_device *bdev;
785 bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
787 return -PTR_ERR(bdev);
789 s = bdev_capacity(bdev);
791 vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
795 size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
796 label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);
798 for (i = 0; i < VDEV_LABELS; i++) {
799 uint64_t offset, state, txg = 0;
801 /* read vdev label */
802 offset = vdev_label_offset(size, i, 0);
803 if (vdev_disk_physio(bdev, (caddr_t)label,
804 VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
807 if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
808 sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
813 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
814 &state) != 0 || state >= POOL_STATE_DESTROYED) {
815 nvlist_free(*config);
820 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
821 &txg) != 0 || txg == 0) {
822 nvlist_free(*config);
830 vmem_free(label, sizeof(vdev_label_t));
831 vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
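/*
 * zfs_vdev_scheduler selects the elevator applied to whole-disk vdevs; it
 * can be set at module load time, e.g. (illustrative):
 *
 *   modprobe zfs zfs_vdev_scheduler=noop
 */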
836 module_param(zfs_vdev_scheduler, charp, 0644);
837 MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");