 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
#include <sys/zfs_context.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/sunldi.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bios */
	struct bio		*dr_bio[0];	/* Attached bios */
} dio_request_t;
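/*
 * Example (illustrative): the zero-length dr_bio[] array means each
 * dio_request is sized for its bios at allocation time, as
 * vdev_disk_dio_alloc() below does:
 *
 *	dr = kmem_zalloc(sizeof(dio_request_t) +
 *	    sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
 *
 * With the default bio_count of 16 on a 64-bit kernel this is a single
 * allocation with room for dr_bio[0] through dr_bio[15].
 */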
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
vdev_bdev_mode(int smode)
	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
#else
vdev_bdev_mode(int smode)
	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
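/*
 * Worked example, assuming the usual FREAD/FWRITE semantics: a pool
 * imported read-write passes smode == (FREAD | FWRITE), which fails the
 * read-only test above and therefore opens the device for both reading
 * and writing; only smode == FREAD takes the read-only path.
 */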
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return (part->nr_sects << 9);

	/* Otherwise assume the full device capacity */
	return (get_capacity(bdev->bd_disk) << 9);
}
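/*
 * Both returns are in bytes: nr_sects and get_capacity() count 512-byte
 * sectors, so the << 9 multiplies by 512.  For example, a 2097152-sector
 * partition yields 2097152 << 9 = 1073741824 bytes (1 GiB).
 */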
static void
vdev_disk_error(zio_t *zio)
{
	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags, (u_longlong_t)zio->io_delay);
}
/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = vd->vd_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char *device = bdev->bd_disk->disk_name;
	int error;

	/*
	 * Skip devices which are not whole disks (partitions).
	 * Device-mapper devices are excepted since they may be whole
	 * disks despite the vdev_wholedisk flag, in which case we can
	 * and should switch the elevator.  If the device-mapper device
	 * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
	 * "Skip devices without schedulers" check below will fail.
	 */
	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
		return 0;

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return 0;

	/* Leave existing scheduler when set to "none" */
	if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
		return 0;
#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
	/*
	 * For pre-2.6.36 kernels elevator_change() is not available.
	 * Therefore we fall back to using a usermodehelper to echo the
	 * elevator into sysfs.  This requires /bin/echo and sysfs to
	 * be mounted, which may not be true early in the boot process.
	 */
# define SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

	{
		char *argv[] = { "/bin/sh", "-c", NULL, NULL };
		char *envp[] = { NULL };

		argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
		error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
		strfree(argv[2]);
	}
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error)
		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);

	return error;
}
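/*
 * The usermodehelper fallback above amounts to the following shell
 * session (illustrative; the device name and the set of available
 * schedulers vary by system):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo noop >/sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	[noop] deadline cfq
 */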
/*
 * Expanding a whole disk vdev involves invoking BLKRRPART on the
 * whole disk device.  This poses a problem, because BLKRRPART will
 * return EBUSY if one of the disk's partitions is open.  That's why
 * we have to do it here, just before opening the data partition.
 * Unfortunately, BLKRRPART works by dropping all partitions and
 * recreating them, which means that for a short time window, all
 * /dev/sdxN device files disappear (until udev recreates them).
 * This means two things:
 *  - When we open the data partition just after a BLKRRPART, we
 *    can't do it using the normal device file path because of the
 *    obvious race condition with udev.  Instead, we use reliable
 *    kernel APIs to get a handle to the new partition device from
 *    the whole disk device.
 *  - Because vdev_disk_open() initially needs to find the device
 *    using its path, multiple vdev_disk_open() invocations in
 *    short succession on the same disk with BLKRRPARTs in the
 *    middle have a high probability of failure (because of the
 *    race condition with udev).  A typical situation where this
 *    might happen is when the zpool userspace tool does a
 *    TRYIMPORT immediately followed by an IMPORT.  For this
 *    reason, we only invoke BLKRRPART in the module when strictly
 *    necessary (zpool online -e case), and rely on userspace to
 *    do it when possible.
 */
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
	struct block_device *bdev, *result = ERR_PTR(-ENXIO);
	struct gendisk *disk;
	int error, partno;

	bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);

	disk = get_gendisk(bdev->bd_dev, &partno);
	vdev_bdev_close(bdev, vdev_bdev_mode(mode));

	bdev = bdget(disk_devt(disk));
	error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
	error = ioctl_by_bdev(bdev, BLKRRPART, 0);
	vdev_bdev_close(bdev, vdev_bdev_mode(mode));

	bdev = bdget_disk(disk, partno);
	error = blkdev_get(bdev,
	    vdev_bdev_mode(mode) | FMODE_EXCL, vd);

	return result;
#else
	return ERR_PTR(-EOPNOTSUPP);
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev = ERR_PTR(-ENXIO);
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (v->vdev_tsd != NULL) {
		ASSERT(v->vdev_reopening);
		vd = v->vdev_tsd;
		goto skip_open;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hs]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
	mode = spa_mode(v->vdev_spa);
	if (v->vdev_wholedisk && v->vdev_expanding)
		bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
	if (IS_ERR(bdev))
		bdev = vdev_bdev_open(v->vdev_path,
		    vdev_bdev_mode(mode), zfs_vdev_holder);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;

skip_open:
	/* Determine the physical block size */
	block_size = vdev_bdev_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, which causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Physical volume size in bytes */
	*psize = bdev_capacity(vd->vd_bdev);

	/* TODO: report possible expansion size */
	*max_psize = *psize;

	/* Based on the minimum sector size, set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
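	/*
	 * For example, a device reporting 512-byte logical sectors gives
	 * highbit(MAX(512, SPA_MINBLOCKSIZE)) - 1 = highbit(512) - 1 = 9,
	 * while a 4096-byte-sector device yields an ashift of 12.
	 */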
	/* Try to set the I/O scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return 0;
}

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof(vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}
static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
	/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
	return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
	/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
	return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
	/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
	return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_delay = jiffies_to_msecs(
			    jiffies_64 - zio->io_delay);
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up the synchronous waiter if this is the last outstanding bio */
	if ((rc == 1) && vdev_disk_dio_is_sync(dr))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
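/*
 * For example, with 4 KiB pages a 6000-byte buffer starting 512 bytes
 * into a page rounds up to ((512 + 6000 + 4095) >> 12) - (512 >> 12) = 2
 * pages, while the same buffer starting 3000 bytes into a page spills
 * into a third page and returns 3.
 */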
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return bio_size;
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;

	ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;
	dr->dr_rw = flags;
	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO into multiple bios and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
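	/*
	 * For example, if the request queue capped a single bio at 128 KiB,
	 * a 1 MiB zvol write would be built as eight 128 KiB bios, while IO
	 * at or below the cap uses exactly one bio per zio.
	 */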
	bio_ptr = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bios for the given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bios per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset >> 9;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;

		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}

	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);
	if (zio)
		zio->io_delay = jiffies_64;

	/* Submit all bios associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);
	/*
	 * On synchronous blocking requests we wait for all bio completion
	 * callbacks to run.  We will be woken when the last callback runs
	 * for this dio.  We are responsible for putting the last dio_request
	 * reference, which will in turn put back the last bio references.
	 * The only synchronous consumer is vdev_disk_read_rootlabel(); all
	 * other IO originating from vdev_disk_io_start() is asynchronous.
	 */
	if (vdev_disk_dio_is_sync(dr)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void) vdev_disk_dio_put(dr);

	return error;
}
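/*
 * Synchronous usage sketch (cf. vdev_disk_read_rootlabel() below): when
 * the caller passes READ_SYNC in flags, vdev_disk_dio_is_sync() is true,
 * so the call blocks in wait_for_completion() until the final bio
 * callback runs and then returns dr_error to the caller.
 */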
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);

	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	zio->io_error = -rc;
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	zio->io_delay = jiffies_64;
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

	return 0;
}
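/*
 * Note the flush above is an empty bio: bio_alloc(GFP_KERNEL, 0) attaches
 * no data pages, and VDEV_WRITE_FLUSH_FUA (expected to map to the kernel's
 * WRITE_FLUSH_FUA on 2.6.37+ kernels, or a write barrier on older ones)
 * asks the device to acknowledge only after its volatile write cache has
 * been drained.
 */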
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = ENXIO;
			return ZIO_PIPELINE_CONTINUE;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;

			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;

		default:
			zio->io_error = ENOTSUP;
		}

		return ZIO_PIPELINE_CONTINUE;

	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = ENOTSUP;
		return ZIO_PIPELINE_CONTINUE;
	}

	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined that the media has changed, this triggers the
	 * asynchronous removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), zfs_vdev_holder);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);

	s = bdev_capacity(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}

	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
	label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);

	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}
	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return 0;
}
module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
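/*
 * Usage sketch: the scheduler can be chosen at module load time, e.g.
 *
 *	# modprobe zfs zfs_vdev_scheduler=deadline
 *
 * or changed at runtime (taking effect for subsequently opened vdevs):
 *
 *	# echo noop >/sys/module/zfs/parameters/zfs_vdev_scheduler
 */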