/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
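/*
 * Note: dr_bio[] is a zero-length (flexible) array member, so a single
 * allocation in vdev_disk_dio_alloc() below sizes both the structure
 * and its trailing array of bio pointers.
 */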
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device
	 * (nr_sects is in 512-byte sectors, hence the shift) */
	if (part)
		return (part->nr_sects << 9);

	/* Otherwise assume the full device capacity */
	return (get_capacity(bdev->bd_disk) << 9);
}
static void
vdev_disk_error(zio_t *zio)
{
	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags, (u_longlong_t)zio->io_delay);
}
/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 *
 * Unfortunately we cannot directly call the elevator_switch() function
 * because it is not exported from the block layer.  This means we have
 * to use the sysfs interface and a user space upcall.  Pools will be
 * automatically imported on module load so we must do this at device
 * open time from the kernel.
 */
#define SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = vd->vd_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char *device = bdev->bd_disk->disk_name;
	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };
	int error;

	/* Skip devices which are not whole disks (partitions) */
	if (!v->vdev_wholedisk)
		return 0;

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return 0;

	/* Leave existing scheduler when set to "none" */
	if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
		return 0;

	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
	error = call_usermodehelper(argv[0], argv, envp, 1);
	if (error)
		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);
	strfree(argv[2]);

	return error;
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
	struct block_device *bdev;
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
	if (vd == NULL)
		return ENOMEM;

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternatively you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
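	/*
	 * Illustrative examples only (actual names vary by system):
	 *
	 *   /dev/disk/by-id/ata-<model>_<serial>    stable across recabling
	 *   /dev/disk/by-path/pci-0000:00:1f.2-...  preserves physical slot
	 *   /dev/sda                                probe order, discouraged
	 */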
	mode = spa_mode(v->vdev_spa);
	bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;
	block_size = vdev_bdev_block_size(bdev);

	/* We think the wholedisk property should always be set when this
	 * function is called.  ASSERT here so if any legitimate cases exist
	 * where it's not set, we'll find them during debugging.  If we never
	 * hit the ASSERT, this and the following conditional statement can be
	 * removed. */
	ASSERT3S(v->vdev_wholedisk, !=, -1ULL);

	/* The wholedisk property was initialized to -1 in vdev_alloc() if it
	 * was unspecified.  In that case, check if this is a whole device.
	 * When bdev->bd_contains == bdev we have a whole device and not simply
	 * a partition. */
	if (v->vdev_wholedisk == -1ULL)
		v->vdev_wholedisk = (bdev->bd_contains == bdev);

	/* Clear the nowritecache bit, so vdev_reopen() will try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Physical volume size in bytes */
	*psize = bdev_capacity(bdev);

	/* Based on the minimum sector size set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
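	/*
	 * For example, a 512 byte sector yields ashift = 9 and a 4096 byte
	 * sector yields ashift = 12, assuming highbit() returns the 1-based
	 * index of the highest bit set.
	 */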
	/* Try to set the io scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return 0;
}
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof(vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}
static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
	/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
	return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
	/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
	return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
	/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
	return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}
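/*
 * Reference protocol: __vdev_disk_physio() takes one reference per
 * attached bio plus a guard reference held across submit_bio(); the
 * completion callback drops the per-bio references and the submitter
 * drops the final guard reference.
 */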
static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interrupt is called only once with the correct zio.
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_delay = jiffies_to_msecs(
			    jiffies_64 - zio->io_delay);
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up the synchronous waiter if this is the last outstanding bio */
	if ((rc == 1) && vdev_disk_dio_is_sync(dr))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}
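/*
 * Number of physical pages spanned by a virtually contiguous buffer.
 * For example (hypothetical values), a 12 KiB buffer beginning 512 bytes
 * into a 4 KiB page spans 4 pages, while the same buffer page-aligned
 * spans only 3.
 */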
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr  += size;
		bio_size -= size;
		offset = 0;
	}

	return bio_size;
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;

	ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;

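	/*
	 * Failfast is only requested on a zio's first attempt; I/Os flagged
	 * ZIO_FLAG_IO_RETRY or ZIO_FLAG_TRYHARD are left to the block
	 * layer's full error recovery paths.
	 */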
	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;
	dr->dr_rw = flags;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO into multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
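	/*
	 * For example (hypothetical limits), a 1 MiB vdev IO against a
	 * request queue that caps each bio at 256 KiB is carved into four
	 * bio's, all of which are submitted before anything waits.
	 */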
	bio_ptr    = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size   = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			printk("WARNING: Resized bio's/dio to %d\n", bio_count);
			goto retry;
		}

		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset >> 9;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;

		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr    += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}

	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);
	if (zio)
		zio->io_delay = jiffies_64;

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);

	/*
	 * On synchronous blocking requests we wait for all of the bio
	 * completion callbacks to run.  We will be woken when the last
	 * callback runs for this dio.  We are responsible for putting the
	 * last dio_request reference, which will in turn put back the last
	 * bio references.  The only synchronous consumer is
	 * vdev_disk_read_rootlabel(); all other IO originating from
	 * vdev_disk_io_start() is asynchronous.
	 */
	if (vdev_disk_dio_is_sync(dr)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void)vdev_disk_dio_put(dr);

	return error;
}
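/*
 * Synchronous wrapper which passes zio == NULL, so the completion path
 * performs no zio bookkeeping; per the comment above, its only consumer
 * is vdev_disk_read_rootlabel() below.
 */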
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}
/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);
	zio->io_error = -rc;
	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	zio->io_delay = jiffies_64;
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

	return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */
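/*
 * Note on the pipeline return values below: ZIO_PIPELINE_STOP means the
 * request was handed to the block layer and the zio will be advanced by
 * a completion callback, while ZIO_PIPELINE_CONTINUE means the zio was
 * completed (or failed) synchronously in this function.
 */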
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = ENXIO;
			return ZIO_PIPELINE_CONTINUE;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;

			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;

		default:
			zio->io_error = ENOTSUP;
		}

		return ZIO_PIPELINE_CONTINUE;

	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = ENOTSUP;
		return ZIO_PIPELINE_CONTINUE;
	}

	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);

	s = bdev_capacity(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}

	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
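	/*
	 * Vdev labels are sizeof (vdev_label_t) = 256 KiB each, so the
	 * capacity was rounded down above to a label-aligned multiple,
	 * letting vdev_label_offset() place the two trailing labels at
	 * aligned offsets near the end of the device.
	 */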
	label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);

	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return 0;
}
module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
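/*
 * Example usage (assuming the module is loaded as 'zfs'):
 *
 *   # modprobe zfs zfs_vdev_scheduler=deadline
 *   # echo noop >/sys/module/zfs/parameters/zfs_vdev_scheduler
 *
 * The value is consulted in vdev_disk_open(), so it applies to
 * whole-disk vdevs opened after the change.
 */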