/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
        struct completion       dr_comp;        /* Completion for sync IO */
        atomic_t                dr_ref;         /* References */
        zio_t                   *dr_zio;        /* Parent ZIO */
        int                     dr_rw;          /* Read/Write */
        int                     dr_error;       /* Bio error */
        int                     dr_bio_count;   /* Count of bio's */
        struct bio              *dr_bio[0];     /* Attached bio's */
} dio_request_t;
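/*
 * Translate the SPA FREAD/FWRITE open mode into the mode expected by
 * the kernel's block device interface.  Kernels which provide
 * open_bdev_exclusive() (HAVE_OPEN_BDEV_EXCLUSIVE) take an fmode_t,
 * older kernels take an integer mount-style flag.
 */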
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
        fmode_t mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if (smode & FREAD)
                mode |= FMODE_READ;

        if (smode & FWRITE)
                mode |= FMODE_WRITE;

        return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
        int mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if ((smode & FREAD) && !(smode & FWRITE))
                mode = MS_RDONLY;

        return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
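/*
 * Return the device capacity: the partition size when the block device
 * refers to a partition, otherwise the capacity of the whole disk.
 */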
static uint64_t
bdev_capacity(struct block_device *bdev)
{
        struct hd_struct *part = bdev->bd_part;

        /* The partition capacity referenced by the block device */
        if (part)
                return part->nr_sects;

        /* Otherwise assume the full device capacity */
        return get_capacity(bdev->bd_disk);
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
        struct block_device *bdev;
        vdev_disk_t *vd;
        int mode, block_size;

        /* Must have a pathname and it must be absolute. */
        if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
                v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return EINVAL;
        }

        vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
        if (vd == NULL)
                return ENOMEM;

        /*
         * Devices are always opened by the path provided at configuration
         * time.  This means that if the provided path is a udev by-id path
         * then drives may be recabled without an issue.  If the provided
         * path is a udev by-path path then the physical location information
         * will be preserved.  This can be critical for more complicated
         * configurations where drives are located in specific physical
         * locations to maximize the system's tolerance to component failure.
         * Alternately, you can provide your own udev rule to flexibly map
         * the drives as you see fit.  It is not advised that you use the
         * /dev/[hs]d devices which may be reordered due to probing order.
         * Devices in the wrong locations will be detected by the higher
         * level vdev validation.
         */
        mode = spa_mode(v->vdev_spa);
        bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
        if (IS_ERR(bdev)) {
                kmem_free(vd, sizeof(vdev_disk_t));
                return -PTR_ERR(bdev);
        }

        v->vdev_tsd = vd;
        vd->vd_bdev = bdev;
        block_size = vdev_bdev_block_size(bdev);

        /*
         * Check if this is a whole device.  When bdev->bd_contains ==
         * bdev we have a whole device and not simply a partition.
         */
        v->vdev_wholedisk = !!(bdev->bd_contains == bdev);

        /* Clear the nowritecache bit, so vdev_reopen() will try again. */
        v->vdev_nowritecache = B_FALSE;

        /* Physical volume size in bytes */
        *psize = bdev_capacity(bdev) * block_size;

        /* Based on the minimum sector size set the block size */
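        /*
         * ashift is the log2 of the sector size.  For example, a 512
         * byte sector yields an ashift of 9 (highbit(512) - 1 == 9)
         * and a 4096 byte sector an ashift of 12.
         */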
        *ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

        return 0;
}
static void
vdev_disk_close(vdev_t *v)
{
        vdev_disk_t *vd = v->vdev_tsd;

        if (vd == NULL)
                return;

        if (vd->vd_bdev != NULL)
                vdev_bdev_close(vd->vd_bdev,
                    vdev_bdev_mode(spa_mode(v->vdev_spa)));

        kmem_free(vd, sizeof(vdev_disk_t));
        v->vdev_tsd = NULL;
}
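/*
 * A dio_request is allocated with room for 'bio_count' trailing bio
 * pointers (the dr_bio[0] flexible array member above), so a single
 * allocation covers the request and all of its attached bio's.
 */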
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
        dio_request_t *dr;
        int i;

        dr = kmem_zalloc(sizeof(dio_request_t) +
            sizeof(struct bio *) * bio_count, KM_SLEEP);
        if (dr) {
                init_completion(&dr->dr_comp);
                atomic_set(&dr->dr_ref, 0);
                dr->dr_bio_count = bio_count;
                dr->dr_error = 0;

                for (i = 0; i < dr->dr_bio_count; i++)
                        dr->dr_bio[i] = NULL;
        }

        return dr;
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
        int i;

        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        bio_put(dr->dr_bio[i]);

        kmem_free(dr, sizeof(dio_request_t) +
            sizeof(struct bio *) * dr->dr_bio_count);
}
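/*
 * The dio_request reference count tracks the in-flight bio's: one
 * reference is taken per attached bio plus one held by the submitter
 * across submit_bio().  Dropping the final reference frees the request
 * and completes the parent zio, if any.
 */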
static void
vdev_disk_dio_get(dio_request_t *dr)
{
        atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
        int rc = atomic_dec_return(&dr->dr_ref);

        /*
         * Free the dio_request when the last reference is dropped and
         * ensure zio_interrupt is called only once with the correct zio.
         */
        if (rc == 0) {
                zio_t *zio = dr->dr_zio;
                int error = dr->dr_error;

                vdev_disk_dio_free(dr);

                if (zio) {
                        zio->io_error = error;
                        zio_interrupt(zio);
                }
        }

        return rc;
}
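/*
 * BIO_END_IO_PROTO and BIO_END_IO_RETURN hide the 2.6.24 kernel API
 * change which dropped the 'size' argument and the int return type
 * from the bi_end_io callback (HAVE_2ARGS_BIO_END_IO_T).
 */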
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
        dio_request_t *dr = bio->bi_private;
        int rc;

        /* Fatal error but print some useful debugging before asserting */
        if (dr == NULL)
                PANIC("dr == NULL, bio->bi_private == NULL\n"
                    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
                    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
                    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
                    bio->bi_idx, bio->bi_size, bio->bi_end_io,
                    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
        if (bio->bi_size)
                return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

        if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = EIO;

        if (dr->dr_error == 0)
                dr->dr_error = error;

        /* Drop reference acquired by __vdev_disk_physio */
        rc = vdev_disk_dio_put(dr);

        /* Wake the synchronous waiter if this is the last outstanding bio */
        if ((rc == 1) && (dr->dr_rw & (1 << DIO_RW_SYNCIO)))
                complete(&dr->dr_comp);

        BIO_END_IO_RETURN(0);
}
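/*
 * Determine how many physical pages are spanned by a kernel buffer of
 * 'bio_size' bytes starting at 'bio_ptr'.  The buffer need not be page
 * aligned, so the leading offset into the first page is accounted for.
 */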
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
        return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
            PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
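/*
 * Map a kernel buffer into the bio page by page.  Both kmalloc'ed
 * (physically contiguous) and vmalloc'ed buffers are handled.  Any
 * bytes which do not fit are returned so the caller can construct an
 * additional bio.
 */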
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
        unsigned int offset, size, i;
        struct page *page;

        offset = offset_in_page(bio_ptr);
        for (i = 0; i < bio->bi_max_vecs; i++) {
                if (bio_size == 0)
                        break;

                size = PAGE_SIZE - offset;
                if (size > bio_size)
                        size = bio_size;

                if (kmem_virt(bio_ptr))
                        page = vmalloc_to_page(bio_ptr);
                else
                        page = virt_to_page(bio_ptr);

                if (bio_add_page(bio, page, size, offset) != size)
                        break;

                bio_ptr  += size;
                bio_size -= size;
                offset = 0;
        }

        return bio_size;
}
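/*
 * Issue a zio's worth of IO to a block device.  The buffer is split
 * across as many bio's as the request queue requires; the dio_request
 * tracks them all and completes the zio (or wakes the synchronous
 * waiter) when the last bio finishes.
 */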
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
        dio_request_t *dr;
        caddr_t bio_ptr;
        uint64_t bio_offset;
        int bio_size, bio_count = 16;
        int i = 0, error = 0, block_size;

retry:
        dr = vdev_disk_dio_alloc(bio_count);
        if (dr == NULL)
                return ENOMEM;

        dr->dr_zio = zio;
        dr->dr_rw = flags;
        block_size = vdev_bdev_block_size(bdev);

#ifdef BIO_RW_FAILFAST
        if (flags & (1 << BIO_RW_FAILFAST))
                dr->dr_rw |= 1 << BIO_RW_FAILFAST;
#endif /* BIO_RW_FAILFAST */

        /*
         * When the IO size exceeds the maximum bio size for the request
         * queue we are forced to break the IO into multiple bio's and wait
         * for them all to complete.  Ideally, all pool users will set
         * their volume block size to match the maximum request size and
         * the common case will be one bio per vdev IO request.
         */
        bio_ptr    = kbuf_ptr;
        bio_offset = kbuf_offset;
        bio_size   = kbuf_size;
        for (i = 0; i <= dr->dr_bio_count; i++) {

                /* Finished constructing bio's for the given buffer */
                if (bio_size <= 0)
                        break;

                /*
                 * By default only 'bio_count' bio's per dio are allowed.
                 * However, if we find ourselves in a situation where more
                 * are needed we allocate a larger dio and warn the user.
                 */
                if (dr->dr_bio_count == i) {
                        vdev_disk_dio_free(dr);
                        bio_count *= 2;
                        printk("WARNING: Resized bio's/dio to %d\n", bio_count);
                        goto retry;
                }

                dr->dr_bio[i] = bio_alloc(GFP_NOIO,
                    bio_nr_pages(bio_ptr, bio_size));
                if (dr->dr_bio[i] == NULL) {
                        vdev_disk_dio_free(dr);
                        return ENOMEM;
                }

                /* Matching put called by vdev_disk_physio_completion */
                vdev_disk_dio_get(dr);

                dr->dr_bio[i]->bi_bdev = bdev;
                dr->dr_bio[i]->bi_sector = bio_offset / block_size;
                dr->dr_bio[i]->bi_rw = dr->dr_rw;
                dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
                dr->dr_bio[i]->bi_private = dr;

                /* Remaining size is returned to become the new size */
                bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

                /* Advance in buffer and construct another bio if needed */
                bio_ptr    += dr->dr_bio[i]->bi_size;
                bio_offset += dr->dr_bio[i]->bi_size;
        }

        /* Extra reference to protect dio_request during submit_bio */
        vdev_disk_dio_get(dr);

        /* Submit all bio's associated with this dio */
        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        submit_bio(dr->dr_rw, dr->dr_bio[i]);

        /*
         * On synchronous blocking requests we wait for all of the bio
         * completion callbacks to run.  We will be woken when the last
         * callback runs for this dio.  We are responsible for putting the
         * last dio_request reference, which in turn puts back the last
         * bio references.  The only synchronous consumer is
         * vdev_disk_read_rootlabel(); all other IO originating from
         * vdev_disk_io_start() is asynchronous.
         */
        if (dr->dr_rw & (1 << DIO_RW_SYNCIO)) {
                wait_for_completion(&dr->dr_comp);
                error = dr->dr_error;
                ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
        }

        (void)vdev_disk_dio_put(dr);

        return error;
}
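/*
 * Convenience wrapper for IO with no associated zio; per the comment
 * above, the only synchronous consumer is vdev_disk_read_rootlabel().
 */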
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
        return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}
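/*
 * Flush the device write cache by submitting an empty barrier bio.
 * On kernels without empty barrier support the fallback below simply
 * reports ENOTSUP, and the cache is subsequently treated as
 * unflushable via vdev_nowritecache.
 */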
/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
        zio_t *zio = bio->bi_private;

        zio->io_error = -rc;
        if (rc && (rc == -EOPNOTSUPP))
                zio->io_vd->vdev_nowritecache = B_TRUE;

        bio_put(bio);
        zio_interrupt(zio);

        BIO_END_IO_RETURN(0);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        struct request_queue *q;
        struct bio *bio;

        q = bdev_get_queue(bdev);
        if (!q)
                return ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return ENOMEM;

        bio->bi_end_io = vdev_disk_io_flush_completion;
        bio->bi_private = zio;
        bio->bi_bdev = bdev;
        submit_bio(WRITE_BARRIER, bio);

        return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */
static int
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *v = zio->io_vd;
        vdev_disk_t *vd = v->vdev_tsd;
        int flags, error;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:

                if (!vdev_readable(v)) {
                        zio->io_error = ENXIO;
                        return ZIO_PIPELINE_CONTINUE;
                }

                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (v->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }

                        error = vdev_disk_io_flush(vd->vd_bdev, zio);
                        if (error == 0)
                                return ZIO_PIPELINE_STOP;

                        zio->io_error = error;
                        if (error == ENOTSUP)
                                v->vdev_nowritecache = B_TRUE;

                        break;

                default:
                        zio->io_error = ENOTSUP;
                }

                return ZIO_PIPELINE_CONTINUE;

        case ZIO_TYPE_WRITE:
                flags = WRITE;
                break;

        case ZIO_TYPE_READ:
                flags = READ;
                break;

        default:
                zio->io_error = ENOTSUP;
                return ZIO_PIPELINE_CONTINUE;
        }

#ifdef BIO_RW_FAILFAST
        if (zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD))
                flags |= (1 << BIO_RW_FAILFAST);
#endif /* BIO_RW_FAILFAST */

        error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
            zio->io_size, zio->io_offset, flags);
        if (error) {
                zio->io_error = error;
                return ZIO_PIPELINE_CONTINUE;
        }

        return ZIO_PIPELINE_STOP;
}
static void
vdev_disk_io_done(zio_t *zio)
{
        /*
         * If the device returned EIO, we revalidate the media.  If it is
         * determined the media has changed this triggers the asynchronous
         * removal of the device from the configuration.
         */
        if (zio->io_error == EIO) {
                vdev_t *v = zio->io_vd;
                vdev_disk_t *vd = v->vdev_tsd;

                if (check_disk_change(vd->vd_bdev)) {
                        vdev_bdev_invalidate(vd->vd_bdev);
                        v->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                }
        }
}
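/*
 * Hold and release are stubs under Linux; the Solaris vnode based
 * prefetch of device path and devid information has no direct
 * equivalent here (see the XXX notes below).
 */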
static void
vdev_disk_hold(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* We must have a pathname, and it must be absolute. */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
                return;

        /*
         * Only prefetch path and devid info if the device has
         * never been opened.
         */
        if (vd->vdev_tsd != NULL)
                return;

        /* XXX: Implement me as a vnode lookup for the device */
        vd->vdev_name_vp = NULL;
        vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,
        vdev_disk_hold,
        vdev_disk_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
        struct block_device *bdev;
        vdev_label_t *label;
        uint64_t s, size;
        int i;

        bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
        if (IS_ERR(bdev))
                return -PTR_ERR(bdev);

        s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
        if (s == 0) {
                vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
                return EIO;
        }

        size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
        label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);

        *config = NULL;
        for (i = 0; i < VDEV_LABELS; i++) {
                uint64_t offset, state, txg = 0;

                /* read vdev label */
                offset = vdev_label_offset(size, i, 0);
                if (vdev_disk_physio(bdev, (caddr_t)label,
                    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state >= POOL_STATE_DESTROYED) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        vmem_free(label, sizeof(vdev_label_t));
        vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

        return (*config == NULL ? EIDRM : 0);
}