return get_capacity(bdev->bd_disk);
}
+/*
+ * Log the details of a failed zio to the console.  Debug-only: the body
+ * compiles away entirely unless ZFS_DEBUG is defined.
+ */
+static void
+vdev_disk_error(zio_t *zio)
+{
+#ifdef ZFS_DEBUG
+	/* Use an explicit printk log level; a bare printk() falls back to
+	 * the kernel's default level and may be filtered unexpectedly. */
+	printk(KERN_WARNING "ZFS: zio error=%d type=%d offset=%llu "
+	    "size=%llu flags=%x\n", zio->io_error, zio->io_type,
+	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
+	    zio->io_flags);
+#endif
+}
+
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
vd->vd_bdev = bdev;
block_size = vdev_bdev_block_size(bdev);
- /* Check if this is a whole device. When bdev->bd_contains ==
- * bdev we have a whole device and not simply a partition. */
- v->vdev_wholedisk = !!(bdev->bd_contains == bdev);
+ /* We think the wholedisk property should always be set when this
+ * function is called. ASSERT here so if any legitimate cases exist
+ * where it's not set, we'll find them during debugging. If we never
+ * hit the ASSERT, this and the following conditional statement can be
+ * removed. */
+ ASSERT3S(v->vdev_wholedisk, !=, -1ULL);
+
+ /* The wholedisk property was initialized to -1 in vdev_alloc() if it
+ * was unspecified. In that case, check if this is a whole device.
+ * When bdev->bd_contains == bdev we have a whole device and not simply
+ * a partition. */
+ if (v->vdev_wholedisk == -1ULL)
+ v->vdev_wholedisk = (bdev->bd_contains == bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
if (zio) {
zio->io_error = error;
+ ASSERT3S(zio->io_error, >=, 0);
+ if (zio->io_error)
+ vdev_disk_error(zio);
zio_interrupt(zio);
}
}
#endif /* HAVE_2ARGS_BIO_END_IO_T */
if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = EIO;
+ error = -EIO;
if (dr->dr_error == 0)
- dr->dr_error = error;
+ dr->dr_error = -error;
 /* Drop reference acquired by __vdev_disk_physio */
rc = vdev_disk_dio_put(dr);
if (dr == NULL)
return ENOMEM;
+ if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
+ bio_set_flags_failfast(bdev, &flags);
+
dr->dr_zio = zio;
dr->dr_rw = flags;
block_size = vdev_bdev_block_size(bdev);
-#ifdef BIO_RW_FAILFAST
- if (flags & (1 << BIO_RW_FAILFAST))
- dr->dr_rw |= 1 << BIO_RW_FAILFAST;
-#endif /* BIO_RW_FAILFAST */
-
/*
* When the IO size exceeds the maximum bio size for the request
* queue we are forced to break the IO in multiple bio's and wait
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
size_t size, uint64_t offset, int flags)
{
+ bio_set_flags_failfast(bdev, &flags);
return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}
zio->io_vd->vdev_nowritecache = B_TRUE;
bio_put(bio);
+ ASSERT3S(zio->io_error, >=, 0);
+ if (zio->io_error)
+ vdev_disk_error(zio);
zio_interrupt(zio);
BIO_END_IO_RETURN(0);
return ZIO_PIPELINE_CONTINUE;
}
-#ifdef BIO_RW_FAILFAST
- if (zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD))
- flags |= (1 << BIO_RW_FAILFAST);
-#endif /* BIO_RW_FAILFAST */
-
error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
zio->io_size, zio->io_offset, flags);
if (error) {