#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#endif
+/*
+ * 2.6.27 API change,
+ * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
+ * stacking drivers. Prior to this, request stacking drivers were
+ * detected by checking (q->request_fn == NULL); for earlier kernels we
+ * fall back to this legacy behavior.
+ */
+#ifndef blk_queue_stackable
+#define blk_queue_stackable(q) ((q)->request_fn == NULL)
+#endif
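+
+/*
+ * Illustrative usage (an assumption, not part of this change): a caller
+ * might use the macro above to detect a request stacking (e.g. dm)
+ * device; handle_stacking_driver() is a hypothetical helper.
+ *
+ *	struct request_queue *q = bdev_get_queue(bdev);
+ *
+ *	if (blk_queue_stackable(q))
+ *		handle_stacking_driver(q);
+ */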
+
#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
#endif /* HAVE_2ARGS_BIO_END_IO_T */
/*
- * 2.6.28 API change
+ * 2.6.38 - 2.6.x API,
+ * blkdev_get_by_path()
+ * blkdev_put()
+ *
+ * 2.6.28 - 2.6.37 API,
+ * open_bdev_exclusive()
+ * close_bdev_exclusive()
+ *
+ * 2.6.12 - 2.6.27 API,
+ * open_bdev_excl()
+ * close_bdev_excl()
+ *
* Used to exclusively open a block device from within the kernel.
*/
-#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
+#if defined(HAVE_BLKDEV_GET_BY_PATH)
+# define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \
+ (md) | FMODE_EXCL, hld)
+# define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL)
+#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
# define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld)
# define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md)
#else
# define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld)
# define vdev_bdev_close(bdev, md) close_bdev_excl(bdev)
-#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
+#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
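+
+/*
+ * Illustrative usage (an assumption, not part of this change): with the
+ * macros above a caller opens and closes a whole device the same way on
+ * all supported kernels, e.g.:
+ *
+ *	struct block_device *bdev;
+ *
+ *	bdev = vdev_bdev_open(path, FMODE_READ|FMODE_WRITE, holder);
+ *	if (IS_ERR(bdev))
+ *		return (PTR_ERR(bdev));
+ *	...
+ *	vdev_bdev_close(bdev, FMODE_READ|FMODE_WRITE);
+ */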
/*
* 2.6.22 API change
#endif
/*
+ * 2.6.37 API change
+ * The WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags were introduced
+ * as a replacement for WRITE_BARRIER to allow richer semantics to be
+ * expressed to the block layer. It is the block layer's responsibility
+ * to choose the correct way to implement these semantics.
+ */
+#ifdef WRITE_FLUSH_FUA
+# define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA
+#else
+# define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER
+#endif
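+
+/*
+ * Illustrative usage (an assumption, not part of this change): the
+ * unified flag replaces WRITE_BARRIER at bio submission time, using
+ * the two-argument submit_bio() of this kernel era:
+ *
+ *	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
+ */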
+
+/*
* Default Linux IO Scheduler,
* Setting the scheduler to noop will allow the Linux IO scheduler to
* still perform front and back merging, while leaving the request