X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fblkdev_compat.h;h=bd1b2bf540031c0f1bdc9aad6da12ebd8b5344c2;hb=30930fba219642cb1dadf1c8ef60ff799e3dc424;hp=d2e80ec88f6123283255ba4f0c80a561474386b7;hpb=45066d1f20a582fc8229776503b1cdd554d7fde4;p=zfs.git

diff --git a/include/linux/blkdev_compat.h b/include/linux/blkdev_compat.h
index d2e80ec..bd1b2bf 100644
--- a/include/linux/blkdev_compat.h
+++ b/include/linux/blkdev_compat.h
@@ -132,6 +132,23 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
 # endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
 #endif /* HAVE_BLK_END_REQUEST */
 
+/*
+ * 2.6.36 API change,
+ * The blk_queue_flush() interface has replaced the blk_queue_ordered()
+ * interface. However, while the old interface was available to all, the
+ * new one is GPL-only. Thus if the GPL-only version is detected we
+ * implement our own trivial helper compatibility function. The hope is
+ * that long term this function will be opened up.
+ */
+#if defined(HAVE_BLK_QUEUE_FLUSH) && defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
+#define blk_queue_flush __blk_queue_flush
+static inline void
+__blk_queue_flush(struct request_queue *q, unsigned int flags)
+{
+	q->flush_flags = flags & (REQ_FLUSH | REQ_FUA);
+}
+#endif /* HAVE_BLK_QUEUE_FLUSH && HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
+
 #ifndef HAVE_BLK_RQ_POS
 static inline sector_t
 blk_rq_pos(struct request *req)
@@ -172,6 +189,64 @@ __blk_rq_bytes(struct request *req)
 #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
 #endif
 
+/*
+ * 2.6.27 API change,
+ * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
+ * stacking drivers. Prior to this, request stacking drivers were detected
+ * by checking (q->request_fn == NULL); for earlier kernels we revert to
+ * this legacy behavior.
+ */
+#ifndef blk_queue_stackable
+#define blk_queue_stackable(q) ((q)->request_fn == NULL)
+#endif
+
+/*
+ * 2.6.34 API change,
+ * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
+ */
+#ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
+#define blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
+static inline void
+__blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_queue_max_sectors(q, max_hw_sectors);
+}
+#endif
+
+/*
+ * 2.6.34 API change,
+ * The blk_queue_max_segments() function consolidates
+ * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
+ */
+#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
+#define blk_queue_max_segments __blk_queue_max_segments
+static inline void
+__blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
+{
+	blk_queue_max_phys_segments(q, max_segments);
+	blk_queue_max_hw_segments(q, max_segments);
+}
+#endif
+
+/*
+ * 2.6.30 API change,
+ * The blk_queue_physical_block_size() function was introduced to
+ * indicate the smallest I/O the device can write without incurring
+ * a read-modify-write penalty. For older kernels this is a no-op.
+ */
+#ifndef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE
+#define blk_queue_physical_block_size(q, x) ((void)(0))
+#endif
+
+/*
+ * 2.6.30 API change,
+ * The blk_queue_io_opt() function was added to indicate the optimal
+ * I/O size for the device. For older kernels this is a no-op.
+ */
+#ifndef HAVE_BLK_QUEUE_IO_OPT
+#define blk_queue_io_opt(q, x) ((void)(0))
+#endif
+
 #ifndef HAVE_GET_DISK_RO
 static inline int
 get_disk_ro(struct gendisk *disk)
@@ -328,6 +403,36 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
 #endif
 
 /*
+ * 2.6.37 API change
+ * The WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags have been
+ * introduced as a replacement for WRITE_BARRIER. This was done to
+ * allow richer semantics to be expressed to the block layer. It is
+ * the block layer's responsibility to choose the correct way to
+ * implement these semantics.
+ *
+ * The existence of these flags implies that REQ_FLUSH and REQ_FUA are
+ * defined. Thus we can safely define the VDEV_REQ_FLUSH and VDEV_REQ_FUA
+ * compatibility macros.
+ */
+#ifdef WRITE_FLUSH_FUA
+# define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA
+# define VDEV_REQ_FLUSH REQ_FLUSH
+# define VDEV_REQ_FUA REQ_FUA
+#else
+# define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER
+# define VDEV_REQ_FLUSH REQ_HARDBARRIER
+# define VDEV_REQ_FUA REQ_HARDBARRIER
+#endif
+
+/*
+ * 2.6.32 API change
+ * Use the normal I/O path for discards.
+ */
+#ifdef REQ_DISCARD
+# define VDEV_REQ_DISCARD REQ_DISCARD
+#endif
+
+/*
  * Default Linux IO Scheduler,
  * Setting the scheduler to noop will allow the Linux IO scheduler to
  * still perform front and back merging, while leaving the request
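
As a usage illustration (not part of the patch above), here is a minimal sketch of how a caller might rely on the VDEV_WRITE_FLUSH_FUA compatibility macro to issue a write-cache flush that works on both pre- and post-2.6.37 kernels. The function names are hypothetical and error handling is reduced to the essentials.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical completion callback; real code would propagate 'error'. */
static void
example_flush_end_io(struct bio *bio, int error)
{
	bio_put(bio);
}

/* Hypothetical helper: submit an empty flush bio against 'bdev'. */
static int
example_issue_flush(struct block_device *bdev)
{
	struct bio *bio;

	/* A bio with no data pages is sufficient for a pure cache flush. */
	bio = bio_alloc(GFP_NOIO, 0);
	if (bio == NULL)
		return -ENOMEM;

	bio->bi_end_io = example_flush_end_io;
	bio->bi_bdev = bdev;

	/*
	 * VDEV_WRITE_FLUSH_FUA expands to WRITE_FLUSH_FUA on 2.6.37 and
	 * newer kernels, and to WRITE_BARRIER on older ones.
	 */
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

	return 0;
}

Because the macro resolves at compile time, the same call site builds unchanged against either kernel generation; only the request flags handed to the block layer differ.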