Commit 8fe0d473 authored by Mike Christie, committed by Jens Axboe

block: convert merge/insert code to check for REQ_OPs.

This patch converts the block layer merging code to use separate variables
for the operation and flags, and to check req_op() when testing for a
specific REQ_OP.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 63a4cc24
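
For context, a minimal user-space sketch of the op/flags split this series moves toward: the operation becomes a single enumerated value carried alongside the modifier flag bits, so callers compare it with == instead of masking a flag. All names here (sketch_op, SKETCH_*) are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

enum sketch_op {
	SKETCH_OP_READ,
	SKETCH_OP_WRITE,
	SKETCH_OP_DISCARD,
	SKETCH_OP_WRITE_SAME,
};

#define SKETCH_FLAG_SECURE (1U << 0)
#define SKETCH_FLAG_SYNC   (1U << 1)

struct sketch_request {
	enum sketch_op op;  /* what to do: exactly one operation */
	unsigned int flags; /* how to do it: zero or more modifiers */
};

int main(void)
{
	struct sketch_request rq = {
		.op    = SKETCH_OP_DISCARD,
		.flags = SKETCH_FLAG_SECURE,
	};

	/* The op is compared, not masked: only one operation at a time. */
	if (rq.op == SKETCH_OP_DISCARD && (rq.flags & SKETCH_FLAG_SECURE))
		printf("secure discard\n");
	return 0;
}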
block/blk-core.c
@@ -2161,7 +2161,7 @@ EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
				       struct request *rq)
 {
-	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}
...
block/blk-merge.c
@@ -649,7 +649,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

-	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+	if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
+				   req_op(next)))
		return 0;

	/*
@@ -663,7 +664,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	    || req_no_special_merge(next))
		return 0;

-	if (req->cmd_flags & REQ_WRITE_SAME &&
+	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;
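
The attempt_merge() change above swaps a flag-bit test (cmd_flags & REQ_WRITE_SAME) for an op comparison (req_op(req) == REQ_OP_WRITE_SAME). A small sketch of why masking cannot survive the conversion once operations are sequential values rather than single bits; the values and names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum { OP_READ = 0, OP_WRITE = 1, OP_DISCARD = 2, OP_WRITE_SAME = 3 };

static bool mask_test(unsigned int op) { return op & OP_DISCARD; }  /* old style */
static bool cmp_test(unsigned int op)  { return op == OP_DISCARD; } /* new style */

int main(void)
{
	/* OP_WRITE_SAME is 3, which has the OP_DISCARD bit (2) set,
	 * so the mask test reports a false positive. */
	printf("mask: %d  cmp: %d\n",
	       mask_test(OP_WRITE_SAME), cmp_test(OP_WRITE_SAME));
	return 0;
}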
@@ -751,7 +752,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

-	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+	if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
+				   bio_op(bio)))
		return false;

	/* different data direction or already started, don't merge */
@@ -767,7 +769,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
		return false;

	/* must be using the same buffer */
-	if (rq->cmd_flags & REQ_WRITE_SAME &&
+	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;
...
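
blk_rq_merge_ok() now also extracts the bio's operation with bio_op(bio) alongside req_op(rq). One plausible model of such an accessor, with the op packed into the high bits of the same word as the flags and recovered by a shift; the kernel's actual encoding of bi_rw/cmd_flags differs and changed across releases, so treat this purely as a sketch:

#include <stdio.h>

#define OP_SHIFT  29
#define FLAG_MASK ((1U << OP_SHIFT) - 1)

static unsigned int get_op(unsigned int word)    { return word >> OP_SHIFT; }
static unsigned int get_flags(unsigned int word) { return word & FLAG_MASK; }

int main(void)
{
	unsigned int word = (2U << OP_SHIFT) | 0x5; /* op 2, flag bits 0x5 */
	printf("op=%u flags=0x%x\n", get_op(word), get_flags(word));
	return 0;
}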
include/linux/blkdev.h
@@ -666,16 +666,16 @@ static inline bool rq_mergeable(struct request *rq)
	return true;
 }

-static inline bool blk_check_merge_flags(unsigned int flags1,
-					 unsigned int flags2)
+static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
+					 unsigned int flags2, unsigned int op2)
 {
-	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+	if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

-	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+	if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
		return false;

	return true;
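
A user-space restatement of the new blk_check_merge_flags() logic with a few quick checks; the enum values and the SECURE bit are illustrative stand-ins for the kernel's constants:

#include <assert.h>
#include <stdbool.h>

enum { OP_READ, OP_WRITE, OP_DISCARD, OP_WRITE_SAME };
#define FLAG_SECURE (1U << 0)

static bool check_merge(unsigned int flags1, unsigned int op1,
			unsigned int flags2, unsigned int op2)
{
	if ((op1 == OP_DISCARD) != (op2 == OP_DISCARD))
		return false;
	if ((flags1 & FLAG_SECURE) != (flags2 & FLAG_SECURE))
		return false;
	if ((op1 == OP_WRITE_SAME) != (op2 == OP_WRITE_SAME))
		return false;
	return true;
}

int main(void)
{
	assert(check_merge(0, OP_DISCARD, 0, OP_DISCARD));            /* same op: ok */
	assert(!check_merge(0, OP_DISCARD, 0, OP_WRITE));             /* mixed ops: no */
	assert(!check_merge(FLAG_SECURE, OP_DISCARD, 0, OP_DISCARD)); /* flags differ: no */
	return 0;
}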
@@ -887,12 +887,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 }

 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-						     unsigned int cmd_flags)
+						     int op)
 {
-	if (unlikely(cmd_flags & REQ_DISCARD))
+	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

-	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
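
And a sketch of the per-op limit dispatch that the reworked blk_queue_get_max_sectors() performs, with an illustrative struct in place of struct queue_limits:

#include <limits.h>
#include <stdio.h>

enum { OP_READ, OP_WRITE, OP_DISCARD, OP_WRITE_SAME };

struct sketch_limits {
	unsigned int max_sectors;
	unsigned int max_discard_sectors;
	unsigned int max_write_same_sectors;
};

static unsigned int get_max_sectors(const struct sketch_limits *l, int op)
{
	/* Discards are additionally clamped to UINT_MAX >> 9 sectors. */
	if (op == OP_DISCARD)
		return l->max_discard_sectors < (UINT_MAX >> 9) ?
			l->max_discard_sectors : (UINT_MAX >> 9);
	if (op == OP_WRITE_SAME)
		return l->max_write_same_sectors;
	return l->max_sectors;
}

int main(void)
{
	struct sketch_limits l = { 2560, 8192, 0 };

	printf("write: %u  discard: %u\n",
	       get_max_sectors(&l, OP_WRITE), get_max_sectors(&l, OP_DISCARD));
	return 0;
}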
@@ -919,11 +919,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

-	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
-		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
-			blk_queue_get_max_sectors(q, rq->cmd_flags));
+			blk_queue_get_max_sectors(q, req_op(rq)));
 }

 static inline unsigned int blk_rq_count_bios(struct request *rq)
...