Commit e2a60da7 authored by Martin K. Petersen, committed by Jens Axboe

block: Clean up special command handling logic

Remove special-casing of non-rw fs style requests (discard). The nomerge
flags are consolidated in blk_types.h, and rq_mergeable() and
bio_mergeable() have been modified to use them.

bio_is_rw() is used in place of bio_has_data() in a few places. This is
done to distinguish true reads and writes from other fs-type requests
that carry a payload (e.g. write same).
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d41570b7
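
For context, the heart of the cleanup is that a single REQ_NOMERGE_FLAGS mask now drives both the request-side (rq_mergeable()) and bio-side (bio_mergeable()) merge checks, as the diff below shows. The following is a minimal userspace sketch of that masking logic only, not kernel code: the flag bit positions are invented for illustration, and just the check itself mirrors the patch.

/*
 * Illustrative sketch -- not kernel code.  Bit positions are arbitrary;
 * only the masking logic mirrors the consolidated merge checks.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_NOMERGE     (1u << 0)   /* explicitly marked non-mergeable */
#define REQ_STARTED     (1u << 1)   /* already handed to the driver */
#define REQ_SOFTBARRIER (1u << 2)   /* scheduling boundary */
#define REQ_FLUSH       (1u << 3)   /* cache flush */
#define REQ_FUA         (1u << 4)   /* forced unit access */

/* one mask shared by the bio-side and request-side merge checks */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)

static bool flags_mergeable(unsigned int cmd_flags)
{
	/* mergeable only if none of the nomerge bits are set */
	return !(cmd_flags & REQ_NOMERGE_FLAGS);
}

int main(void)
{
	printf("plain read/write: %d\n", flags_mergeable(0));           /* 1 */
	printf("flush request:    %d\n", flags_mergeable(REQ_FLUSH));   /* 0 */
	printf("already started:  %d\n", flags_mergeable(REQ_STARTED)); /* 0 */
	return 0;
}
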
@@ -1657,7 +1657,7 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+	if (likely(bio_is_rw(bio) &&
 		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
@@ -1699,8 +1699,7 @@ generic_make_request_checks(struct bio *bio)
 	if ((bio->bi_rw & REQ_DISCARD) &&
 	    (!blk_queue_discard(q) ||
-	     ((bio->bi_rw & REQ_SECURE) &&
-	      !blk_queue_secdiscard(q)))) {
+	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -1818,7 +1817,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+	if (bio_has_data(bio)) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1864,7 +1863,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_DISCARD)
+	if (!rq_mergeable(rq))
 		return 0;
 
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
@@ -2338,7 +2337,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
-	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
+	if (req->cmd_type == REQ_TYPE_FS)
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
...
@@ -417,18 +417,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return 0;
 
-	/*
-	 * Don't merge file system requests and discard requests
-	 */
-	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
-		return 0;
-
-	/*
-	 * Don't merge discard requests and secure discard requests
-	 */
-	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
-		return 0;
-
 	/*
 	 * not contiguous
 	 */
@@ -521,15 +509,7 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!rq_mergeable(rq))
-		return false;
-
-	/* don't merge file system requests and discard requests */
-	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-		return false;
-
-	/* don't merge discard requests and secure discard requests */
-	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
 	/* different data direction or already started, don't merge */
...
@@ -171,14 +171,13 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
  *
  *	a) it's attached to a gendisk, and
  *	b) the queue had IO stats enabled when this request was started, and
- *	c) it's a file system request or a discard request
+ *	c) it's a file system request
  */
 static inline int blk_do_io_stat(struct request *rq)
 {
 	return rq->rq_disk &&
 	       (rq->cmd_flags & REQ_IO_STAT) &&
-		(rq->cmd_type == REQ_TYPE_FS ||
-		 (rq->cmd_flags & REQ_DISCARD));
+		(rq->cmd_type == REQ_TYPE_FS);
 }
 
 /*
...
@@ -562,8 +562,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
-		if (rq->cmd_type == REQ_TYPE_FS ||
-		    (rq->cmd_flags & REQ_DISCARD)) {
+		if (rq->cmd_type == REQ_TYPE_FS) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
@@ -605,8 +604,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		if (elv_attempt_insert_merge(q, rq))
 			break;
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
-		       !(rq->cmd_flags & REQ_DISCARD));
+		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
...
@@ -386,9 +386,28 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 /*
  * Check whether this bio carries any data or not. A NULL bio is allowed.
  */
-static inline int bio_has_data(struct bio *bio)
+static inline bool bio_has_data(struct bio *bio)
 {
-	return bio && bio->bi_io_vec != NULL;
+	if (bio && bio->bi_vcnt)
+		return true;
+
+	return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+	if (!bio_has_data(bio))
+		return false;
+
+	return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
 }
 
 /*
...
@@ -194,6 +194,10 @@ enum rq_flag_bits {
 			 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
+/* This mask is used for both bio and request merge checking */
+#define REQ_NOMERGE_FLAGS \
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
 #define REQ_THROTTLED		(1 << __REQ_THROTTLED)
...
@@ -540,8 +540,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_account_rq(rq) \
 	(((rq)->cmd_flags & REQ_STARTED) && \
-	 ((rq)->cmd_type == REQ_TYPE_FS || \
-	  ((rq)->cmd_flags & REQ_DISCARD)))
+	 ((rq)->cmd_type == REQ_TYPE_FS))
 
 #define blk_pm_request(rq) \
 	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
@@ -595,17 +594,16 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 	rl->flags &= ~flag;
 }
 
-/*
- * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
- * it already be started by driver.
- */
-#define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
-#define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (((rq)->cmd_flags & REQ_DISCARD) || \
-	  (rq)->cmd_type == REQ_TYPE_FS))
+static inline bool rq_mergeable(struct request *rq)
+{
+	if (rq->cmd_type != REQ_TYPE_FS)
+		return false;
+
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
 
 /*
  * q->prep_rq_fn return values
...