Commit e17fc0a1 authored by David Woodhouse, committed by Jens Axboe

Allow elevators to sort/merge discard requests

But blkdev_issue_discard() still emits requests which are interpreted as
soft barriers, because naïve callers might otherwise issue subsequent
writes to those same sectors, which might cross on the queue (if they're
reallocated quickly enough).

Callers still _can_ issue non-barrier discard requests, but they have to
take care of queue ordering for themselves.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent d30a2605
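
For context, a minimal sketch (not part of the patch) of what a caller issuing its own non-barrier discard might look like, using the DISCARD_NOBARRIER flag introduced at the bottom of this diff and the same bio setup pattern as blk_ioctl_discard(); the example_issue_discard() helper is hypothetical, and completion/reference handling is deliberately omitted:

/*
 * Illustrative sketch only, not taken from this patch. Shows a single
 * non-barrier discard; the caller then owns the ordering problem and must
 * ensure no later write to these sectors can cross the discard on the queue.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static int example_issue_discard(struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	/* no data pages attached; the range is carried in bi_sector/bi_size */
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	if (!bio)
		return -ENOMEM;

	bio->bi_sector = sector;
	bio->bi_bdev   = bdev;
	bio->bi_size   = nr_sects << 9;	/* 512-byte sectors */
	/* bio->bi_end_io / bio->bi_private would normally be set here */

	submit_bio(DISCARD_NOBARRIER, bio);	/* no ordering guarantee */
	return 0;
}

blkdev_issue_discard() itself keeps using the barrier form (DISCARD_BARRIER), so its discards are still treated as soft barriers and cannot be crossed by subsequent writes to the same sectors.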
@@ -372,7 +372,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			nr_sects = 0;
 		}
 		bio_get(bio);
-		submit_bio(WRITE_DISCARD, bio);
+		submit_bio(DISCARD_BARRIER, bio);
 
 		/* Check if it failed immediately */
 		if (bio_flagged(bio, BIO_EOPNOTSUPP))
@@ -1077,12 +1077,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
-	if (unlikely(bio_barrier(bio)))
-		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
 	if (unlikely(bio_discard(bio))) {
-		req->cmd_flags |= (REQ_SOFTBARRIER | REQ_DISCARD);
+		req->cmd_flags |= REQ_DISCARD;
+		if (bio_barrier(bio))
+			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	}
+	} else if (unlikely(bio_barrier(bio)))
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
@@ -1114,7 +1115,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	blk_queue_bounce(q, &bio);
 
 	barrier = bio_barrier(bio);
-	if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+	if (unlikely(barrier) && bio_has_data(bio) &&
+	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-	if (blk_fs_request(rq)) {
+	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
 		rq->hard_sector += nsect;
 		rq->hard_nr_sectors -= nsect;
 
@@ -131,13 +131,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
-	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-		return 0;
 	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
 		return 0;
 
+	if (!bio_has_data(bio))
+		return 1;
+
+	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+		return 0;
+
 	/*
-	 * bio and nxt are contigous in memory, check if the queue allows
+	 * bio and nxt are contiguous in memory; check if the queue allows
 	 * these two to be merged into one
 	 */
 	if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -153,8 +157,9 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 		blk_recount_segments(q, bio);
 	if (!bio_flagged(nxt, BIO_SEG_VALID))
 		blk_recount_segments(q, nxt);
-	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
+	if (bio_has_data(bio) &&
+	    (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+	     BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
 		return 0;
 	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
 		return 0;
@@ -317,8 +322,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-	    && !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (!bio_has_data(bio) ||
+	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	     && !BIOVEC_VIRT_OVERSIZE(len))) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -356,8 +362,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		blk_recount_segments(q, bio);
 	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (!bio_has_data(bio) ||
+	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+	     !BIOVEC_VIRT_OVERSIZE(len))) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -74,6 +74,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!rq_mergeable(rq))
 		return 0;
 
+	/*
+	 * Don't merge file system requests and discard requests
+	 */
+	if (bio_discard(bio) != bio_discard(rq->bio))
+		return 0;
+
 	/*
 	 * different data direction or already started, don't merge
 	 */
@@ -438,6 +444,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
+		if (blk_discard_rq(rq) != blk_discard_rq(pos))
+			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
 		if (pos->cmd_flags & stop_flags)
@@ -607,7 +615,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq));
+		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
@@ -692,7 +700,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		 * this request is scheduling boundary, update
 		 * end_sector
 		 */
-		if (blk_fs_request(rq)) {
+		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
@@ -161,7 +161,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 			bio->bi_size = len << 9;
 			len = 0;
 		}
-		submit_bio(WRITE_DISCARD, bio);
+		submit_bio(DISCARD_NOBARRIER, bio);
 
 		wait_for_completion(&wait);
 
@@ -188,8 +188,8 @@ struct bio {
 #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio))
 #define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
+#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
 static inline unsigned int bio_cur_sectors(struct bio *bio)
 {
@@ -541,7 +541,7 @@ enum {
 #define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
@@ -598,7 +598,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+	 (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
@@ -87,7 +87,8 @@ extern int dir_notify_enable;
 #define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
 #define SWRITE_SYNC	(SWRITE | (1 << BIO_RW_SYNC))
 #define WRITE_BARRIER	(WRITE | (1 << BIO_RW_BARRIER))
-#define WRITE_DISCARD	(WRITE | (1 << BIO_RW_DISCARD))
+#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
+#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
 
 #define SEL_IN		1
 #define SEL_OUT		2