Commit 58eea927 authored by Tejun Heo, committed by Jens Axboe

block: simplify empty barrier implementation

Empty barrier required special handling in __elv_next_request() to
complete it without letting the low level driver see it.

With previous changes, barrier code is now flexible enough to skip the
BAR step using the same barrier sequence selection mechanism.  Drop
the special handling and mask off q->ordered from start_ordered().

Remove blk_empty_barrier() test which now has no user.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 8f11b3e9
...@@ -162,6 +162,14 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) ...@@ -162,6 +162,14 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
q->ordered = q->next_ordered; q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED; q->ordseq |= QUEUE_ORDSEQ_STARTED;
/*
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
if (!rq->hard_nr_sectors)
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/* stash away the original request */ /* stash away the original request */
elv_dequeue_request(q, rq); elv_dequeue_request(q, rq);
q->orig_bar_rq = rq; q->orig_bar_rq = rq;
...@@ -171,13 +179,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) ...@@ -171,13 +179,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
* Queue ordered sequence. As we stack them at the head, we * Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that * need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
* request gets inbetween ordered sequence. If this request is * request gets inbetween ordered sequence.
* an empty barrier, we don't need to do a postflush ever since
* there will be no data written between the pre and post flush.
* Hence a single flush will suffice.
*/ */
if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
!blk_empty_barrier(q->orig_bar_rq)) {
queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
rq = &q->post_flush_rq; rq = &q->post_flush_rq;
} else } else
......
...@@ -755,14 +755,6 @@ struct request *elv_next_request(struct request_queue *q) ...@@ -755,14 +755,6 @@ struct request *elv_next_request(struct request_queue *q)
int ret; int ret;
while ((rq = __elv_next_request(q)) != NULL) { while ((rq = __elv_next_request(q)) != NULL) {
/*
* Kill the empty barrier place holder, the driver must
* not ever see it.
*/
if (blk_empty_barrier(rq)) {
__blk_end_request(rq, 0, blk_rq_bytes(rq));
continue;
}
if (!(rq->cmd_flags & REQ_STARTED)) { if (!(rq->cmd_flags & REQ_STARTED)) {
/* /*
* This is the first time the device driver * This is the first time the device driver
......
...@@ -596,7 +596,6 @@ enum { ...@@ -596,7 +596,6 @@ enum {
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */ /* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment