Commit 313e4299 authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe

block: reorganize QUEUE_ORDERED_* constants

Separate out ordering types (drain, tag) and action masks (preflush,
postflush, fua) from visible ordering mode selectors
(QUEUE_ORDERED_*).  Ordering types are now named QUEUE_ORDERED_BY_*
while action masks are named QUEUE_ORDERED_DO_*.

This change is necessary to add QUEUE_ORDERED_DO_BAR and make it
optional to improve empty barrier implementation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent ba744d5e
...@@ -24,8 +24,8 @@ ...@@ -24,8 +24,8 @@
int blk_queue_ordered(struct request_queue *q, unsigned ordered, int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn) prepare_flush_fn *prepare_flush_fn)
{ {
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
prepare_flush_fn == NULL) { QUEUE_ORDERED_DO_POSTFLUSH))) {
printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__); printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL; return -EINVAL;
} }
...@@ -134,7 +134,7 @@ static void queue_flush(struct request_queue *q, unsigned which) ...@@ -134,7 +134,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
struct request *rq; struct request *rq;
rq_end_io_fn *end_io; rq_end_io_fn *end_io;
if (which == QUEUE_ORDERED_PREFLUSH) { if (which == QUEUE_ORDERED_DO_PREFLUSH) {
rq = &q->pre_flush_rq; rq = &q->pre_flush_rq;
end_io = pre_flush_end_io; end_io = pre_flush_end_io;
} else { } else {
...@@ -167,7 +167,7 @@ static inline struct request *start_ordered(struct request_queue *q, ...@@ -167,7 +167,7 @@ static inline struct request *start_ordered(struct request_queue *q,
blk_rq_init(q, rq); blk_rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW; rq->cmd_flags |= REQ_RW;
if (q->ordered & QUEUE_ORDERED_FUA) if (q->ordered & QUEUE_ORDERED_DO_FUA)
rq->cmd_flags |= REQ_FUA; rq->cmd_flags |= REQ_FUA;
init_request_from_bio(rq, q->orig_bar_rq->bio); init_request_from_bio(rq, q->orig_bar_rq->bio);
rq->end_io = bar_end_io; rq->end_io = bar_end_io;
...@@ -181,20 +181,20 @@ static inline struct request *start_ordered(struct request_queue *q, ...@@ -181,20 +181,20 @@ static inline struct request *start_ordered(struct request_queue *q,
* there will be no data written between the pre and post flush. * there will be no data written between the pre and post flush.
* Hence a single flush will suffice. * Hence a single flush will suffice.
*/ */
if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
queue_flush(q, QUEUE_ORDERED_POSTFLUSH); queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
else else
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
elv_insert(q, rq, ELEVATOR_INSERT_FRONT); elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
if (q->ordered & QUEUE_ORDERED_PREFLUSH) { if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
queue_flush(q, QUEUE_ORDERED_PREFLUSH); queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
rq = &q->pre_flush_rq; rq = &q->pre_flush_rq;
} else } else
q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
q->ordseq |= QUEUE_ORDSEQ_DRAIN; q->ordseq |= QUEUE_ORDSEQ_DRAIN;
else else
rq = NULL; rq = NULL;
...@@ -237,7 +237,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) ...@@ -237,7 +237,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
rq != &q->pre_flush_rq && rq != &q->post_flush_rq) rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
return 1; return 1;
if (q->ordered & QUEUE_ORDERED_TAG) { if (q->ordered & QUEUE_ORDERED_BY_TAG) {
/* Ordered by tag. Blocking the next barrier is enough. */ /* Ordered by tag. Blocking the next barrier is enough. */
if (is_barrier && rq != &q->bar_rq) if (is_barrier && rq != &q->bar_rq)
*rqp = NULL; *rqp = NULL;
......
...@@ -523,22 +523,29 @@ enum { ...@@ -523,22 +523,29 @@ enum {
* TAG_FLUSH : ordering by tag w/ pre and post flushes * TAG_FLUSH : ordering by tag w/ pre and post flushes
* TAG_FUA : ordering by tag w/ pre flush and FUA write * TAG_FUA : ordering by tag w/ pre flush and FUA write
*/ */
QUEUE_ORDERED_NONE = 0x00, QUEUE_ORDERED_BY_DRAIN = 0x01,
QUEUE_ORDERED_DRAIN = 0x01, QUEUE_ORDERED_BY_TAG = 0x02,
QUEUE_ORDERED_TAG = 0x02, QUEUE_ORDERED_DO_PREFLUSH = 0x10,
QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
QUEUE_ORDERED_PREFLUSH = 0x10, QUEUE_ORDERED_DO_FUA = 0x80,
QUEUE_ORDERED_POSTFLUSH = 0x20,
QUEUE_ORDERED_FUA = 0x40, QUEUE_ORDERED_NONE = 0x00,
QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN,
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, QUEUE_ORDERED_DO_POSTFLUSH,
QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | QUEUE_ORDERED_DO_FUA,
QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG,
QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_POSTFLUSH,
QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
QUEUE_ORDERED_DO_PREFLUSH |
QUEUE_ORDERED_DO_FUA,
/* /*
* Ordered operation sequence * Ordered operation sequence
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment