Commit f73f44eb authored by Christoph Hellwig, committed by Jens Axboe

block: add a op_is_flush helper

This centralizes the checks for bios that need to go into the flush
state machine.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent c13660a0
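For quick reference, here is a minimal sketch of the helper this commit introduces, together with the typical call-site conversion it enables; it only restates what the hunks below show and is not part of the commit itself.

    /* New helper, as added by the last hunk of this diff. */
    static inline bool op_is_flush(unsigned int op)
    {
            return op & (REQ_FUA | REQ_PREFLUSH);
    }

    /* Typical call-site conversion, before and after: */
    if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))     /* before */
            where = ELEVATOR_INSERT_FLUSH;

    if (op_is_flush(bio->bi_opf))                   /* after */
            where = ELEVATOR_INSERT_FLUSH;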
@@ -1035,7 +1035,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(bio->bi_opf))
return false;
return true;
@@ -1641,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+ if (op_is_flush(bio->bi_opf)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -2145,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
*/
BUG_ON(blk_queued_rq(rq));
- if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(rq->cmd_flags))
where = ELEVATOR_INSERT_FLUSH;
add_acct_request(q, rq, where);
@@ -3256,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* rq is already accounted, so use raw insert
*/
- if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+ if (op_is_flush(rq->cmd_flags))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
@@ -111,7 +111,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
- const bool is_flush = op & (REQ_PREFLUSH | REQ_FUA);
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
@@ -126,7 +125,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
* Flush requests are special and go directly to the
* dispatch list.
*/
- if (!is_flush && e->type->ops.mq.get_request) {
+ if (!op_is_flush(op) && e->type->ops.mq.get_request) {
rq = e->type->ops.mq.get_request(q, op, data);
if (rq)
rq->rq_flags |= RQF_QUEUED;
@@ -139,7 +138,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
}
if (rq) {
- if (!is_flush) {
+ if (!op_is_flush(op)) {
rq->elv.icq = NULL;
if (e && e->type->icq_cache)
blk_mq_sched_assign_ioc(q, rq, bio);
@@ -1406,7 +1406,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
- const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = { .flags = 0 };
struct request *rq;
unsigned int request_count = 0, srcu_idx;
@@ -1527,7 +1527,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
- const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_plug *plug;
unsigned int request_count = 0;
struct blk_mq_alloc_data data = { .flags = 0 };
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = op_is_flush(bio->bi_opf);
s->iop.wq = bcache_wq;
return s;
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags);
- if (cache->need_tick_bio &&
- !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+ if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
return to_oblock(block_nr);
}
- static int bio_triggers_commit(struct cache *cache, struct bio *bio)
- {
- return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
- }
/*
* You must increment the deferred set whilst the prison cell is held. To
* encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- if (!bio_triggers_commit(cache, bio)) {
+ if (!op_is_flush(bio->bi_opf)) {
accounted_request(cache, bio);
return;
}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio_op(bio) == REQ_OP_DISCARD ||
- bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+ return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+ return op_is_flush(bio->bi_opf) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD)
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD))
+ if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+ bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
- bio_op(bio) == REQ_OP_DISCARD) {
+ if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -220,6 +220,15 @@ static inline bool op_is_write(unsigned int op)
return (op & 1);
}
+ /*
+ * Check if the bio or request is one that needs special treatment in the
+ * flush state machine.
+ */
+ static inline bool op_is_flush(unsigned int op)
+ {
+ return op & (REQ_FUA | REQ_PREFLUSH);
+ }
/*
* Reads are always treated as synchronous, as are requests with the FUA or
* PREFLUSH flag. Other operations may be marked as synchronous using the
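As the call sites above show, the same helper is applied to both bio->bi_opf and rq->cmd_flags, since both fields carry the REQ_* flag bits. A condensed usage sketch follows; the two wrapper function names are hypothetical and exist only to illustrate the two flag words.

    /* Hypothetical wrappers, for illustration only. */
    static bool bio_needs_flush_machinery(struct bio *bio)
    {
            return op_is_flush(bio->bi_opf);        /* bio path, e.g. blk_queue_bio() */
    }

    static bool rq_needs_flush_machinery(struct request *rq)
    {
            return op_is_flush(rq->cmd_flags);      /* request path, e.g. blk_insert_cloned_request() */
    }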