Commit 29ece8b4 authored by Yufen Yu's avatar Yufen Yu Committed by Jens Axboe

block: add BLK_MQ_POLL_CLASSIC for hybrid poll and return EINVAL for unexpected value

A value of q->poll_nsec == -1 means doing classic poll, not hybrid poll.
We introduce a new flag BLK_MQ_POLL_CLASSIC to replace -1, which
makes the code easier to read.

Additionally, since val is an int obtained with kstrtoint(), val can be
a negative value other than -1, so return -EINVAL for that case.

Thanks to Damien Le Moal for some good suggestions.
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9496c015
...@@ -2856,7 +2856,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, ...@@ -2856,7 +2856,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* /*
* Default to classic polling * Default to classic polling
*/ */
q->poll_nsec = -1; q->poll_nsec = BLK_MQ_POLL_CLASSIC;
blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q); blk_mq_add_queue_tag_set(set, q);
...@@ -3391,7 +3391,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, ...@@ -3391,7 +3391,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
{ {
struct request *rq; struct request *rq;
if (q->poll_nsec == -1) if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
return false; return false;
if (!blk_qc_t_is_internal(cookie)) if (!blk_qc_t_is_internal(cookie))
......
...@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) ...@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{ {
int val; int val;
if (q->poll_nsec == -1) if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
val = -1; val = BLK_MQ_POLL_CLASSIC;
else else
val = q->poll_nsec / 1000; val = q->poll_nsec / 1000;
...@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, ...@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
if (err < 0) if (err < 0)
return err; return err;
if (val == -1) if (val == BLK_MQ_POLL_CLASSIC)
q->poll_nsec = -1; q->poll_nsec = BLK_MQ_POLL_CLASSIC;
else else if (val >= 0)
q->poll_nsec = val * 1000; q->poll_nsec = val * 1000;
else
return -EINVAL;
return count; return count;
} }
......
...@@ -50,6 +50,9 @@ struct blk_stat_callback; ...@@ -50,6 +50,9 @@ struct blk_stat_callback;
/* Must be consistent with blk_mq_poll_stats_bkt() */ /* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16 #define BLK_MQ_POLL_STATS_BKTS 16
/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1
/* /*
* Maximum number of blkcg policies allowed to be registered concurrently. * Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency. * Defined here to simplify include dependency.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment