Commit 568ec936 authored by Christoph Hellwig, committed by Jens Axboe

block: replace blk_queue_nowait with bdev_nowait

Replace blk_queue_nowait with a bdev_nowait helper that takes the
block_device, given that the I/O submission path should not have to
look into the request_queue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20220927075815.269694-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 99e60387
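For context, bdev_nowait() reduces to a queue-flag test hidden behind the block_device, so a submission-path caller can ask the bdev directly whether REQ_NOWAIT is supported instead of first fetching the request_queue. A minimal sketch of the resulting call pattern follows; check_nowait() is a hypothetical wrapper for illustration only, not part of the commit:

/*
 * Hypothetical caller sketch: bdev_nowait() is the helper added by this
 * commit; check_nowait() itself is illustrative only.
 */
static blk_status_t check_nowait(struct bio *bio)
{
	/* REQ_NOWAIT bios must fail fast on devices without NOWAIT support */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bio->bi_bdev))
		return BLK_STS_NOTSUPP;
	return BLK_STS_OK;
}

This mirrors the submit_bio_noacct() change in the first hunk of the diff below.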
@@ -713,7 +713,7 @@ void submit_bio_noacct(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue does not support NOWAIT.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
+	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
 		goto not_supported;
 
 	if (should_fail_bio(bio))
@@ -1856,9 +1856,7 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
 				     sector_t start, sector_t len, void *data)
 {
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return !blk_queue_nowait(q);
+	return !bdev_nowait(dev->bdev);
 }
 
 static bool dm_table_supports_nowait(struct dm_table *t)
@@ -5844,7 +5844,7 @@ int md_run(struct mddev *mddev)
 			}
 		}
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
-		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
+		nowait = nowait && bdev_nowait(rdev->bdev);
 	}
 
 	if (!bioset_initialized(&mddev->bio_set)) {
@@ -6980,7 +6980,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	 * If the new disk does not support REQ_NOWAIT,
 	 * disable on the whole MD.
 	 */
-	if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+	if (!bdev_nowait(rdev->bdev)) {
 		pr_info("%s: Disabling nowait because %pg does not support nowait\n",
 			mdname(mddev), rdev->bdev);
 		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
@@ -618,7 +618,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
-#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 #define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
@@ -1280,6 +1279,11 @@ static inline bool bdev_fua(struct block_device *bdev)
 	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
 }
 
+static inline bool bdev_nowait(struct block_device *bdev)
+{
+	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
@@ -1377,7 +1377,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
 static bool io_bdev_nowait(struct block_device *bdev)
 {
-	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
+	return !bdev || bdev_nowait(bdev);
 }
 
 /*