Commit 0b0bcacc authored by Christoph Hellwig, committed by Jens Axboe

block: don't bother with bounce limits for make_request drivers

We only call blk_queue_bounce for request-based drivers, so stop messing
with it for make_request-based drivers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1c4bc3ab
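
Note on the change: the block layer only runs blk_queue_bounce() on the request-based path, i.e. on queues set up through blk_init_allocated_queue() and blk_mq_init_allocated_queue() below, so the BLK_BOUNCE_HIGH default moves there, out of blk_queue_make_request(), and the BLK_BOUNCE_ANY calls in bio-based make_request drivers become dead weight and are dropped. The sketch below is a rough user-space model of the underlying decision, not kernel code; every name, type and value in it is an illustrative stand-in.

/*
 * Toy model of the bounce-limit check: a segment only needs a bounce
 * buffer when its page frame number lies above the queue's bounce_pfn.
 * fake_queue_limits, the PFN constants and segment_needs_bounce() are
 * invented for this sketch and are not kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_BOUNCE_HIGH_PFN 0x38000ULL      /* illustrative low-memory ceiling */
#define FAKE_BOUNCE_ANY_PFN  UINT64_MAX      /* "any page is fine": never bounce */

struct fake_queue_limits {
	uint64_t bounce_pfn;    /* highest PFN the device can address directly */
};

static bool segment_needs_bounce(const struct fake_queue_limits *lim, uint64_t pfn)
{
	return pfn > lim->bounce_pfn;
}

int main(void)
{
	/* Request-based queue: defaults to bouncing highmem pages. */
	struct fake_queue_limits rq_based  = { .bounce_pfn = FAKE_BOUNCE_HIGH_PFN };
	/* Bio-based queue: blk_queue_bounce() is never called, so the limit is moot. */
	struct fake_queue_limits bio_based = { .bounce_pfn = FAKE_BOUNCE_ANY_PFN };
	uint64_t highmem_pfn = 0x100000;        /* a page above the illustrative ceiling */

	printf("request-based path bounces: %s\n",
	       segment_needs_bounce(&rq_based, highmem_pfn) ? "yes" : "no");
	printf("bio-based path bounces:     %s\n",
	       segment_needs_bounce(&bio_based, highmem_pfn) ? "yes" : "no");
	return 0;
}
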
@@ -989,6 +989,11 @@ int blk_init_allocated_queue(struct request_queue *q)
 	 */
 	blk_queue_make_request(q, blk_queue_bio);
 
+	/*
+	 * by default assume old behaviour and bounce for any highmem page
+	 */
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
 	q->sg_reserved_size = INT_MAX;
 
 	/* Protect q->elevator from elevator_change */
@@ -2349,6 +2349,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_make_request(q, blk_mq_make_request);
 
+	/*
+	 * by default assume old behaviour and bounce for any highmem page
+	 */
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -172,11 +172,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_batching = BLK_BATCH_REQ;
 
 	blk_set_default_limits(&q->limits);
-
-	/*
-	 * by default assume old behaviour and bounce for any highmem page
-	 */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
 EXPORT_SYMBOL(blk_queue_make_request);
@@ -418,7 +418,6 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
-	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 	/* This is so fdisk will align partitions on 4k, because of
 	 * direct_access API needing 4k alignment, returning a PFN
@@ -2850,7 +2850,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	q->queue_lock = &resource->req_lock;
 
 	device->md_io.page = alloc_page(GFP_KERNEL);
@@ -284,7 +284,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	}
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
-	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
@@ -273,7 +273,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	q->queuedata = nsblk;
@@ -1297,7 +1297,6 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
@@ -343,7 +343,6 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;