Commit b49a0871 authored by Ming Lin, committed by Jens Axboe

block: remove split code in blkdev_issue_{discard,write_same}

The split code in blkdev_issue_{discard,write_same} can go away
now that any driver that cares does the split. We have to make
sure bio size doesn't overflow.

For discard, we set max discard sectors to (1<<31)>>9 to ensure
it doesn't overflow bi_size and hopefully it is of the proper
granularity as long as the granularity is a power of two.

Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 0e28997e
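
As a quick sanity check of the arithmetic in the commit message: (1 << 31) >> 9 is 2^22 = 4194304 sectors, i.e. 2^31 bytes, which fits in the 32-bit bi_iter.bi_size; and because it is a power of two, it is a multiple of any power-of-two discard granularity. The same byte-count bound holds for the UINT_MAX >> 9 cap used for write-same below. A minimal standalone userspace sketch of this reasoning (illustrative only, not kernel code):

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t max_bio_sectors = (1U << 31) >> 9; /* 2^22 sectors */
            uint32_t max_ws_sectors  = UINT_MAX >> 9;   /* 2^23 - 1 sectors */

            /* Both caps keep the byte count within a 32-bit bi_size. */
            assert(((uint64_t)max_bio_sectors << 9) <= UINT_MAX);
            assert(((uint64_t)max_ws_sectors << 9) <= UINT_MAX);

            /*
             * 2^22 is a multiple of every power-of-two granularity up
             * to 2^22 sectors, so discard chunks cut at this boundary
             * stay aligned. UINT_MAX >> 9 (2^23 - 1) is not a power of
             * two, which is why discard uses the smaller constant.
             */
            for (uint32_t gran = 1; gran <= max_bio_sectors; gran <<= 1)
                    assert(max_bio_sectors % gran == 0);

            printf("discard cap: %u sectors, write-same cap: %u sectors\n",
                   max_bio_sectors, max_ws_sectors);
            return 0;
    }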
block/blk-lib.c
@@ -26,6 +26,13 @@ static void bio_batch_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
+/*
+ * Ensure that max discard sectors doesn't overflow bi_size and hopefully
+ * it is of the proper granularity as long as the granularity is a power
+ * of two.
+ */
+#define MAX_BIO_SECTORS ((1U << 31) >> 9)
+
 /**
  * blkdev_issue_discard - queue a discard
  * @bdev:	blockdev to issue discard for
@@ -43,8 +50,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -56,21 +61,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
@@ -84,7 +74,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		sector_t end_sect;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -92,21 +82,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
+		req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
@@ -165,10 +142,8 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	if (!q)
 		return -ENXIO;
 
-	max_write_same_sectors = q->limits.max_write_same_sectors;
-
-	if (max_write_same_sectors == 0)
-		return -EOPNOTSUPP;
+	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
+	max_write_same_sectors = UINT_MAX >> 9;
 
 	atomic_set(&bb.done, 1);
 	bb.error = 0;
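
To make the simplified loop concrete, here is a minimal userspace model of the chunking that remains in blkdev_issue_discard() after this patch. issue_chunk() is a hypothetical stand-in for allocating and submitting one bio; any finer, granularity-aware splitting is now the driver's job, per the commit message.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BIO_SECTORS ((1U << 31) >> 9)

    /* Hypothetical stand-in for building and submitting one discard bio. */
    static void issue_chunk(uint64_t sector, uint32_t nr)
    {
            printf("discard: sector %llu, %u sectors\n",
                   (unsigned long long)sector, nr);
    }

    int main(void)
    {
            uint64_t sector = 0;
            uint64_t nr_sects = 10ULL * 1024 * 1024 * 2; /* 10 GiB in 512B sectors */

            /*
             * Mirrors the while (nr_sects) loop above: each bio covers
             * at most MAX_BIO_SECTORS (2^22 sectors = 2^31 bytes), so
             * bi_size cannot overflow; no granularity math is needed.
             */
            while (nr_sects) {
                    uint32_t req_sects = nr_sects < MAX_BIO_SECTORS ?
                                    (uint32_t)nr_sects : MAX_BIO_SECTORS;

                    issue_chunk(sector, req_sects);
                    sector += req_sects;
                    nr_sects -= req_sects;
            }
            return 0; /* the 10 GiB example yields five 4194304-sector chunks */
    }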