Commit 38f25255 authored by Christoph Hellwig, committed by Jens Axboe

block: add __blkdev_issue_discard

This is a version of blkdev_issue_discard which doesn't wait for
the I/O to complete, but instead allows the caller to submit
the final bio and/or chain it to others.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Sagi Grimberg <sagig@grimberg.me>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 9082e87b
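For context, here is a minimal sketch (not part of this patch) of how a caller could use the new helper to issue a discard without blocking: __blkdev_issue_discard() builds and chains the bios, and the caller submits the final bio with its own completion handler instead of calling submit_bio_wait(). The example_* names and the error handling are assumptions made purely for illustration.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only: completion for the final bio; it runs once the whole
 * chained discard has finished. */
static void example_discard_end_io(struct bio *bio)
{
        if (bio->bi_error)
                pr_err("discard failed: %d\n", bio->bi_error);
        bio_put(bio);
}

/* Illustrative only: issue a discard asynchronously instead of waiting the
 * way blkdev_issue_discard() does. */
static int example_issue_discard_async(struct block_device *bdev,
                sector_t sector, sector_t nr_sects)
{
        int type = REQ_WRITE | REQ_DISCARD;
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                        type, &bio);
        if (!ret && bio) {
                /* the caller, not the helper, owns and submits the last bio */
                bio->bi_end_io = example_discard_end_io;
                submit_bio(type, bio);
        }
        blk_finish_plug(&plug);
        return ret;
}

The reworked blkdev_issue_discard() in the diff below follows the same pattern, except that it waits on the final bio with submit_bio_wait().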
block/blk-lib.c
@@ -22,45 +22,25 @@ static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
         return new;
 }
 
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+                sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
         struct request_queue *q = bdev_get_queue(bdev);
-        int type = REQ_WRITE | REQ_DISCARD;
+        struct bio *bio = *biop;
         unsigned int granularity;
         int alignment;
-        struct bio *bio = NULL;
-        int ret = 0;
-        struct blk_plug plug;
 
         if (!q)
                 return -ENXIO;
-
         if (!blk_queue_discard(q))
                 return -EOPNOTSUPP;
+        if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+                return -EOPNOTSUPP;
 
         /* Zero-sector (unknown) and one-sector granularities are the same. */
         granularity = max(q->limits.discard_granularity >> 9, 1U);
         alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-        if (flags & BLKDEV_DISCARD_SECURE) {
-                if (!blk_queue_secdiscard(q))
-                        return -EOPNOTSUPP;
-                type |= REQ_SECURE;
-        }
-
-        blk_start_plug(&plug);
         while (nr_sects) {
                 unsigned int req_sects;
                 sector_t end_sect, tmp;
@@ -98,7 +78,38 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                  */
                 cond_resched();
         }
-        if (bio)
+
+        *biop = bio;
+        return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+        int type = REQ_WRITE | REQ_DISCARD;
+        struct bio *bio = NULL;
+        struct blk_plug plug;
+        int ret;
+
+        if (flags & BLKDEV_DISCARD_SECURE)
+                type |= REQ_SECURE;
+
+        blk_start_plug(&plug);
+        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+                        &bio);
+        if (!ret && bio)
                 ret = submit_bio_wait(type, bio);
         blk_finish_plug(&plug);
include/linux/blkdev.h
@@ -1131,6 +1131,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+                sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
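The commit message also mentions chaining the final bio to others. Under the same assumptions and headers as the sketch above, a caller that already owns a parent bio could fold the discard into it roughly as follows, so the parent does not complete until the chained discards have finished (again only an illustrative sketch, not code from this series):

/* Illustrative only: chain the final discard bio to a caller-owned parent. */
static int example_discard_into_chain(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, struct bio *parent)
{
        int type = REQ_WRITE | REQ_DISCARD;
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                        type, &bio);
        if (!ret && bio) {
                /* parent's completion now also waits for the discard chain */
                bio_chain(bio, parent);
                submit_bio(type, bio);
        }
        return ret;
}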