Commit 66cb45aa authored by Jens Axboe

block: add support for limiting gaps in SG lists

Another restriction inherited from NVMe - those devices don't support
SG lists that have "gaps" in them. A gap occurs when the previous SG
entry doesn't end on a page boundary. For NVMe, all SG entries must
start at offset 0 (except the first) and end on a page boundary
(except the last).
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 3a4b0eda
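A driver whose hardware cannot handle such gaps opts in by setting the new QUEUE_FLAG_SG_GAPS bit on its request_queue during setup. A minimal sketch, assuming a hypothetical driver init helper (the name example_disallow_sg_gaps and its call site are illustrative, not part of this commit):

#include <linux/blkdev.h>

/*
 * Illustrative only: advertise that this queue cannot tolerate SG gaps,
 * so the bio and merge paths changed below will enforce the restriction.
 */
static void example_disallow_sg_gaps(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
}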
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			goto done;
 		}
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+		    bvec_gap_to_prev(prev, offset))
+			return 0;
 	}

 	if (bio->bi_vcnt >= bio->bi_max_vecs)
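Rejecting the page here reuses the existing failure convention: a 0 return from __bio_add_page() propagates out of bio_add_page(), and callers typically treat a short return as "this bio is full", submit it, and continue with a fresh bio. A rough sketch of that caller pattern, with hypothetical names (example_add_or_flush; sector bookkeeping is omitted and noted in a comment):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative caller: if the page cannot be added (for example because it
 * would create an SG gap on a QUEUE_FLAG_SG_GAPS queue), submit the current
 * bio and retry with a new one.
 */
static struct bio *example_add_or_flush(struct bio *bio, struct block_device *bdev,
					struct page *page, unsigned int len,
					unsigned int offset)
{
	if (bio_add_page(bio, page, len, offset) < len) {
		submit_bio(WRITE, bio);
		bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
		bio->bi_bdev = bdev;
		/* a real caller would also set bi_iter.bi_sector here */
		bio_add_page(bio, page, len, offset);
	}
	return bio;
}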
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
+
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;

+	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+		struct bio_vec *bprev;
+
+		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+			return false;
+	}
+
 	return true;
 }
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)

 /*
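To make the helper's semantics concrete, here is a standalone user-space sketch that mirrors the same arithmetic (the ex_ struct, EX_PAGE_SIZE and the test values are stand-ins, not kernel code): a gap exists whenever the new vector does not start at offset 0, or the previous vector does not end exactly on a page boundary.

#include <assert.h>
#include <stdbool.h>

#define EX_PAGE_SIZE 4096u		/* stand-in for the kernel's PAGE_SIZE */

struct ex_bio_vec {			/* stand-in for struct bio_vec */
	unsigned int bv_offset;
	unsigned int bv_len;
};

/* Same test as bvec_gap_to_prev(): true if appending a vector starting at
 * 'offset' right after 'bprv' would leave a hole in the SG list. */
static bool ex_gap_to_prev(const struct ex_bio_vec *bprv, unsigned int offset)
{
	return offset || ((bprv->bv_offset + bprv->bv_len) & (EX_PAGE_SIZE - 1));
}

int main(void)
{
	struct ex_bio_vec ends_on_boundary = { .bv_offset = 0, .bv_len = EX_PAGE_SIZE };
	struct ex_bio_vec ends_mid_page = { .bv_offset = 0, .bv_len = 512 };

	assert(!ex_gap_to_prev(&ends_on_boundary, 0));	/* contiguous: no gap */
	assert(ex_gap_to_prev(&ends_on_boundary, 512));	/* next starts mid-page */
	assert(ex_gap_to_prev(&ends_mid_page, 0));	/* prev ends mid-page */
	return 0;
}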
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */

 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\