Commit 9e973e64 authored by Jens Axboe

xen/blkfront: use blk_rq_map_sg to generate ring entries

On occasion, the request will apparently have more segments than we
fit into the ring. Jens says:

> The second problem is that the block layer then appears to create one
> too many segments, but from the dump it has rq->nr_phys_segments ==
> BLKIF_MAX_SEGMENTS_PER_REQUEST. I suspect the latter is due to
> xen-blkfront not handling the merging on its own. It should check that
> the new page doesn't form part of the previous page. The
> rq_for_each_segment() iterates all single bits in the request, not dma
> segments. The "easiest" way to do this is to call blk_rq_map_sg() and
> then iterate the mapped sg list. That will give you what you are
> looking for.

> Here's a test patch, compiles but otherwise untested. I spent more
> time figuring out how to enable XEN than to code it up, so YMMV!
> Probably the sg list wants to be put inside the ring and only
> initialized on allocation, then you can get rid of the sg on stack and
> sg_init_table() loop call in the function. I'll leave that, and the
> testing, to you.

[Moved sg array into info structure, and initialize once. -J]
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
parent 1e428079
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/hdreg.h> #include <linux/hdreg.h>
#include <linux/cdrom.h> #include <linux/cdrom.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/scatterlist.h>
#include <xen/xenbus.h> #include <xen/xenbus.h>
#include <xen/grant_table.h> #include <xen/grant_table.h>
...@@ -82,6 +83,7 @@ struct blkfront_info ...@@ -82,6 +83,7 @@ struct blkfront_info
enum blkif_state connected; enum blkif_state connected;
int ring_ref; int ring_ref;
struct blkif_front_ring ring; struct blkif_front_ring ring;
struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int evtchn, irq; unsigned int evtchn, irq;
struct request_queue *rq; struct request_queue *rq;
struct work_struct work; struct work_struct work;
...@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req) ...@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
struct blkfront_info *info = req->rq_disk->private_data; struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn; unsigned long buffer_mfn;
struct blkif_request *ring_req; struct blkif_request *ring_req;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned long id; unsigned long id;
unsigned int fsect, lsect; unsigned int fsect, lsect;
int ref; int i, ref;
grant_ref_t gref_head; grant_ref_t gref_head;
struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1; return 1;
...@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req) ...@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
if (blk_barrier_rq(req)) if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = 0; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
rq_for_each_segment(bvec, req, iter) { BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
fsect = bvec->bv_offset >> 9; buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
lsect = fsect + (bvec->bv_len >> 9) - 1; fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
/* install a grant reference. */ /* install a grant reference. */
ref = gnttab_claim_grant_reference(&gref_head); ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC); BUG_ON(ref == -ENOSPC);
...@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req) ...@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
buffer_mfn, buffer_mfn,
rq_data_dir(req) ); rq_data_dir(req) );
info->shadow[id].frame[ring_req->nr_segments] = info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
mfn_to_pfn(buffer_mfn); ring_req->seg[i] =
ring_req->seg[ring_req->nr_segments] =
(struct blkif_request_segment) { (struct blkif_request_segment) {
.gref = ref, .gref = ref,
.first_sect = fsect, .first_sect = fsect,
.last_sect = lsect }; .last_sect = lsect };
ring_req->nr_segments++;
} }
info->ring.req_prod_pvt++; info->ring.req_prod_pvt++;
...@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev, ...@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring); SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) { if (err < 0) {
free_page((unsigned long)sring); free_page((unsigned long)sring);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment