Commit babb29b0 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  xen/blkfront: use blk_rq_map_sg to generate ring entries
  block: reduce stack footprint of blk_recount_segments()
  cciss: shorten 30s timeout on controller reset
  block: add documentation for register_blkdev()
  block: fix bogus gcc warning for uninitialized var usage
parents 6fc79d40 9e973e64
...@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect) ...@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
} }
} }
void blk_recalc_rq_segments(struct request *rq) static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio,
unsigned int *seg_size_ptr)
{ {
int nr_phys_segs;
unsigned int phys_size; unsigned int phys_size;
struct bio_vec *bv, *bvprv = NULL; struct bio_vec *bv, *bvprv = NULL;
int seg_size; int cluster, i, high, highprv = 1;
int cluster; unsigned int seg_size, nr_phys_segs;
struct req_iterator iter; struct bio *fbio;
int high, highprv = 1;
struct request_queue *q = rq->q;
if (!rq->bio) if (!bio)
return; return 0;
fbio = bio;
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
seg_size = 0; seg_size = 0;
phys_size = nr_phys_segs = 0; phys_size = nr_phys_segs = 0;
rq_for_each_segment(bv, rq, iter) { for_each_bio(bio) {
/* bio_for_each_segment(bv, bio, i) {
* the trick here is making sure that a high page is never /*
* considered part of another segment, since that might * the trick here is making sure that a high page is
* change with the bounce page. * never considered part of another segment, since that
*/ * might change with the bounce page.
high = page_to_pfn(bv->bv_page) > q->bounce_pfn; */
if (high || highprv) high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
goto new_segment; if (high || highprv)
if (cluster) {
if (seg_size + bv->bv_len > q->max_segment_size)
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
goto new_segment; goto new_segment;
if (cluster) {
if (seg_size + bv->bv_len > q->max_segment_size)
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
goto new_segment;
seg_size += bv->bv_len;
bvprv = bv;
continue;
}
new_segment:
if (nr_phys_segs == 1 && seg_size >
fbio->bi_seg_front_size)
fbio->bi_seg_front_size = seg_size;
seg_size += bv->bv_len; nr_phys_segs++;
bvprv = bv; bvprv = bv;
continue; seg_size = bv->bv_len;
highprv = high;
} }
new_segment:
if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
rq->bio->bi_seg_front_size = seg_size;
nr_phys_segs++;
bvprv = bv;
seg_size = bv->bv_len;
highprv = high;
} }
if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size) if (seg_size_ptr)
*seg_size_ptr = seg_size;
return nr_phys_segs;
}
/*
 * Recompute rq->nr_phys_segments for @rq and refresh the cached
 * front/back segment-size hints on the request's first and last bios.
 * Delegates the actual walk to __blk_recalc_rq_segments().
 */
void blk_recalc_rq_segments(struct request *rq)
{
	unsigned int seg_size = 0, phys_segs;

	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);

	/* with a single segment, front and back sizes are the same walk */
	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
		rq->bio->bi_seg_front_size = seg_size;
	if (seg_size > rq->biotail->bi_seg_back_size)
		rq->biotail->bi_seg_back_size = seg_size;

	rq->nr_phys_segments = phys_segs;
}
void blk_recount_segments(struct request_queue *q, struct bio *bio) void blk_recount_segments(struct request_queue *q, struct bio *bio)
{ {
struct request rq;
struct bio *nxt = bio->bi_next; struct bio *nxt = bio->bi_next;
rq.q = q;
rq.bio = rq.biotail = bio;
bio->bi_next = NULL; bio->bi_next = NULL;
blk_recalc_rq_segments(&rq); bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
bio->bi_next = nxt; bio->bi_next = nxt;
bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID); bio->bi_flags |= (1 << BIO_SEG_VALID);
} }
EXPORT_SYMBOL(blk_recount_segments); EXPORT_SYMBOL(blk_recount_segments);
......
...@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset) ...@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
} }
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
/**
* register_blkdev - register a new block device
*
* @major: the requested major device number [1..255]. If @major=0, try to
* allocate any unused major number.
* @name: the name of the new block device as a zero terminated string
*
* The @name must be unique within the system.
*
* The return value depends on the @major input parameter.
* - if a major device number was requested in range [1..255] then the
* function returns zero on success, or a negative error code
* - if any unused major number was requested with @major=0 parameter
* then the return value is the allocated major number in range
* [1..255] or a negative error code otherwise
*/
int register_blkdev(unsigned int major, const char *name) int register_blkdev(unsigned int major, const char *name)
{ {
struct blk_major_name **n, *p; struct blk_major_name **n, *p;
......
...@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, ...@@ -3611,11 +3611,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
schedule_timeout_uninterruptible(30*HZ); schedule_timeout_uninterruptible(30*HZ);
/* Now try to get the controller to respond to a no-op */ /* Now try to get the controller to respond to a no-op */
for (i=0; i<12; i++) { for (i=0; i<30; i++) {
if (cciss_noop(pdev) == 0) if (cciss_noop(pdev) == 0)
break; break;
else
printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : "")); schedule_timeout_uninterruptible(HZ);
}
if (i == 30) {
printk(KERN_ERR "cciss: controller seems dead\n");
return -EBUSY;
} }
} }
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/hdreg.h> #include <linux/hdreg.h>
#include <linux/cdrom.h> #include <linux/cdrom.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/scatterlist.h>
#include <xen/xenbus.h> #include <xen/xenbus.h>
#include <xen/grant_table.h> #include <xen/grant_table.h>
...@@ -82,6 +83,7 @@ struct blkfront_info ...@@ -82,6 +83,7 @@ struct blkfront_info
enum blkif_state connected; enum blkif_state connected;
int ring_ref; int ring_ref;
struct blkif_front_ring ring; struct blkif_front_ring ring;
struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int evtchn, irq; unsigned int evtchn, irq;
struct request_queue *rq; struct request_queue *rq;
struct work_struct work; struct work_struct work;
...@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req) ...@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
struct blkfront_info *info = req->rq_disk->private_data; struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn; unsigned long buffer_mfn;
struct blkif_request *ring_req; struct blkif_request *ring_req;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned long id; unsigned long id;
unsigned int fsect, lsect; unsigned int fsect, lsect;
int ref; int i, ref;
grant_ref_t gref_head; grant_ref_t gref_head;
struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1; return 1;
...@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req) ...@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
if (blk_barrier_rq(req)) if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = 0; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
rq_for_each_segment(bvec, req, iter) { BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
fsect = bvec->bv_offset >> 9; buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
lsect = fsect + (bvec->bv_len >> 9) - 1; fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
/* install a grant reference. */ /* install a grant reference. */
ref = gnttab_claim_grant_reference(&gref_head); ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC); BUG_ON(ref == -ENOSPC);
...@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req) ...@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
buffer_mfn, buffer_mfn,
rq_data_dir(req) ); rq_data_dir(req) );
info->shadow[id].frame[ring_req->nr_segments] = info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
mfn_to_pfn(buffer_mfn); ring_req->seg[i] =
ring_req->seg[ring_req->nr_segments] =
(struct blkif_request_segment) { (struct blkif_request_segment) {
.gref = ref, .gref = ref,
.first_sect = fsect, .first_sect = fsect,
.last_sect = lsect }; .last_sect = lsect };
ring_req->nr_segments++;
} }
info->ring.req_prod_pvt++; info->ring.req_prod_pvt++;
...@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev, ...@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring); SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) { if (err < 0) {
free_page((unsigned long)sring); free_page((unsigned long)sring);
......
...@@ -302,7 +302,7 @@ void bio_init(struct bio *bio) ...@@ -302,7 +302,7 @@ void bio_init(struct bio *bio)
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{ {
struct bio *bio = NULL; struct bio *bio = NULL;
void *p; void *uninitialized_var(p);
if (bs) { if (bs) {
p = mempool_alloc(bs->bio_pool, gfp_mask); p = mempool_alloc(bs->bio_pool, gfp_mask);
......
...@@ -708,6 +708,8 @@ struct req_iterator { ...@@ -708,6 +708,8 @@ struct req_iterator {
}; };
/* This should not be used directly - use rq_for_each_segment */
/* Advance @_bio along its bi_next chain; modifies @_bio in place. */
#define for_each_bio(_bio)				\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)			\
	if ((rq->bio))					\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment