Commit a9a5b952 authored by Linus Torvalds

Merge home:v2.5/linux

into penguin.transmeta.com:/home/torvalds/v2.5/linux
parents 84725bf0 55262c2d
@@ -1616,7 +1616,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	sector = bio->bi_sector;
 	nr_sectors = bio_sectors(bio);
-	cur_nr_sectors = bio_iovec(bio)->bv_len >> 9;
+	cur_nr_sectors = bio_cur_sectors(bio);
 	rw = bio_data_dir(bio);

 	/*
@@ -1672,7 +1673,10 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		}
 		bio->bi_next = req->bio;
-		req->bio = bio;
+		req->cbio = req->bio = bio;
+		req->nr_cbio_segments = bio_segments(bio);
+		req->nr_cbio_sectors = bio_sectors(bio);

 		/*
 		 * may not be valid. if the low level driver said
 		 * it didn't need a bounce buffer then it better
@@ -1740,9 +1744,11 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
 	req->nr_phys_segments = bio_phys_segments(q, bio);
 	req->nr_hw_segments = bio_hw_segments(q, bio);
+	req->nr_cbio_segments = bio_segments(bio);
+	req->nr_cbio_sectors = bio_sectors(bio);
 	req->buffer = bio_data(bio);	/* see ->buffer comment above */
 	req->waiting = NULL;
-	req->bio = req->biotail = bio;
+	req->cbio = req->bio = req->biotail = bio;
 	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
@@ -1914,6 +1920,81 @@ int submit_bio(int rw, struct bio *bio)
 	return 1;
 }
+
+/**
+ * blk_rq_next_segment
+ * @rq: the request being processed
+ *
+ * Description:
+ *	Points to the next segment in the request if the current segment
+ *	is complete. Leaves things unchanged if this segment is not over
+ *	or if no more segments are left in this request.
+ *
+ *	Meant to be used for bio traversal during I/O submission.
+ *	Does not affect any I/O completions or update completion state
+ *	in the request, and does not modify any bio fields.
+ *
+ *	Decrementing rq->nr_sectors, rq->current_nr_sectors and
+ *	rq->nr_cbio_sectors as data is transferred is the caller's
+ *	responsibility and should be done before calling this routine.
+ **/
+void blk_rq_next_segment(struct request *rq)
+{
+	if (rq->current_nr_sectors > 0)
+		return;
+
+	if (rq->nr_cbio_sectors > 0) {
+		--rq->nr_cbio_segments;
+		rq->current_nr_sectors = blk_rq_vec(rq)->bv_len >> 9;
+	} else {
+		if ((rq->cbio = rq->cbio->bi_next)) {
+			rq->nr_cbio_segments = bio_segments(rq->cbio);
+			rq->nr_cbio_sectors = bio_sectors(rq->cbio);
+			rq->current_nr_sectors = bio_cur_sectors(rq->cbio);
+		}
+	}
+
+	/* remember the size of this segment before we start I/O */
+	rq->hard_cur_sectors = rq->current_nr_sectors;
+}
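
The caller contract spelled out in the Description above, as a minimal sketch (not part of this commit; consume_sectors() is a hypothetical helper, and nsect is assumed not to span past the current segment):

	static void consume_sectors(struct request *rq, unsigned int nsect)
	{
		/* per the comment above, the caller decrements these first */
		rq->nr_sectors -= nsect;
		rq->current_nr_sectors -= nsect;
		rq->nr_cbio_sectors -= nsect;

		/* a no-op unless the current segment just reached zero */
		blk_rq_next_segment(rq);
	}
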
+
+/**
+ * process_that_request_first - process partial request submission
+ * @req: the request being processed
+ * @nr_sectors: number of sectors I/O has been submitted on
+ *
+ * Description:
+ *	May be used for processing bio's while submitting I/O without
+ *	signalling completion. Fails if more data is requested than is
+ *	available in the request, in which case it doesn't advance any
+ *	pointers.
+ *
+ *	Assumes a request is correctly set up. No sanity checks.
+ *
+ * Return:
+ *	0 - no more data left to submit (not processed)
+ *	1 - data available to submit for this request (processed)
+ **/
+int process_that_request_first(struct request *req, unsigned int nr_sectors)
+{
+	unsigned int nsect;
+
+	if (req->nr_sectors < nr_sectors)
+		return 0;
+
+	req->nr_sectors -= nr_sectors;
+	req->sector += nr_sectors;
+	while (nr_sectors) {
+		nsect = min_t(unsigned, req->current_nr_sectors, nr_sectors);
+		req->current_nr_sectors -= nsect;
+		nr_sectors -= nsect;
+		if (req->cbio) {
+			req->nr_cbio_sectors -= nsect;
+			blk_rq_next_segment(req);
+		}
+	}
+	return 1;
+}
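
Taken together with blk_rq_next_segment(), the intended driver-side pattern is roughly the following sketch (not from this commit; hw_queue_chunk() and the 8-sector chunk size are made up for illustration):

	static void example_submit(struct request *rq)
	{
		while (rq->nr_sectors) {
			/* never submit past the end of the current segment */
			unsigned int chunk = min_t(unsigned int,
						   rq->current_nr_sectors, 8);

			hw_queue_chunk(rq, blk_rq_vec(rq), chunk);

			/* advances sector/nr_sectors/cbio state only; the
			 * completion state (rq->bio, hard_*) is untouched
			 */
			if (!process_that_request_first(rq, chunk))
				break;
		}
	}
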

 void blk_recalc_rq_segments(struct request *rq)
 {
 	struct bio *bio;
@@ -1922,8 +2003,6 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;

-	rq->buffer = bio_data(rq->bio);
-
 	nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_bio(bio, rq) {
 		/* Force bio hw/phys segs to be recalculated. */
@@ -1941,11 +2020,24 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
 	if (blk_fs_request(rq)) {
 		rq->hard_sector += nsect;
-		rq->nr_sectors = rq->hard_nr_sectors -= nsect;
-		rq->sector = rq->hard_sector;
-		rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
-		rq->hard_cur_sectors = rq->current_nr_sectors;
+		rq->hard_nr_sectors -= nsect;
+
+		/*
+		 * Move the I/O submission pointers ahead if required,
+		 * i.e. for drivers not aware of rq->cbio.
+		 */
+		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+		    (rq->sector <= rq->hard_sector)) {
+			rq->sector = rq->hard_sector;
+			rq->nr_sectors = rq->hard_nr_sectors;
+			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+			rq->current_nr_sectors = rq->hard_cur_sectors;
+			rq->nr_cbio_segments = bio_segments(rq->bio);
+			rq->nr_cbio_sectors = bio_sectors(rq->bio);
+			rq->buffer = bio_data(rq->bio);
+
+			rq->cbio = rq->bio;
+		}

 		/*
 		 * if total number of sectors is less than the first segment
@@ -2139,9 +2231,27 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 	rq->current_nr_sectors = bio_cur_sectors(bio);
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+	rq->nr_cbio_segments = bio_segments(bio);
+	rq->nr_cbio_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);

-	rq->bio = rq->biotail = bio;
+	rq->cbio = rq->bio = rq->biotail = bio;
 }
+
+void blk_rq_prep_restart(struct request *rq)
+{
+	struct bio *bio;
+
+	bio = rq->cbio = rq->bio;
+	if (bio) {
+		rq->nr_cbio_segments = bio_segments(bio);
+		rq->nr_cbio_sectors = bio_sectors(bio);
+		rq->hard_cur_sectors = bio_cur_sectors(bio);
+		rq->buffer = bio_data(bio);
+	}
+	rq->sector = rq->hard_sector;
+	rq->nr_sectors = rq->hard_nr_sectors;
+	rq->current_nr_sectors = rq->hard_cur_sectors;
+}
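
blk_rq_prep_restart() rewinds only the submission-side pointers, so a driver that has to re-issue a request from scratch (say, after a controller reset) might use it as in this sketch (hypothetical; builds on example_submit() from the earlier sketch):

	static void example_retry(struct request *rq)
	{
		/* submission pointers go back to rq->bio; completion state
		 * (the hard_* fields) already sits at the first unfinished
		 * bio, so nothing gets completed twice
		 */
		blk_rq_prep_restart(rq);
		example_submit(rq);
	}
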

 int __init blk_dev_init(void)
@@ -2169,6 +2279,7 @@ int __init blk_dev_init(void)
 	return 0;
 };

+EXPORT_SYMBOL(process_that_request_first);
 EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_chunk);
 EXPORT_SYMBOL(end_that_request_last);
@@ -212,7 +212,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 		}
 	}

-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);

 	/*
 	 * fill in request structure
@@ -227,8 +227,6 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	rq->sense_len = 0;

 	rq->flags |= REQ_BLOCK_PC;
-	if (writing)
-		rq->flags |= REQ_RW;

 	rq->hard_nr_sectors = rq->nr_sectors = nr_sectors;
 	rq->hard_cur_sectors = rq->current_nr_sectors = nr_sectors;
@@ -329,7 +327,7 @@ static int sg_scsi_ioctl(request_queue_t *q, struct block_device *bdev,
 		memset(buffer, 0, bytes);
 	}

-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);

 	cmdlen = COMMAND_SIZE(opcode);
@@ -373,8 +371,6 @@ static int sg_scsi_ioctl(request_queue_t *q, struct block_device *bdev,
 	rq->data = buffer;
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
-	if (in_len)
-		rq->flags |= REQ_RW;

 	blk_do_rq(q, bdev, rq);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
@@ -2070,6 +2070,7 @@ static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 	req.sense = sense;
 	req.cmd[0] = GPCMD_TEST_UNIT_READY;
+	req.flags |= REQ_QUIET;

 #if ! STANDARD_ATAPI
 	/* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
@@ -1479,7 +1479,7 @@ static int probe_lba_addressing (ide_drive_t *drive, int arg)
 static int set_lba_addressing (ide_drive_t *drive, int arg)
 {
-	return (probe_lba_addressing(drive, arg));
+	return probe_lba_addressing(drive, arg);
 }

 static void idedisk_add_settings(ide_drive_t *drive)
@@ -1566,6 +1566,18 @@ static void idedisk_setup (ide_drive_t *drive)
 	(void) probe_lba_addressing(drive, 1);

+	if (drive->addressing == 1) {
+		ide_hwif_t *hwif = HWIF(drive);
+		int max_s = 2048;
+
+		if (max_s > hwif->rqsize)
+			max_s = hwif->rqsize;
+
+		blk_queue_max_sectors(&drive->queue, max_s);
+	}
+
+	printk("%s: max request size: %dKiB\n", drive->name, drive->queue.max_sectors / 2);
+
 	/* Extract geometry if we did not already have one for the drive */
 	if (!drive->cyl || !drive->head || !drive->sect) {
 		drive->cyl = drive->bios_cyl = id->cyls;
@@ -998,6 +998,7 @@ EXPORT_SYMBOL(save_match);
 static void ide_init_queue(ide_drive_t *drive)
 {
 	request_queue_t *q = &drive->queue;
+	ide_hwif_t *hwif = HWIF(drive);
 	int max_sectors = 256;

 	/*
@@ -1013,8 +1014,10 @@ static void ide_init_queue(ide_drive_t *drive)
 	drive->queue_setup = 1;
 	blk_queue_segment_boundary(q, 0xffff);

-	if (HWIF(drive)->rqsize)
-		max_sectors = HWIF(drive)->rqsize;
+	if (!hwif->rqsize)
+		hwif->rqsize = hwif->addressing ? 256 : 65536;
+	if (hwif->rqsize < max_sectors)
+		max_sectors = hwif->rqsize;
 	blk_queue_max_sectors(q, max_sectors);

 	/* IDE DMA can do PRD_ENTRIES number of segments. */
@@ -131,6 +131,7 @@ struct bio {
 #define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
 #define bio_page(bio)		bio_iovec((bio))->bv_page
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
+#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
 #define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
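
The new bio_segments() counts only the vectors from bi_idx onward, so it shrinks as the bio is advanced. A sketch of the resulting invariant (assuming only the macros above; not part of this commit):

	/* sums the remaining vectors of a bio; when the bio sits on a
	 * segment boundary this equals bio_sectors(bio)
	 */
	static unsigned int remaining_sectors(struct bio *bio)
	{
		unsigned int i, sectors = 0;

		for (i = bio->bi_idx; i < bio->bi_vcnt; i++)
			sectors += bio_iovec_idx(bio, i)->bv_len >> 9;

		return sectors;
	}
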
@@ -226,12 +227,12 @@ extern void bio_check_pages_dirty(struct bio *bio);
 #ifdef CONFIG_HIGHMEM
 /*
  * remember to add offset! and never ever reenable interrupts between a
- * bio_kmap_irq and bio_kunmap_irq!!
+ * bvec_kmap_irq and bvec_kunmap_irq!!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
  * Hence the `extern inline'.
  */
-extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
+extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 {
 	unsigned long addr;
@@ -240,15 +241,15 @@ extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
 	 * balancing is a lot nicer this way
 	 */
 	local_irq_save(*flags);
-	addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_SRC_IRQ);
+	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

 	if (addr & ~PAGE_MASK)
 		BUG();

-	return (char *) addr + bio_offset(bio);
+	return (char *) addr + bvec->bv_offset;
 }

-extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
+extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 {
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
@@ -257,8 +258,19 @@ extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
 }
 #else
-#define bio_kmap_irq(bio, flags)	(bio_data(bio))
-#define bio_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
+#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
+#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
 #endif

+extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+				   unsigned long *flags)
+{
+	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+}
+
+#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)
+#define bio_kmap_irq(bio, flags) \
+	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+#define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
+
 #endif /* __LINUX_BIO_H */
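
With the kmap helpers now taking a bio_vec, a per-vector PIO copy no longer needs a bio whose bi_idx happens to point at the vector of interest. A sketch (not from this commit; the destination buffer is made up, and kernel context is assumed):

	/* copy one bio_vec out to dst; interrupts stay disabled
	 * between bvec_kmap_irq() and bvec_kunmap_irq()
	 */
	static void copy_bvec_out(struct bio_vec *bvec, char *dst)
	{
		unsigned long flags;
		char *src = bvec_kmap_irq(bvec, &flags);

		memcpy(dst, src, bvec->bv_len);
		bvec_kunmap_irq(src, &flags);
	}
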
@@ -11,6 +11,7 @@
 #include <linux/backing-dev.h>
 #include <linux/wait.h>
 #include <linux/mempool.h>
+#include <linux/bio.h>

 #include <asm/scatterlist.h>
@@ -36,25 +37,35 @@ struct request {
 				 * blkdev_dequeue_request! */
 	unsigned long flags;		/* see REQ_ bits below */

-	sector_t sector;
-	unsigned long nr_sectors;
+	/* Maintain bio traversal state for part by part I/O submission.
+	 * hard_* are block layer internals, no driver should touch them!
+	 */
+
+	sector_t sector;		/* next sector to submit */
+	unsigned long nr_sectors;	/* no. of sectors left to submit */
+	/* no. of sectors left to submit in the current segment */
 	unsigned int current_nr_sectors;

+	sector_t hard_sector;		/* next sector to complete */
+	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
+	/* no. of sectors left to complete in the current segment */
+	unsigned int hard_cur_sectors;
+
+	/* no. of segments left to submit in the current bio */
+	unsigned short nr_cbio_segments;
+	/* no. of sectors left to submit in the current bio */
+	unsigned long nr_cbio_sectors;
+
+	struct bio *cbio;		/* next bio to submit */
+	struct bio *bio;		/* next unfinished bio to complete */
+	struct bio *biotail;
+
 	void *elevator_private;

 	int rq_status;	/* should split this into a few status bits */
 	struct gendisk *rq_disk;
 	int errors;
 	unsigned long start_time;

-	sector_t hard_sector;		/* the hard_* are block layer
-					 * internals, no driver should
-					 * touch them
-					 */
-	unsigned long hard_nr_sectors;
-	unsigned int hard_cur_sectors;
-	struct bio *bio;
-	struct bio *biotail;

 	/* Number of scatter-gather DMA addr+len pairs after
 	 * physical address coalescing is performed.
@@ -284,6 +295,32 @@ struct request_queue
  */
 #define blk_queue_headactive(q, head_active)

+/* current index into bio being processed for submission */
+#define blk_rq_idx(rq)	((rq)->cbio->bi_vcnt - (rq)->nr_cbio_segments)
+/* current bio vector being processed */
+#define blk_rq_vec(rq)	(bio_iovec_idx((rq)->cbio, blk_rq_idx(rq)))
+/* current offset with respect to start of the segment being submitted */
+#define blk_rq_offset(rq) \
+	(((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9)
+
+/*
+ * temporarily mapping a (possible) highmem bio (typically for PIO transfer)
+ */
+
+/* Assumes rq->cbio != NULL */
+static inline char * rq_map_buffer(struct request *rq, unsigned long *flags)
+{
+	return (__bio_kmap_irq(rq->cbio, blk_rq_idx(rq), flags)
+		+ blk_rq_offset(rq));
+}
+
+static inline void rq_unmap_buffer(char *buffer, unsigned long *flags)
+{
+	__bio_kunmap_irq(buffer, flags);
+}
+
 /*
  * q->prep_rq_fn return values
  */
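
For PIO-style drivers, rq_map_buffer()/rq_unmap_buffer() resolve the submission cursor (rq->cbio, blk_rq_idx(), blk_rq_offset()) into a usable kernel address. A sketch of a transfer step (hypothetical; pio_write_block() is made up):

	/* transfer nsect sectors of rq by PIO; assumes nsect does not
	 * cross into the next segment, i.e.
	 * nsect <= rq->current_nr_sectors
	 */
	static void pio_out(struct request *rq, unsigned int nsect)
	{
		unsigned long flags;
		char *buf = rq_map_buffer(rq, &flags);	/* needs rq->cbio */

		pio_write_block(buf, nsect << 9);
		rq_unmap_buffer(buf, &flags);
	}
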
@@ -362,6 +399,7 @@ static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *);
+extern int process_that_request_first(struct request *, unsigned int);
 extern void end_request(struct request *req, int uptodate);

 static inline void blkdev_dequeue_request(struct request *req)