Commit e45635cc authored by Jens Axboe, committed by Linus Torvalds

[PATCH] read-ahead and failfast

Here's the patch to enable the failfast flag in the bio submission code,
and use it for multipath and readahead.
parent fe216ead
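The flow the patch sets up is: a submitter marks a bio as readahead (or is itself a readahead task), __make_request propagates that into REQ_FAILFAST on the request, and the low-level driver then fails the request on the first error instead of retrying. The following is a minimal user-space C model of that propagation; the mock_* types and make_request() here are illustrative stand-ins, not kernel API.

#include <stdio.h>

/* stand-ins for the bit layout the patch defines */
#define BIO_RW_AHEAD	1
#define PF_READAHEAD	0x00400000
#define REQ_FAILFAST	(1 << 1)

struct mock_bio  { unsigned long bi_rw; };
struct mock_task { unsigned long flags; };
struct mock_req  { unsigned long flags; };

/* mirrors the __make_request change: readahead is detected either from
 * the bio itself or from the submitting task's PF_READAHEAD flag */
static void make_request(struct mock_req *req, struct mock_bio *bio,
                         struct mock_task *task)
{
	int ra = (bio->bi_rw & (1UL << BIO_RW_AHEAD)) ||
		 (task->flags & PF_READAHEAD);

	req->flags = 0;
	if (ra)
		req->flags |= REQ_FAILFAST;	/* don't stack up retries */
}

int main(void)
{
	struct mock_bio bio = { .bi_rw = 1UL << BIO_RW_AHEAD };
	struct mock_task task = { .flags = 0 };
	struct mock_req req;

	make_request(&req, &bio, &task);
	/* a driver would now test this flag instead of retrying */
	printf("noretry: %s\n", (req.flags & REQ_FAILFAST) ? "yes" : "no");
	return 0;
}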
@@ -689,7 +689,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 static char *rq_flags[] = {
 	"REQ_RW",
-	"REQ_RW_AHEAD",
+	"REQ_FAILFAST",
 	"REQ_SOFTBARRIER",
 	"REQ_HARDBARRIER",
 	"REQ_CMD",
@@ -706,6 +706,10 @@ static char *rq_flags[] = {
 	"REQ_DRIVE_CMD",
 	"REQ_DRIVE_TASK",
 	"REQ_DRIVE_TASKFILE",
+	"REQ_PREEMPT",
+	"REQ_PM_SUSPEND",
+	"REQ_PM_RESUME",
+	"REQ_PM_SHUTDOWN",
 };

 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1793,7 +1797,7 @@ void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req, *freereq = NULL;
-	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier;
+	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra;
 	struct list_head *insert_here;
 	sector_t sector;
@@ -1814,6 +1818,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);

+	ra = bio_flagged(bio, BIO_RW_AHEAD) || current->flags & PF_READAHEAD;
+
 again:
 	insert_here = NULL;
 	spin_lock_irq(q->queue_lock);
@@ -1901,7 +1907,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 			/*
 			 * READA bit set
 			 */
-			if (bio_flagged(bio, BIO_RW_AHEAD))
+			if (ra)
 				goto end_io;

 			freereq = get_request_wait(q, rw);
@@ -1921,6 +1927,12 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	if (barrier)
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

+	/*
+	 * don't stack up retries for read ahead
+	 */
+	if (ra)
+		req->flags |= REQ_FAILFAST;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = sector;
 	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
...
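Two decisions hang off the ra test above: when the queue has no free request, readahead I/O is simply dropped (goto end_io) rather than sleeping in get_request_wait(), and any readahead request that does get queued is tagged REQ_FAILFAST. A small C sketch of that decision logic follows; queue_bio() and its arguments are hypothetical stand-ins for the kernel paths.

#include <stdbool.h>
#include <stdio.h>

#define REQ_FAILFAST 0x2

struct sketch_req { unsigned flags; };

/* models the branch in __make_request: readahead never waits for a
 * free request and never earns driver-level retries */
static bool queue_bio(bool ra, bool have_free_request,
                      struct sketch_req *req)
{
	if (!have_free_request) {
		if (ra)
			return false;	/* end_io: drop the readahead */
		/* non-readahead would block in get_request_wait() here */
	}
	req->flags = 0;
	if (ra)
		req->flags |= REQ_FAILFAST;
	return true;
}

int main(void)
{
	struct sketch_req req;

	printf("busy queue, ra: %s\n",
	       queue_bio(true, false, &req) ? "queued" : "dropped");
	if (queue_bio(true, true, &req))
		printf("free queue, ra: queued, failfast: %s\n",
		       (req.flags & REQ_FAILFAST) ? "yes" : "no");
	return 0;
}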
@@ -767,6 +767,9 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 	} else if (blk_fs_request(rq)) {
 		/* Handle errors from READ and WRITE requests. */

+		if (blk_noretry_request(rq))
+			cdrom_end_request(drive, 0);
+
 		if (sense_key == NOT_READY) {
 			/* Tray open. */
 			cdrom_saw_media_change (drive);
...
@@ -907,7 +907,7 @@ ide_startstop_t idedisk_error (ide_drive_t *drive, const char *msg, u8 stat)
 			/* force an abort */
 			hwif->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
 		}
-		if (rq->errors >= ERROR_MAX)
+		if (rq->errors >= ERROR_MAX || blk_noretry_request(rq))
 			DRIVER(drive)->end_request(drive, 0, 0);
 		else {
 			if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
...
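Both IDE error handlers treat failfast the same way: a noretry request is handled as if it had already hit ERROR_MAX. A compact model of that error-path decision, with an invented err_rq struct standing in for the real request:

#include <stdio.h>

#define ERROR_MAX 8

struct err_rq { int errors; int noretry; };

/* models the idedisk_error test: give up either after ERROR_MAX
 * attempts or immediately when the request is marked noretry */
static const char *next_action(const struct err_rq *rq)
{
	if (rq->errors >= ERROR_MAX || rq->noretry)
		return "end request with failure";
	return "retry (maybe after reset)";
}

int main(void)
{
	struct err_rq normal = { .errors = 1, .noretry = 0 };
	struct err_rq fast   = { .errors = 1, .noretry = 1 };

	printf("normal: %s\n", next_action(&normal));
	printf("failfast: %s\n", next_action(&fast));
	return 0;
}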
@@ -58,8 +58,7 @@
 #if (DISK_RECOVERY_TIME > 0)

-Error So the User Has To Fix the Compilation And Stop Hacking Port 0x43
-Does anyone ever use this anyway ??
+#error So the User Has To Fix the Compilation And Stop Hacking Port 0x43. Does anyone ever use this anyway ??

 /*
  * For really screwy hardware (hey, at least it *can* be used with Linux)
@@ -113,6 +112,13 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	if (!nr_sectors)
 		nr_sectors = rq->hard_cur_sectors;

+	/*
+	 * if failfast is set on a request, override number of sectors and
+	 * complete the whole request right now
+	 */
+	if (blk_noretry_request(rq) && !uptodate)
+		nr_sectors = rq->hard_nr_sectors;
+
 	/*
 	 * decide whether to reenable DMA -- 3 is a random magic for now,
 	 * if we DMA timeout more than 3 times, just stay in PIO
...
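The ide_end_request() change is what makes failfast actually short-circuit: on a failed noretry request, nr_sectors is bumped from the current chunk up to hard_nr_sectors, so a single call completes (and fails) everything still outstanding. A sketch of that arithmetic, under assumed meanings of the two sector counts:

#include <stdio.h>

/* hypothetical request with the two counts the patch uses:
 * hard_cur_sectors is the chunk being ended now, hard_nr_sectors is
 * everything still outstanding on the request */
struct sketch_rq {
	unsigned hard_cur_sectors;
	unsigned hard_nr_sectors;
	int noretry;	/* models blk_noretry_request(rq) */
};

static unsigned sectors_to_end(struct sketch_rq *rq, int uptodate,
                               unsigned nr_sectors)
{
	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	/* failfast + error: complete the whole request right now */
	if (rq->noretry && !uptodate)
		nr_sectors = rq->hard_nr_sectors;

	return nr_sectors;
}

int main(void)
{
	struct sketch_rq rq = { .hard_cur_sectors = 8,
				.hard_nr_sectors = 128, .noretry = 1 };

	printf("on error: end %u sectors\n", sectors_to_end(&rq, 0, 0));
	printf("on success: end %u sectors\n", sectors_to_end(&rq, 1, 0));
	return 0;
}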
@@ -179,6 +179,7 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+	mp_bh->bio.bi_flags |= (1 << BIO_RW_FAILFAST);
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
...
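For multipath the point of failfast is latency: a path failure should bounce back to md quickly so the bio can be resubmitted down another path, rather than sitting through low-level driver retries. A user-space sketch of that retry-at-a-higher-layer idea, with invented path and submit stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define NR_PATHS 3

/* pretend paths 0 and 1 are dead; a failfast submit returns the error
 * immediately instead of burning time on driver retries */
static bool submit_failfast(int path)
{
	return path >= 2;	/* only path 2 succeeds */
}

/* models multipath_make_request plus its end_io retry: the md layer,
 * not the disk driver, owns the retry policy */
static bool multipath_submit(void)
{
	for (int path = 0; path < NR_PATHS; path++) {
		if (submit_failfast(path)) {
			printf("I/O completed via path %d\n", path);
			return true;
		}
		printf("path %d failed fast, trying next\n", path);
	}
	return false;
}

int main(void)
{
	if (!multipath_submit())
		printf("all paths failed\n");
	return 0;
}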
@@ -119,10 +119,12 @@ struct bio {
  * bit 0 -- read (not set) or write (set)
  * bit 1 -- rw-ahead when set
  * bit 2 -- barrier
+ * bit 3 -- fail fast, don't want low level driver retries
  */
 #define BIO_RW		0
 #define BIO_RW_AHEAD	1
 #define BIO_RW_BARRIER	2
+#define BIO_RW_FAILFAST	3

 /*
  * various member access, note that bio_data should of course not be used
...
@@ -166,7 +166,7 @@ struct request {
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
-	__REQ_RW_AHEAD,		/* READA */
+	__REQ_FAILFAST,		/* no low level driver retries */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_CMD,		/* is a regular fs rw request */
@@ -195,7 +195,7 @@ enum rq_flag_bits {
 };

 #define REQ_RW		(1 << __REQ_RW)
-#define REQ_RW_AHEAD	(1 << __REQ_RW_AHEAD)
+#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
 #define REQ_CMD		(1 << __REQ_CMD)
@@ -364,6 +364,7 @@ struct request_queue
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
 #define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
+#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
 #define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
...
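The header changes follow the kernel's usual enum-plus-shift pattern: __REQ_FAILFAST names the bit position, REQ_FAILFAST is the derived mask, and blk_noretry_request() is the one test drivers are meant to use. The same pattern, compilable standalone (names taken from the patch, struct rq trimmed to just the flags field):

#include <stdio.h>

/* bit positions, as in enum rq_flag_bits */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
};

/* masks derived from the positions */
#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)

struct rq { unsigned long flags; };

#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)

int main(void)
{
	struct rq rq = { .flags = REQ_RW | REQ_FAILFAST };

	if (blk_noretry_request(&rq))
		printf("request is failfast: fail on first error\n");
	return 0;
}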
@@ -487,6 +487,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
+#define PF_READAHEAD	0x00400000	/* I am doing read-ahead */

 #ifdef CONFIG_SMP
 extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
...
@@ -94,9 +94,14 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 {
 	unsigned page_idx;
 	struct pagevec lru_pvec;
+	int ret = 0;
+
+	current->flags |= PF_READAHEAD;

-	if (mapping->a_ops->readpages)
-		return mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
+	if (mapping->a_ops->readpages) {
+		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
+		goto out;
+	}

 	pagevec_init(&lru_pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
@@ -112,7 +117,9 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 		}
 	}
 	pagevec_lru_add(&lru_pvec);
-	return 0;
+out:
+	current->flags &= ~PF_READAHEAD;
+	return ret;
 }

 /*
...
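read_pages() now brackets all of its page submission, including the early ->readpages() path, between setting and clearing PF_READAHEAD, funneling both exits through a single out label so the flag cannot leak past the function. That set/work/clear-on-one-exit shape, modeled in plain C with a global standing in for current->flags:

#include <stdio.h>

#define PF_READAHEAD 0x00400000

static unsigned long task_flags;	/* stands in for current->flags */

static int readpages_fast_path(int n)
{
	return n;	/* pretend ->readpages() handled everything */
}

/* mirrors the read_pages() structure: every return path goes through
 * "out" so PF_READAHEAD is always cleared again */
static int read_pages_model(int nr_pages, int have_readpages)
{
	int ret = 0;

	task_flags |= PF_READAHEAD;

	if (have_readpages) {
		ret = readpages_fast_path(nr_pages);
		goto out;
	}

	for (int i = 0; i < nr_pages; i++)
		;	/* per-page submission would happen here */
out:
	task_flags &= ~PF_READAHEAD;
	return ret;
}

int main(void)
{
	read_pages_model(16, 1);
	printf("flag cleared: %s\n",
	       (task_flags & PF_READAHEAD) ? "no" : "yes");
	return 0;
}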