Commit 0925bad3 authored by Linus Torvalds

v2.5.1 -> v2.5.1.1

- me: revert the "kill(-1..)" change.  POSIX isn't that clear on the
issue anyway, and the new behaviour breaks things.
- Jens Axboe: more bio updates
- Al Viro: rd_load cleanups. hpfs mount fix, mount cleanups
- Ingo Molnar: more raid updates
- Jakub Jelinek: fix Linux/x86 confusion about arg passing of "save_v86_state" and "do_signal"
- Trond Myklebust: fix NFS client race conditions
parent 51f4a834
...@@ -18,8 +18,8 @@ ...@@ -18,8 +18,8 @@
</authorgroup> </authorgroup>
<copyright> <copyright>
<year>2000</year> <year>2001</year>
<holder>Paul Russell</holder> <holder>Rusty Russell</holder>
</copyright> </copyright>
<legalnotice> <legalnotice>
...@@ -651,6 +651,29 @@ printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress)); ...@@ -651,6 +651,29 @@ printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress));
</para> </para>
</sect1> </sect1>
<sect1 id="routines-endian">
<title><function>cpu_to_be32()</function>/<function>be32_to_cpu()</function>/<function>cpu_to_le32()</function>/<function>le32_to_cpu()</function>
<filename class=headerfile>include/asm/byteorder.h</filename>
</title>
<para>
The <function>cpu_to_be32()</function> family (where the "32" can
be replaced by 64 or 16, and the "be" can be replaced by "le") are
the general way to do endian conversions in the kernel: they
return the converted value. All variations supply the reverse as
well: <function>be32_to_cpu()</function>, etc.
</para>
<para>
There are two major variations of these functions: the pointer
variation, such as <function>cpu_to_be32p()</function>, which takes
a pointer to the given type and returns the converted value, and
the "in-situ" family, such as
<function>cpu_to_be32s()</function>, which converts the value referred
to by the pointer in place and returns void.
</para>
</sect1>
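A minimal userspace sketch of the semantics just described, with htonl() standing in for cpu_to_be32() (this is not the kernel implementation, which lives in include/asm/byteorder.h and also comes in 16/64-bit and little-endian flavours):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>          /* htonl(): userspace stand-in */

    int main(void)
    {
            uint32_t x = 0x12345678, *p = &x;

            uint32_t a = htonl(x);  /* like cpu_to_be32(x):  value in, value out */
            uint32_t b = htonl(*p); /* like cpu_to_be32p(p): pointer in, value out */
            *p = htonl(*p);         /* like cpu_to_be32s(p): converts in place, void */

            printf("%08x %08x %08x\n", a, b, x);
            return 0;
    }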
<sect1 id="routines-local-irqs"> <sect1 id="routines-local-irqs">
<title><function>local_irq_save()</function>/<function>local_irq_restore()</function> <title><function>local_irq_save()</function>/<function>local_irq_restore()</function>
<filename class=headerfile>include/asm/system.h</filename> <filename class=headerfile>include/asm/system.h</filename>
......
...@@ -1322,8 +1322,10 @@ W: http://www.torque.net/sg ...@@ -1322,8 +1322,10 @@ W: http://www.torque.net/sg
S: Maintained S: Maintained
SCSI SUBSYSTEM SCSI SUBSYSTEM
P: Jens Axboe
M: axboe@suse.de
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
S: Unmaintained S: Maintained
SCSI TAPE DRIVER SCSI TAPE DRIVER
P: Kai Mäkisara P: Kai Mäkisara
......
VERSION = 2 VERSION = 2
PATCHLEVEL = 5 PATCHLEVEL = 5
SUBLEVEL = 1 SUBLEVEL = 2
EXTRAVERSION = EXTRAVERSION =-pre1
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from) int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{ {
......
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) ) ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1) #define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
asmlinkage struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs)); struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
{ {
struct tss_struct *tss; struct tss_struct *tss;
......
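These two hunks are the Jakub Jelinek fix from the changelog: asmlinkage marks a function whose arguments assembly code passes on the stack, while FASTCALL() on x86 expands to __attribute__((regparm(3))), i.e. register passing. A prototype carrying both contradicts itself and can disagree with the definition, leaving the callee reading its arguments from the wrong place; the fix keeps only the FASTCALL() form. A small illustration of how a regparm prototype governs a later plain definition (gcc on i386; add3() is a made-up function):

    /* build with: gcc -m32 regparm-demo.c */
    #include <stdio.h>

    #define FASTCALL(x)  x __attribute__((regparm(3)))  /* args in registers */

    int FASTCALL(add3(int a, int b, int c));   /* prototype sets the convention */

    int add3(int a, int b, int c)              /* definition inherits regparm(3) */
    {
            return a + b + c;
    }

    int main(void)
    {
            printf("%d\n", add3(1, 2, 3));     /* called with register passing */
            return 0;
    }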
...@@ -55,12 +55,6 @@ unsigned char __res[sizeof(bd_t)]; ...@@ -55,12 +55,6 @@ unsigned char __res[sizeof(bd_t)];
extern void m8xx_ide_init(void); extern void m8xx_ide_init(void);
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
extern unsigned long find_available_memory(void); extern unsigned long find_available_memory(void);
extern void m8xx_cpm_reset(uint); extern void m8xx_cpm_reset(uint);
......
...@@ -111,12 +111,6 @@ extern unsigned long Hash_size, Hash_mask; ...@@ -111,12 +111,6 @@ extern unsigned long Hash_size, Hash_mask;
extern int probingmem; extern int probingmem;
extern unsigned long loops_per_jiffy; extern unsigned long loops_per_jiffy;
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
#ifdef CONFIG_SOUND_MODULE #ifdef CONFIG_SOUND_MODULE
EXPORT_SYMBOL(ppc_cs4232_dma); EXPORT_SYMBOL(ppc_cs4232_dma);
EXPORT_SYMBOL(ppc_cs4232_dma2); EXPORT_SYMBOL(ppc_cs4232_dma2);
......
...@@ -35,7 +35,7 @@ int blk_do_rq(request_queue_t *q, struct request *rq) ...@@ -35,7 +35,7 @@ int blk_do_rq(request_queue_t *q, struct request *rq)
DECLARE_COMPLETION(wait); DECLARE_COMPLETION(wait);
int err = 0; int err = 0;
rq->flags |= REQ_BARRIER; rq->flags |= REQ_NOMERGE;
rq->waiting = &wait; rq->waiting = &wait;
elv_add_request(q, rq, 1); elv_add_request(q, rq, 1);
generic_unplug_device(q); generic_unplug_device(q);
...@@ -81,3 +81,5 @@ int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg) ...@@ -81,3 +81,5 @@ int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg)
#endif #endif
return err; return err;
} }
EXPORT_SYMBOL(block_ioctl);
...@@ -53,7 +53,7 @@ inline int bio_rq_in_between(struct bio *bio, struct request *rq, ...@@ -53,7 +53,7 @@ inline int bio_rq_in_between(struct bio *bio, struct request *rq,
* if .next is a valid request * if .next is a valid request
*/ */
next = rq->queuelist.next; next = rq->queuelist.next;
if (next == head) if (unlikely(next == head))
return 0; return 0;
next_rq = list_entry(next, struct request, queuelist); next_rq = list_entry(next, struct request, queuelist);
...@@ -121,20 +121,52 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) ...@@ -121,20 +121,52 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
return 0; return 0;
} }
int elevator_linus_merge(request_queue_t *q, struct request **req, inline int elv_try_merge(struct request *__rq, struct bio *bio)
struct list_head *head, struct bio *bio)
{ {
unsigned int count = bio_sectors(bio); unsigned int count = bio_sectors(bio);
struct list_head *entry = &q->queue_head;
int ret = ELEVATOR_NO_MERGE; if (!elv_rq_merge_ok(__rq, bio))
return ELEVATOR_NO_MERGE;
/*
* we can merge and sequence is ok, check if it's possible
*/
if (__rq->sector + __rq->nr_sectors == bio->bi_sector) {
return ELEVATOR_BACK_MERGE;
} else if (__rq->sector - count == bio->bi_sector) {
__rq->elevator_sequence -= count;
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
int elevator_linus_merge(request_queue_t *q, struct request **req,
struct bio *bio)
{
struct list_head *entry;
struct request *__rq; struct request *__rq;
int ret;
/*
* give a one-shot try to merging with the last touched
* request
*/
if (q->last_merge) {
__rq = list_entry_rq(q->last_merge);
BUG_ON(__rq->flags & REQ_STARTED);
if ((ret = elv_try_merge(__rq, bio))) {
*req = __rq;
return ret;
}
}
entry = &q->queue_head; entry = &q->queue_head;
while ((entry = entry->prev) != head) { ret = ELEVATOR_NO_MERGE;
while ((entry = entry->prev) != &q->queue_head) {
__rq = list_entry_rq(entry); __rq = list_entry_rq(entry);
prefetch(list_entry_rq(entry->prev));
/* /*
* simply "aging" of requests in queue * simply "aging" of requests in queue
*/ */
...@@ -144,26 +176,15 @@ int elevator_linus_merge(request_queue_t *q, struct request **req, ...@@ -144,26 +176,15 @@ int elevator_linus_merge(request_queue_t *q, struct request **req,
break; break;
if (!(__rq->flags & REQ_CMD)) if (!(__rq->flags & REQ_CMD))
continue; continue;
if (__rq->elevator_sequence < 0)
break;
if (!*req && bio_rq_in_between(bio, __rq, &q->queue_head)) if (!*req && bio_rq_in_between(bio, __rq, &q->queue_head))
*req = __rq; *req = __rq;
if (!elv_rq_merge_ok(__rq, bio))
continue;
if (__rq->elevator_sequence < count) if ((ret = elv_try_merge(__rq, bio))) {
break;
/*
* we can merge and sequence is ok, check if it's possible
*/
if (__rq->sector + __rq->nr_sectors == bio->bi_sector) {
ret = ELEVATOR_BACK_MERGE;
*req = __rq;
break;
} else if (__rq->sector - count == bio->bi_sector) {
ret = ELEVATOR_FRONT_MERGE;
__rq->elevator_sequence -= count;
*req = __rq; *req = __rq;
q->last_merge = &__rq->queuelist;
break; break;
} }
} }
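The restructuring above pulls the merge test out into elv_try_merge() and adds a one-shot cache: q->last_merge remembers the queue entry of the last request that accepted a merge, and the elevator retries it before walking the whole queue. Reduced to its shape (names as in the patch; aging and ordering checks elided), the pattern is:

    int merge_sketch(request_queue_t *q, struct request **req, struct bio *bio)
    {
            struct request *__rq;
            int ret;

            if (q->last_merge) {                    /* fast path: retry last hit */
                    __rq = list_entry_rq(q->last_merge);
                    if ((ret = elv_try_merge(__rq, bio))) {
                            *req = __rq;
                            return ret;
                    }
            }

            /* slow path: scan q->queue_head backwards; on a successful merge,
             * remember the spot with q->last_merge = &__rq->queuelist; */
            return ELEVATOR_NO_MERGE;
    }

The cache must be invalidated whenever the remembered request may no longer accept merges; later hunks do exactly that (barriers in elv_add_request_fn(), the REQ_NOMERGE paths in ll_back/front_merge_fn(), and request coalescing in ll_merge_requests_fn()).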
...@@ -183,7 +204,6 @@ void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int c ...@@ -183,7 +204,6 @@ void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int c
entry = &req->queuelist; entry = &req->queuelist;
while ((entry = entry->next) != &q->queue_head) { while ((entry = entry->next) != &q->queue_head) {
struct request *tmp; struct request *tmp;
prefetch(list_entry_rq(entry->next));
tmp = list_entry_rq(entry); tmp = list_entry_rq(entry);
tmp->elevator_sequence -= count; tmp->elevator_sequence -= count;
} }
...@@ -199,12 +219,20 @@ void elv_add_request_fn(request_queue_t *q, struct request *rq, ...@@ -199,12 +219,20 @@ void elv_add_request_fn(request_queue_t *q, struct request *rq,
struct list_head *insert_here) struct list_head *insert_here)
{ {
list_add(&rq->queuelist, insert_here); list_add(&rq->queuelist, insert_here);
/*
* new merges must not precede this barrier
*/
if (rq->flags & REQ_BARRIER)
q->last_merge = NULL;
else if (!q->last_merge)
q->last_merge = &rq->queuelist;
} }
struct request *elv_next_request_fn(request_queue_t *q) struct request *elv_next_request_fn(request_queue_t *q)
{ {
if (!blk_queue_empty(q)) if (!blk_queue_empty(q))
return list_entry(q->queue_head.next, struct request, queuelist); return list_entry_rq(q->queue_head.next);
return NULL; return NULL;
} }
...@@ -222,17 +250,24 @@ void elv_linus_exit(request_queue_t *q, elevator_t *e) ...@@ -222,17 +250,24 @@ void elv_linus_exit(request_queue_t *q, elevator_t *e)
* See if we can find a request that this buffer can be coalesced with. * See if we can find a request that this buffer can be coalesced with.
*/ */
int elevator_noop_merge(request_queue_t *q, struct request **req, int elevator_noop_merge(request_queue_t *q, struct request **req,
struct list_head *head, struct bio *bio) struct bio *bio)
{ {
unsigned int count = bio_sectors(bio);
struct list_head *entry = &q->queue_head; struct list_head *entry = &q->queue_head;
struct request *__rq; struct request *__rq;
int ret;
entry = &q->queue_head; if (q->last_merge) {
while ((entry = entry->prev) != head) { __rq = list_entry_rq(q->last_merge);
__rq = list_entry_rq(entry); BUG_ON(__rq->flags & REQ_STARTED);
if ((ret = elv_try_merge(__rq, bio))) {
*req = __rq;
return ret;
}
}
prefetch(list_entry_rq(entry->prev)); while ((entry = entry->prev) != &q->queue_head) {
__rq = list_entry_rq(entry);
if (__rq->flags & (REQ_BARRIER | REQ_STARTED)) if (__rq->flags & (REQ_BARRIER | REQ_STARTED))
break; break;
...@@ -240,18 +275,10 @@ int elevator_noop_merge(request_queue_t *q, struct request **req, ...@@ -240,18 +275,10 @@ int elevator_noop_merge(request_queue_t *q, struct request **req,
if (!(__rq->flags & REQ_CMD)) if (!(__rq->flags & REQ_CMD))
continue; continue;
if (!elv_rq_merge_ok(__rq, bio)) if ((ret = elv_try_merge(__rq, bio))) {
continue;
/*
* we can merge and sequence is ok, check if it's possible
*/
if (__rq->sector + __rq->nr_sectors == bio->bi_sector) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
} else if (__rq->sector - count == bio->bi_sector) {
*req = __rq; *req = __rq;
return ELEVATOR_FRONT_MERGE; q->last_merge = &__rq->queuelist;
return ret;
} }
} }
...@@ -267,6 +294,7 @@ int elevator_init(request_queue_t *q, elevator_t *e, elevator_t type) ...@@ -267,6 +294,7 @@ int elevator_init(request_queue_t *q, elevator_t *e, elevator_t type)
*e = type; *e = type;
INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
if (e->elevator_init_fn) if (e->elevator_init_fn)
return e->elevator_init_fn(q, e); return e->elevator_init_fn(q, e);
......
...@@ -117,6 +117,11 @@ inline request_queue_t *blk_get_queue(kdev_t dev) ...@@ -117,6 +117,11 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
return &blk_dev[MAJOR(dev)].request_queue; return &blk_dev[MAJOR(dev)].request_queue;
} }
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
/** /**
* blk_queue_make_request - define an alternate make_request function for a device * blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected * @q: the request queue for the device to be affected
...@@ -179,7 +184,6 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) ...@@ -179,7 +184,6 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
if (dma_addr == BLK_BOUNCE_ISA) { if (dma_addr == BLK_BOUNCE_ISA) {
init_emergency_isa_pool(); init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA; q->bounce_gfp = GFP_NOIO | GFP_DMA;
printk("isa pfn %lu, max low %lu, max %lu\n", bounce_pfn, blk_max_low_pfn, blk_max_pfn);
} else } else
q->bounce_gfp = GFP_NOHIGHIO; q->bounce_gfp = GFP_NOHIGHIO;
...@@ -319,7 +323,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) ...@@ -319,7 +323,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
/* /*
* standard prep_rq_fn that builds 10 byte cmds * standard prep_rq_fn that builds 10 byte cmds
*/ */
static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq) int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
{ {
int hard_sect = get_hardsect_size(rq->rq_dev); int hard_sect = get_hardsect_size(rq->rq_dev);
sector_t block = rq->hard_sector / (hard_sect >> 9); sector_t block = rq->hard_sector / (hard_sect >> 9);
...@@ -477,7 +481,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg ...@@ -477,7 +481,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
sg[nsegs - 1].length += nbytes; sg[nsegs - 1].length += nbytes;
} else { } else {
new_segment: new_segment:
sg[nsegs].address = NULL; memset(&sg[nsegs],0,sizeof(struct scatterlist));
sg[nsegs].page = bvec->bv_page; sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes; sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset; sg[nsegs].offset = bvec->bv_offset;
...@@ -540,11 +544,11 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req, ...@@ -540,11 +544,11 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
{ {
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) { if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
req->flags |= REQ_NOMERGE; req->flags |= REQ_NOMERGE;
q->last_merge = NULL;
return 0; return 0;
} }
if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)))
__BVEC_START(bio)))
return ll_new_mergeable(q, req, bio); return ll_new_mergeable(q, req, bio);
return ll_new_hw_segment(q, req, bio); return ll_new_hw_segment(q, req, bio);
...@@ -555,11 +559,11 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, ...@@ -555,11 +559,11 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
{ {
if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) { if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
req->flags |= REQ_NOMERGE; req->flags |= REQ_NOMERGE;
q->last_merge = NULL;
return 0; return 0;
} }
if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)))
__BVEC_START(req->bio)))
return ll_new_mergeable(q, req, bio); return ll_new_mergeable(q, req, bio);
return ll_new_hw_segment(q, req, bio); return ll_new_hw_segment(q, req, bio);
...@@ -568,7 +572,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, ...@@ -568,7 +572,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
static int ll_merge_requests_fn(request_queue_t *q, struct request *req, static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next) struct request *next)
{ {
int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
/* /*
...@@ -599,6 +603,9 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, ...@@ -599,6 +603,9 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
return 0; return 0;
/* Merge is OK... */ /* Merge is OK... */
if (q->last_merge == &next->queuelist)
q->last_merge = NULL;
req->nr_phys_segments = total_phys_segments; req->nr_phys_segments = total_phys_segments;
req->nr_hw_segments = total_hw_segments; req->nr_hw_segments = total_hw_segments;
return 1; return 1;
...@@ -799,7 +806,7 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock) ...@@ -799,7 +806,7 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock)
q->back_merge_fn = ll_back_merge_fn; q->back_merge_fn = ll_back_merge_fn;
q->front_merge_fn = ll_front_merge_fn; q->front_merge_fn = ll_front_merge_fn;
q->merge_requests_fn = ll_merge_requests_fn; q->merge_requests_fn = ll_merge_requests_fn;
q->prep_rq_fn = ll_10byte_cmd_build; q->prep_rq_fn = NULL;
q->plug_tq.sync = 0; q->plug_tq.sync = 0;
q->plug_tq.routine = &generic_unplug_device; q->plug_tq.routine = &generic_unplug_device;
q->plug_tq.data = q; q->plug_tq.data = q;
...@@ -1039,13 +1046,11 @@ static inline void attempt_back_merge(request_queue_t *q, struct request *rq) ...@@ -1039,13 +1046,11 @@ static inline void attempt_back_merge(request_queue_t *q, struct request *rq)
attempt_merge(q, rq); attempt_merge(q, rq);
} }
static inline void attempt_front_merge(request_queue_t *q, static inline void attempt_front_merge(request_queue_t *q, struct request *rq)
struct list_head *head,
struct request *rq)
{ {
struct list_head *prev = rq->queuelist.prev; struct list_head *prev = rq->queuelist.prev;
if (prev != head) if (prev != &q->queue_head)
attempt_merge(q, blkdev_entry_to_request(prev)); attempt_merge(q, blkdev_entry_to_request(prev));
} }
...@@ -1081,7 +1086,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1081,7 +1086,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
{ {
struct request *req, *freereq = NULL; struct request *req, *freereq = NULL;
int el_ret, latency = 0, rw, nr_sectors, cur_nr_sectors, barrier; int el_ret, latency = 0, rw, nr_sectors, cur_nr_sectors, barrier;
struct list_head *head, *insert_here; struct list_head *insert_here = &q->queue_head;
elevator_t *elevator = &q->elevator; elevator_t *elevator = &q->elevator;
sector_t sector; sector_t sector;
...@@ -1102,24 +1107,18 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1102,24 +1107,18 @@ static int __make_request(request_queue_t *q, struct bio *bio)
latency = elevator_request_latency(elevator, rw); latency = elevator_request_latency(elevator, rw);
barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw); barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);
again:
req = NULL;
head = &q->queue_head;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
insert_here = head->prev;
if (blk_queue_empty(q) || barrier) { if (blk_queue_empty(q) || barrier) {
blk_plug_device(q); blk_plug_device(q);
goto get_rq; goto get_rq;
} else if ((req = __elv_next_request(q))) { }
if (req->flags & REQ_STARTED)
head = head->next;
again:
req = NULL; req = NULL;
} insert_here = q->queue_head.prev;
el_ret = elevator->elevator_merge_fn(q, &req, head, bio); el_ret = elevator->elevator_merge_fn(q, &req, bio);
switch (el_ret) { switch (el_ret) {
case ELEVATOR_BACK_MERGE: case ELEVATOR_BACK_MERGE:
BUG_ON(req->flags & REQ_STARTED); BUG_ON(req->flags & REQ_STARTED);
...@@ -1157,7 +1156,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1157,7 +1156,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->sector = req->hard_sector = sector; req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->nr_sectors = req->hard_nr_sectors += nr_sectors;
drive_stat_acct(req, nr_sectors, 0); drive_stat_acct(req, nr_sectors, 0);
attempt_front_merge(q, head, req); attempt_front_merge(q, req);
goto out; goto out;
/* /*
...@@ -1188,7 +1187,6 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1188,7 +1187,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req = freereq; req = freereq;
freereq = NULL; freereq = NULL;
} else if ((req = get_request(q, rw)) == NULL) { } else if ((req = get_request(q, rw)) == NULL) {
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* /*
...@@ -1200,6 +1198,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1200,6 +1198,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
} }
freereq = get_request_wait(q, rw); freereq = get_request_wait(q, rw);
spin_lock_irq(q->queue_lock);
goto again; goto again;
} }
...@@ -1755,3 +1754,6 @@ EXPORT_SYMBOL(submit_bio); ...@@ -1755,3 +1754,6 @@ EXPORT_SYMBOL(submit_bio);
EXPORT_SYMBOL(blk_queue_assign_lock); EXPORT_SYMBOL(blk_queue_assign_lock);
EXPORT_SYMBOL(blk_phys_contig_segment); EXPORT_SYMBOL(blk_phys_contig_segment);
EXPORT_SYMBOL(blk_hw_contig_segment); EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(ll_10byte_cmd_build);
EXPORT_SYMBOL(blk_queue_prep_rq);
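Taken together with the q->prep_rq_fn = NULL hunk above, these exports make request preparation an opt-in, per-queue hook rather than a default wired into blk_init_queue(). A driver that wants 10-byte commands built for it now registers the hook itself, as ide-cd does later in this diff:

    blk_queue_prep_rq(&drive->queue, ll_10byte_cmd_build);

A driver with its own command format would register a private function of the same prep_rq_fn type instead (mydrv_prep_rq is hypothetical, and returning 0 for success is assumed to match ll_10byte_cmd_build's convention):

    static int mydrv_prep_rq(request_queue_t *q, struct request *rq)
    {
            /* build the device-specific command block for rq here */
            return 0;
    }

    blk_queue_prep_rq(q, mydrv_prep_rq);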
...@@ -1007,6 +1007,7 @@ int __init loop_init(void) ...@@ -1007,6 +1007,7 @@ int __init loop_init(void)
goto out_mem; goto out_mem;
blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request); blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);
blk_queue_bounce_limit(BLK_DEFAULT_QUEUE(MAJOR_NR), BLK_BOUNCE_HIGH);
for (i = 0; i < max_loop; i++) { for (i = 0; i < max_loop; i++) {
struct loop_device *lo = &loop_dev[i]; struct loop_device *lo = &loop_dev[i];
......
...@@ -43,26 +43,12 @@ ...@@ -43,26 +43,12 @@
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/fd.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/devfs_fs_kernel.h> #include <linux/devfs_fs_kernel.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/byteorder.h>
extern void wait_for_keypress(void);
/* /*
* 35 has been officially registered as the RAMDISK major number, but * 35 has been officially registered as the RAMDISK major number, but
...@@ -79,6 +65,8 @@ extern void wait_for_keypress(void); ...@@ -79,6 +65,8 @@ extern void wait_for_keypress(void);
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
static int initrd_users; static int initrd_users;
static spinlock_t initrd_users_lock = SPIN_LOCK_UNLOCKED; static spinlock_t initrd_users_lock = SPIN_LOCK_UNLOCKED;
unsigned long initrd_start, initrd_end;
int initrd_below_start_ok;
#endif #endif
/* Various static variables go here. Most are used only in the RAM disk code. /* Various static variables go here. Most are used only in the RAM disk code.
...@@ -111,70 +99,6 @@ int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */ ...@@ -111,70 +99,6 @@ int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */
*/ */
int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */ int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */
#ifndef MODULE
int rd_doload; /* 1 = load RAM disk, 0 = don't load */
int rd_prompt = 1; /* 1 = prompt for RAM disk, 0 = don't prompt */
int rd_image_start; /* starting block # of image */
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long initrd_start, initrd_end;
int mount_initrd = 1; /* zero if initrd should not be mounted */
int initrd_below_start_ok;
static int __init no_initrd(char *str)
{
mount_initrd = 0;
return 1;
}
__setup("noinitrd", no_initrd);
#endif
static int __init ramdisk_start_setup(char *str)
{
rd_image_start = simple_strtol(str,NULL,0);
return 1;
}
static int __init load_ramdisk(char *str)
{
rd_doload = simple_strtol(str,NULL,0) & 3;
return 1;
}
static int __init prompt_ramdisk(char *str)
{
rd_prompt = simple_strtol(str,NULL,0) & 1;
return 1;
}
static int __init ramdisk_size(char *str)
{
rd_size = simple_strtol(str,NULL,0);
return 1;
}
static int __init ramdisk_size2(char *str)
{
return ramdisk_size(str);
}
static int __init ramdisk_blocksize(char *str)
{
rd_blocksize = simple_strtol(str,NULL,0);
return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);
__setup("load_ramdisk=", load_ramdisk);
__setup("prompt_ramdisk=", prompt_ramdisk);
__setup("ramdisk=", ramdisk_size);
__setup("ramdisk_size=", ramdisk_size2);
__setup("ramdisk_blocksize=", ramdisk_blocksize);
#endif
/* /*
* Copyright (C) 2000 Linus Torvalds. * Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp. * 2000 Transmeta Corp.
...@@ -492,7 +416,7 @@ static void __exit rd_cleanup (void) ...@@ -492,7 +416,7 @@ static void __exit rd_cleanup (void)
} }
/* This is the registration and initialization section of the RAM disk driver */ /* This is the registration and initialization section of the RAM disk driver */
int __init rd_init (void) static int __init rd_init (void)
{ {
int i; int i;
...@@ -548,7 +472,28 @@ int __init rd_init (void) ...@@ -548,7 +472,28 @@ int __init rd_init (void)
module_init(rd_init); module_init(rd_init);
module_exit(rd_cleanup); module_exit(rd_cleanup);
/* loadable module support */ /* options - nonmodular */
#ifndef MODULE
static int __init ramdisk_size(char *str)
{
rd_size = simple_strtol(str,NULL,0);
return 1;
}
static int __init ramdisk_size2(char *str) /* kludge */
{
return ramdisk_size(str);
}
static int __init ramdisk_blocksize(char *str)
{
rd_blocksize = simple_strtol(str,NULL,0);
return 1;
}
__setup("ramdisk=", ramdisk_size);
__setup("ramdisk_size=", ramdisk_size2);
__setup("ramdisk_blocksize=", ramdisk_blocksize);
#endif
/* options - modular */
MODULE_PARM (rd_size, "1i"); MODULE_PARM (rd_size, "1i");
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
MODULE_PARM (rd_blocksize, "i"); MODULE_PARM (rd_blocksize, "i");
......
...@@ -459,6 +459,8 @@ void __init ide_init_amd74xx (ide_hwif_t *hwif) ...@@ -459,6 +459,8 @@ void __init ide_init_amd74xx (ide_hwif_t *hwif)
hwif->tuneproc = &amd74xx_tune_drive; hwif->tuneproc = &amd74xx_tune_drive;
hwif->speedproc = &amd74xx_tune_chipset; hwif->speedproc = &amd74xx_tune_chipset;
hwif->highmem = 1;
#ifndef CONFIG_BLK_DEV_IDEDMA #ifndef CONFIG_BLK_DEV_IDEDMA
hwif->drives[0].autotune = 1; hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1; hwif->drives[1].autotune = 1;
......
...@@ -594,7 +594,7 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive, ...@@ -594,7 +594,7 @@ static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive,
cdrom_end_request (1, drive); cdrom_end_request (1, drive);
*startstop = ide_error (drive, "request sense failure", stat); *startstop = ide_error (drive, "request sense failure", stat);
return 1; return 1;
} else if (rq->flags & REQ_PC) { } else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
/* All other functions, except for READ. */ /* All other functions, except for READ. */
struct completion *wait = NULL; struct completion *wait = NULL;
pc = (struct packet_command *) rq->special; pc = (struct packet_command *) rq->special;
...@@ -2675,6 +2675,8 @@ int ide_cdrom_setup (ide_drive_t *drive) ...@@ -2675,6 +2675,8 @@ int ide_cdrom_setup (ide_drive_t *drive)
set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE); set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE);
blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE); blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE);
blk_queue_prep_rq(&drive->queue, ll_10byte_cmd_build);
drive->special.all = 0; drive->special.all = 0;
drive->ready_stat = 0; drive->ready_stat = 0;
......
...@@ -336,23 +336,7 @@ typedef struct { ...@@ -336,23 +336,7 @@ typedef struct {
#define IDEFLOPPY_IOCTL_FORMAT_START 0x4602 #define IDEFLOPPY_IOCTL_FORMAT_START 0x4602
#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603 #define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603
/* #define IDEFLOPPY_RQ (REQ_SPECIAL)
* Special requests for our block device strategy routine.
*/
#define IDEFLOPPY_FIRST_RQ 90
/*
* IDEFLOPPY_PC_RQ is used to queue a packet command in the request queue.
*/
#define IDEFLOPPY_PC_RQ 90
#define IDEFLOPPY_LAST_RQ 90
/*
* A macro which can be used to check if a given request command
* originated in the driver or in the buffer cache layer.
*/
#define IDEFLOPPY_RQ_CMD(cmd) ((cmd >= IDEFLOPPY_FIRST_RQ) && (cmd <= IDEFLOPPY_LAST_RQ))
/* /*
* Error codes which are returned in rq->errors to the higher part * Error codes which are returned in rq->errors to the higher part
...@@ -696,7 +680,7 @@ static void idefloppy_end_request (byte uptodate, ide_hwgroup_t *hwgroup) ...@@ -696,7 +680,7 @@ static void idefloppy_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
/* Why does this happen? */ /* Why does this happen? */
if (!rq) if (!rq)
return; return;
if (!IDEFLOPPY_RQ_CMD (rq->cmd)) { if (rq->flags & IDEFLOPPY_RQ) {
ide_end_request (uptodate, hwgroup); ide_end_request (uptodate, hwgroup);
return; return;
} }
...@@ -776,7 +760,7 @@ static void idefloppy_queue_pc_head (ide_drive_t *drive,idefloppy_pc_t *pc,struc ...@@ -776,7 +760,7 @@ static void idefloppy_queue_pc_head (ide_drive_t *drive,idefloppy_pc_t *pc,struc
{ {
ide_init_drive_cmd (rq); ide_init_drive_cmd (rq);
rq->buffer = (char *) pc; rq->buffer = (char *) pc;
rq->cmd = IDEFLOPPY_PC_RQ; rq->flags = IDEFLOPPY_RQ;
(void) ide_do_drive_cmd (drive, rq, ide_preempt); (void) ide_do_drive_cmd (drive, rq, ide_preempt);
} }
...@@ -1192,6 +1176,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t ...@@ -1192,6 +1176,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
{ {
int block = sector / floppy->bs_factor; int block = sector / floppy->bs_factor;
int blocks = rq->nr_sectors / floppy->bs_factor; int blocks = rq->nr_sectors / floppy->bs_factor;
int cmd = rq_data_dir(rq);
#if IDEFLOPPY_DEBUG_LOG #if IDEFLOPPY_DEBUG_LOG
printk ("create_rw1%d_cmd: block == %d, blocks == %d\n", printk ("create_rw1%d_cmd: block == %d, blocks == %d\n",
...@@ -1200,18 +1185,18 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t ...@@ -1200,18 +1185,18 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
idefloppy_init_pc (pc); idefloppy_init_pc (pc);
if (test_bit (IDEFLOPPY_USE_READ12, &floppy->flags)) { if (test_bit (IDEFLOPPY_USE_READ12, &floppy->flags)) {
pc->c[0] = rq->cmd == READ ? IDEFLOPPY_READ12_CMD : IDEFLOPPY_WRITE12_CMD; pc->c[0] = cmd == READ ? IDEFLOPPY_READ12_CMD : IDEFLOPPY_WRITE12_CMD;
put_unaligned (htonl (blocks), (unsigned int *) &pc->c[6]); put_unaligned (htonl (blocks), (unsigned int *) &pc->c[6]);
} else { } else {
pc->c[0] = rq->cmd == READ ? IDEFLOPPY_READ10_CMD : IDEFLOPPY_WRITE10_CMD; pc->c[0] = cmd == READ ? IDEFLOPPY_READ10_CMD : IDEFLOPPY_WRITE10_CMD;
put_unaligned (htons (blocks), (unsigned short *) &pc->c[7]); put_unaligned (htons (blocks), (unsigned short *) &pc->c[7]);
} }
put_unaligned (htonl (block), (unsigned int *) &pc->c[2]); put_unaligned (htonl (block), (unsigned int *) &pc->c[2]);
pc->callback = &idefloppy_rw_callback; pc->callback = &idefloppy_rw_callback;
pc->rq = rq; pc->rq = rq;
pc->b_data = rq->buffer; pc->b_data = rq->buffer;
pc->b_count = rq->cmd == READ ? 0 : rq->bio->bi_size; pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
if (rq->cmd == WRITE) if (rq->flags & REQ_RW)
set_bit (PC_WRITING, &pc->flags); set_bit (PC_WRITING, &pc->flags);
pc->buffer = NULL; pc->buffer = NULL;
pc->request_transfer = pc->buffer_size = blocks * floppy->block_size; pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
...@@ -1227,8 +1212,8 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request ...@@ -1227,8 +1212,8 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
idefloppy_pc_t *pc; idefloppy_pc_t *pc;
#if IDEFLOPPY_DEBUG_LOG #if IDEFLOPPY_DEBUG_LOG
printk (KERN_INFO "rq_status: %d, rq_dev: %u, cmd: %d, errors: %d\n",rq->rq_status,(unsigned int) rq->rq_dev,rq->cmd,rq->errors); printk (KERN_INFO "rq_status: %d, rq_dev: %u, flags: %lx, errors: %d\n",rq->rq_status,(unsigned int) rq->rq_dev,rq->flags,rq->errors);
printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %ld\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors);
#endif /* IDEFLOPPY_DEBUG_LOG */ #endif /* IDEFLOPPY_DEBUG_LOG */
if (rq->errors >= ERROR_MAX) { if (rq->errors >= ERROR_MAX) {
...@@ -1240,9 +1225,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request ...@@ -1240,9 +1225,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
idefloppy_end_request (0, HWGROUP(drive)); idefloppy_end_request (0, HWGROUP(drive));
return ide_stopped; return ide_stopped;
} }
switch (rq->cmd) { if (rq->flags & REQ_CMD) {
case READ:
case WRITE:
if (rq->sector % floppy->bs_factor || rq->nr_sectors % floppy->bs_factor) { if (rq->sector % floppy->bs_factor || rq->nr_sectors % floppy->bs_factor) {
printk ("%s: unsupported r/w request size\n", drive->name); printk ("%s: unsupported r/w request size\n", drive->name);
idefloppy_end_request (0, HWGROUP(drive)); idefloppy_end_request (0, HWGROUP(drive));
...@@ -1250,12 +1233,10 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request ...@@ -1250,12 +1233,10 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
} }
pc = idefloppy_next_pc_storage (drive); pc = idefloppy_next_pc_storage (drive);
idefloppy_create_rw_cmd (floppy, pc, rq, block); idefloppy_create_rw_cmd (floppy, pc, rq, block);
break; } else if (rq->flags & IDEFLOPPY_RQ) {
case IDEFLOPPY_PC_RQ:
pc = (idefloppy_pc_t *) rq->buffer; pc = (idefloppy_pc_t *) rq->buffer;
break; } else {
default: blk_dump_rq_flags(rq, "ide-floppy: unsupported command in queue");
printk (KERN_ERR "ide-floppy: unsupported command %x in request queue\n", rq->cmd);
idefloppy_end_request (0,HWGROUP (drive)); idefloppy_end_request (0,HWGROUP (drive));
return ide_stopped; return ide_stopped;
} }
...@@ -1273,7 +1254,7 @@ static int idefloppy_queue_pc_tail (ide_drive_t *drive,idefloppy_pc_t *pc) ...@@ -1273,7 +1254,7 @@ static int idefloppy_queue_pc_tail (ide_drive_t *drive,idefloppy_pc_t *pc)
ide_init_drive_cmd (&rq); ide_init_drive_cmd (&rq);
rq.buffer = (char *) pc; rq.buffer = (char *) pc;
rq.cmd = IDEFLOPPY_PC_RQ; rq.flags = IDEFLOPPY_RQ;
return ide_do_drive_cmd (drive, &rq, ide_wait); return ide_do_drive_cmd (drive, &rq, ide_wait);
} }
......
...@@ -66,7 +66,7 @@ static mdk_personality_t *pers[MAX_PERSONALITY]; ...@@ -66,7 +66,7 @@ static mdk_personality_t *pers[MAX_PERSONALITY];
/* /*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 100 KB/sec, so the extra system load does not show up that much. * is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that * Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwith if the IO * the RAID driver will use the maximum available bandwith if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction * subsystem is idle. There is also an 'absolute maximum' reconstruction
...@@ -76,8 +76,8 @@ static mdk_personality_t *pers[MAX_PERSONALITY]; ...@@ -76,8 +76,8 @@ static mdk_personality_t *pers[MAX_PERSONALITY];
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max. * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
*/ */
static int sysctl_speed_limit_min = 100; static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 100000; static int sysctl_speed_limit_max = 200000;
static struct ctl_table_header *raid_table_header; static struct ctl_table_header *raid_table_header;
...@@ -3336,7 +3336,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok) ...@@ -3336,7 +3336,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
int md_do_sync(mddev_t *mddev, mdp_disk_t *spare) int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
{ {
mddev_t *mddev2; mddev_t *mddev2;
unsigned int max_sectors, currspeed, unsigned int max_sectors, currspeed = 0,
j, window, err, serialize; j, window, err, serialize;
unsigned long mark[SYNC_MARKS]; unsigned long mark[SYNC_MARKS];
unsigned long mark_cnt[SYNC_MARKS]; unsigned long mark_cnt[SYNC_MARKS];
...@@ -3376,8 +3376,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare) ...@@ -3376,8 +3376,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
max_sectors = mddev->sb->size << 1; max_sectors = mddev->sb->size << 1;
printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev)); printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n", printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n", sysctl_speed_limit_min);
sysctl_speed_limit_min);
printk(KERN_INFO "md: using maximum available idle IO bandwith " printk(KERN_INFO "md: using maximum available idle IO bandwith "
"(but not more than %d KB/sec) for reconstruction.\n", "(but not more than %d KB/sec) for reconstruction.\n",
sysctl_speed_limit_max); sysctl_speed_limit_max);
...@@ -3409,7 +3408,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare) ...@@ -3409,7 +3408,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
for (j = 0; j < max_sectors;) { for (j = 0; j < max_sectors;) {
int sectors; int sectors;
sectors = mddev->pers->sync_request(mddev, j); sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
if (sectors < 0) { if (sectors < 0) {
err = sectors; err = sectors;
goto out; goto out;
......
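The extra argument threaded into sync_request() above is the throttle behind the changelog's raid updates: it is true exactly while currspeed is below sysctl_speed_limit_min, telling the personality it may issue resync I/O at full tilt; once the guaranteed minimum is met, the personality can yield to normal I/O. Schematically (names from the patch; the speed bookkeeping is elided):

    for (j = 0; j < max_sectors;) {
            int sectors;

            /* 3rd arg: "go faster" - still below the guaranteed minimum */
            sectors = mddev->pers->sync_request(mddev, j,
                            currspeed < sysctl_speed_limit_min);
            if (sectors < 0)
                    break;          /* error; the real code jumps to its out label */
            j += sectors;
            /* ... recompute currspeed from the mark[] ring buffer ... */
    }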
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Author: Fabrice Bellard (fabrice.bellard@netgem.com) * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A. * Copyright (C) 2000 Netgem S.A.
* *
* $Id: nftlmount.c,v 1.23 2001/09/19 21:42:32 dwmw2 Exp $ * $Id: nftlmount.c,v 1.25 2001/11/30 16:46:27 dwmw2 Exp $
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
#define SECTORSIZE 512 #define SECTORSIZE 512
char nftlmountrev[]="$Revision: 1.23 $"; char nftlmountrev[]="$Revision: 1.25 $";
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update * various device information of the NFTL partition and Bad Unit Table. Update
...@@ -94,11 +94,11 @@ static int find_boot_record(struct NFTLrecord *nftl) ...@@ -94,11 +94,11 @@ static int find_boot_record(struct NFTLrecord *nftl)
continue; continue;
} }
#if 1 /* Some people seem to have devices without ECC or erase marks #if 0 /* Some people seem to have devices without ECC or erase marks
on the Media Header blocks. There are enough other sanity on the Media Header blocks. There are enough other sanity
checks in here that we can probably do without it. checks in here that we can probably do without it.
*/ */
if (le16_to_cpu ((h1.EraseMark | h1.EraseMark1) != ERASE_MARK)) { if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n", printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
block * nftl->EraseSize, nftl->mtd->index, block * nftl->EraseSize, nftl->mtd->index,
le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1)); le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
......
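Besides downgrading the sanity check from #if 1 to #if 0, this hunk fixes a misplaced parenthesis: the old code passed the whole comparison to le16_to_cpu(), so it byte-swapped a 0-or-1 truth value while comparing the raw on-disk (little-endian) marks against ERASE_MARK unconverted; harmless on little-endian hosts, wrong on big-endian ones. Side by side:

    /* old: converts the boolean result of the comparison */
    if (le16_to_cpu ((h1.EraseMark | h1.EraseMark1) != ERASE_MARK))

    /* new: converts the OR of the marks, then compares */
    if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK)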
...@@ -467,10 +467,11 @@ Scsi_Cmnd *icmd; ...@@ -467,10 +467,11 @@ Scsi_Cmnd *icmd;
static void do_ql_ihandl(int irq, void *dev_id, struct pt_regs * regs) static void do_ql_ihandl(int irq, void *dev_id, struct pt_regs * regs)
{ {
unsigned long flags; unsigned long flags;
struct Scsi_Host *host = dev_id;
spin_lock_irqsave(&io_request_lock, flags); spin_lock_irqsave(&host->host_lock, flags);
ql_ihandl(irq, dev_id, regs); ql_ihandl(irq, dev_id, regs);
spin_unlock_irqrestore(&io_request_lock, flags); spin_unlock_irqrestore(&host->host_lock, flags);
} }
#endif #endif
......
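This is the 2.5 SCSI locking rework surfacing in a driver: the global io_request_lock gives way to a per-host lock, so one adapter's interrupts no longer serialize against every other host in the system. The wrapper shape, generalized (real_handler() is a placeholder for the driver's actual handler, ql_ihandl() here):

    static void do_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct Scsi_Host *host = dev_id;    /* dev_id carries the host */
            unsigned long flags;

            spin_lock_irqsave(&host->host_lock, flags);   /* was io_request_lock */
            real_handler(irq, dev_id, regs);
            spin_unlock_irqrestore(&host->host_lock, flags);
    }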
...@@ -548,10 +548,8 @@ extern void print_status (int status); ...@@ -548,10 +548,8 @@ extern void print_status (int status);
*/ */
struct scsi_device { struct scsi_device {
/* private: */
/* /*
* This information is private to the scsi mid-layer. Wrapping it in a * This information is private to the scsi mid-layer.
* struct private is a way of marking it in a sort of C++ type of way.
*/ */
struct scsi_device *next; /* Used for linked list */ struct scsi_device *next; /* Used for linked list */
struct scsi_device *prev; /* Used for linked list */ struct scsi_device *prev; /* Used for linked list */
...@@ -563,7 +561,6 @@ struct scsi_device { ...@@ -563,7 +561,6 @@ struct scsi_device {
volatile unsigned short device_busy; /* commands actually active on low-level */ volatile unsigned short device_busy; /* commands actually active on low-level */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
/* public: */
unsigned int id, lun, channel; unsigned int id, lun, channel;
unsigned int manufacturer; /* Manufacturer of device, for using unsigned int manufacturer; /* Manufacturer of device, for using
...@@ -681,11 +678,7 @@ struct scsi_request { ...@@ -681,11 +678,7 @@ struct scsi_request {
*/ */
struct scsi_cmnd { struct scsi_cmnd {
int sc_magic; int sc_magic;
/* private: */
/*
* This information is private to the scsi mid-layer. Wrapping it in a
* struct private is a way of marking it in a sort of C++ type of way.
*/
struct Scsi_Host *host; struct Scsi_Host *host;
unsigned short state; unsigned short state;
unsigned short owner; unsigned short owner;
...@@ -727,8 +720,6 @@ struct scsi_cmnd { ...@@ -727,8 +720,6 @@ struct scsi_cmnd {
struct scsi_cmnd *bh_next; /* To enumerate the commands waiting struct scsi_cmnd *bh_next; /* To enumerate the commands waiting
to be processed. */ to be processed. */
/* public: */
unsigned int target; unsigned int target;
unsigned int lun; unsigned int lun;
unsigned int channel; unsigned int channel;
......
...@@ -59,12 +59,10 @@ ...@@ -59,12 +59,10 @@
*/ */
int scsi_init_io(Scsi_Cmnd *SCpnt) int scsi_init_io(Scsi_Cmnd *SCpnt)
{ {
struct request *req; struct request *req = &SCpnt->request;
struct scatterlist *sgpnt; struct scatterlist *sgpnt;
int count, gfp_mask; int count, gfp_mask;
req = &SCpnt->request;
/* /*
* First we need to know how many scatter gather segments are needed. * First we need to know how many scatter gather segments are needed.
*/ */
...@@ -85,14 +83,13 @@ int scsi_init_io(Scsi_Cmnd *SCpnt) ...@@ -85,14 +83,13 @@ int scsi_init_io(Scsi_Cmnd *SCpnt)
BUG_ON(!sgpnt); BUG_ON(!sgpnt);
SCpnt->request_buffer = (char *) sgpnt; SCpnt->request_buffer = (char *) sgpnt;
SCpnt->request_bufflen = 0; SCpnt->request_bufflen = req->nr_sectors << 9;
req->buffer = NULL; req->buffer = NULL;
/* /*
* Next, walk the list, and fill in the addresses and sizes of * Next, walk the list, and fill in the addresses and sizes of
* each segment. * each segment.
*/ */
SCpnt->request_bufflen = req->nr_sectors << 9;
count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer); count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer);
/* /*
...@@ -142,8 +139,7 @@ void scsi_initialize_merge_fn(Scsi_Device * SDpnt) ...@@ -142,8 +139,7 @@ void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
bounce_limit = BLK_BOUNCE_ANY; bounce_limit = BLK_BOUNCE_ANY;
else else
bounce_limit = SHpnt->pci_dev->dma_mask; bounce_limit = SHpnt->pci_dev->dma_mask;
} } else if (SHpnt->unchecked_isa_dma)
if (SHpnt->unchecked_isa_dma)
bounce_limit = BLK_BOUNCE_ISA; bounce_limit = BLK_BOUNCE_ISA;
blk_queue_bounce_limit(q, bounce_limit); blk_queue_bounce_limit(q, bounce_limit);
......
...@@ -287,9 +287,6 @@ static int sr_init_command(Scsi_Cmnd * SCpnt) ...@@ -287,9 +287,6 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
return 0; return 0;
} }
if (rq_data_dir(&SCpnt->request) == WRITE && !scsi_CDs[dev].device->writeable)
return 0;
/* /*
* we do lazy blocksize switching (when reading XA sectors, * we do lazy blocksize switching (when reading XA sectors,
* see CDROMREADMODE2 ioctl) * see CDROMREADMODE2 ioctl)
......
...@@ -142,6 +142,7 @@ struct bio *bio_alloc(int gfp_mask, int nr_iovecs) ...@@ -142,6 +142,7 @@ struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
bio->bi_io_vec = bvl; bio->bi_io_vec = bvl;
return bio; return bio;
} }
mempool_free(bio, bio_pool); mempool_free(bio, bio_pool);
return NULL; return NULL;
} }
...@@ -311,28 +312,6 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy) ...@@ -311,28 +312,6 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
return NULL; return NULL;
} }
#ifdef BIO_PAGEIO
static int bio_end_io_page(struct bio *bio)
{
struct page *page = bio_page(bio);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
SetPageError(page);
if (!PageError(page))
SetPageUptodate(page);
/*
* Run the hooks that have to be done when a page I/O has completed.
*/
if (PageTestandClearDecrAfter(page))
atomic_dec(&nr_async_pages);
UnlockPage(page);
bio_put(bio);
return 1;
}
#endif
static int bio_end_io_kio(struct bio *bio, int nr_sectors) static int bio_end_io_kio(struct bio *bio, int nr_sectors)
{ {
struct kiobuf *kio = (struct kiobuf *) bio->bi_private; struct kiobuf *kio = (struct kiobuf *) bio->bi_private;
......
...@@ -410,6 +410,8 @@ struct super_block *hpfs_read_super(struct super_block *s, void *options, ...@@ -410,6 +410,8 @@ struct super_block *hpfs_read_super(struct super_block *s, void *options,
/*s->s_hpfs_mounting = 1;*/ /*s->s_hpfs_mounting = 1;*/
dev = s->s_dev; dev = s->s_dev;
set_blocksize(dev, 512); set_blocksize(dev, 512);
s->s_blocksize = 512;
s->s_blocksize_bits = 9;
s->s_hpfs_fs_size = -1; s->s_hpfs_fs_size = -1;
if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1; if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1;
if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2; if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2;
...@@ -436,8 +438,6 @@ struct super_block *hpfs_read_super(struct super_block *s, void *options, ...@@ -436,8 +438,6 @@ struct super_block *hpfs_read_super(struct super_block *s, void *options,
/* Fill superblock stuff */ /* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC; s->s_magic = HPFS_SUPER_MAGIC;
s->s_blocksize = 512;
s->s_blocksize_bits = 9;
s->s_op = &hpfs_sops; s->s_op = &hpfs_sops;
s->s_hpfs_root = superblock->root; s->s_hpfs_root = superblock->root;
......
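The point of moving the two assignments is ordering: the mount-time reads in hpfs_map_sector() go through the buffer cache, which must agree with the block size just set on the device, so the 512-byte size is established before the first read rather than after it. The fixed sequence in hpfs_read_super() is simply:

    set_blocksize(dev, 512);        /* tell the device layer */
    s->s_blocksize = 512;           /* before any hpfs_map_sector() call */
    s->s_blocksize_bits = 9;        /* log2(512) */
    ...
    bootblock = hpfs_map_sector(s, 0, &bh0, 0);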
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
struct vfsmount *do_kern_mount(char *type, int flags, char *name, void *data); struct vfsmount *do_kern_mount(const char *type, int flags, char *name, void *data);
int do_remount_sb(struct super_block *sb, int flags, void * data); int do_remount_sb(struct super_block *sb, int flags, void * data);
void kill_super(struct super_block *sb); void kill_super(struct super_block *sb);
...@@ -622,9 +622,18 @@ static int do_move_mount(struct nameidata *nd, char *old_name) ...@@ -622,9 +622,18 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
static int do_add_mount(struct nameidata *nd, char *type, int flags, static int do_add_mount(struct nameidata *nd, char *type, int flags,
int mnt_flags, char *name, void *data) int mnt_flags, char *name, void *data)
{ {
struct vfsmount *mnt = do_kern_mount(type, flags, name, data); struct vfsmount *mnt;
int err = PTR_ERR(mnt); int err;
if (!type || !memchr(type, 0, PAGE_SIZE))
return -EINVAL;
/* we need capabilities... */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mnt = do_kern_mount(type, flags, name, data);
err = PTR_ERR(mnt);
if (IS_ERR(mnt)) if (IS_ERR(mnt))
goto out; goto out;
......
...@@ -161,15 +161,10 @@ static int nfs_prepare_write(struct file *file, struct page *page, unsigned offs ...@@ -161,15 +161,10 @@ static int nfs_prepare_write(struct file *file, struct page *page, unsigned offs
static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to) static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{ {
long status; long status;
loff_t pos = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + to;
struct inode *inode = page->mapping->host;
lock_kernel(); lock_kernel();
status = nfs_updatepage(file, page, offset, to-offset); status = nfs_updatepage(file, page, offset, to-offset);
unlock_kernel(); unlock_kernel();
/* most likely it's already done. CHECKME */
if (pos > inode->i_size)
inode->i_size = pos;
return status; return status;
} }
......
...@@ -107,17 +107,10 @@ nfs_read_inode(struct inode * inode) ...@@ -107,17 +107,10 @@ nfs_read_inode(struct inode * inode)
inode->i_rdev = 0; inode->i_rdev = 0;
/* We can't support UPDATE_ATIME(), since the server will reset it */ /* We can't support UPDATE_ATIME(), since the server will reset it */
inode->i_flags |= S_NOATIME; inode->i_flags |= S_NOATIME;
NFS_FILEID(inode) = 0;
NFS_FSID(inode) = 0;
NFS_FLAGS(inode) = 0;
INIT_LIST_HEAD(&inode->u.nfs_i.read); INIT_LIST_HEAD(&inode->u.nfs_i.read);
INIT_LIST_HEAD(&inode->u.nfs_i.dirty); INIT_LIST_HEAD(&inode->u.nfs_i.dirty);
INIT_LIST_HEAD(&inode->u.nfs_i.commit); INIT_LIST_HEAD(&inode->u.nfs_i.commit);
INIT_LIST_HEAD(&inode->u.nfs_i.writeback); INIT_LIST_HEAD(&inode->u.nfs_i.writeback);
inode->u.nfs_i.nread = 0;
inode->u.nfs_i.ndirty = 0;
inode->u.nfs_i.ncommit = 0;
inode->u.nfs_i.npages = 0;
NFS_CACHEINV(inode); NFS_CACHEINV(inode);
NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode); NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
NFS_ATTRTIMEO_UPDATE(inode) = jiffies; NFS_ATTRTIMEO_UPDATE(inode) = jiffies;
...@@ -655,19 +648,6 @@ nfs_fill_inode(struct inode *inode, struct nfs_fh *fh, struct nfs_fattr *fattr) ...@@ -655,19 +648,6 @@ nfs_fill_inode(struct inode *inode, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_op = &nfs_symlink_inode_operations; inode->i_op = &nfs_symlink_inode_operations;
else else
init_special_inode(inode, inode->i_mode, fattr->rdev); init_special_inode(inode, inode->i_mode, fattr->rdev);
/*
* Preset the size and mtime, as there's no need
* to invalidate the caches.
*/
inode->i_size = nfs_size_to_loff_t(fattr->size);
inode->i_mtime = nfs_time_to_secs(fattr->mtime);
inode->i_atime = nfs_time_to_secs(fattr->atime);
inode->i_ctime = nfs_time_to_secs(fattr->ctime);
NFS_CACHE_CTIME(inode) = fattr->ctime;
NFS_CACHE_MTIME(inode) = fattr->mtime;
NFS_CACHE_ISIZE(inode) = fattr->size;
NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
NFS_ATTRTIMEO_UPDATE(inode) = jiffies;
memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)); memcpy(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh));
} }
nfs_refresh_inode(inode, fattr); nfs_refresh_inode(inode, fattr);
...@@ -697,6 +677,9 @@ nfs_find_actor(struct inode *inode, unsigned long ino, void *opaque) ...@@ -697,6 +677,9 @@ nfs_find_actor(struct inode *inode, unsigned long ino, void *opaque)
return 0; return 0;
if (memcmp(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)) != 0) if (memcmp(&inode->u.nfs_i.fh, fh, sizeof(inode->u.nfs_i.fh)) != 0)
return 0; return 0;
/* Force an attribute cache update if inode->i_count == 0 */
if (!atomic_read(&inode->i_count))
NFS_CACHEINV(inode);
return 1; return 1;
} }
...@@ -797,7 +780,9 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error); ...@@ -797,7 +780,9 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error);
if (!S_ISREG(inode->i_mode)) if (!S_ISREG(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE; attr->ia_valid &= ~ATTR_SIZE;
filemap_fdatasync(inode->i_mapping);
error = nfs_wb_all(inode); error = nfs_wb_all(inode);
filemap_fdatawait(inode->i_mapping);
if (error) if (error)
goto out; goto out;
...@@ -825,6 +810,8 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error); ...@@ -825,6 +810,8 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error);
fattr.pre_ctime = NFS_CACHE_CTIME(inode); fattr.pre_ctime = NFS_CACHE_CTIME(inode);
fattr.valid |= NFS_ATTR_WCC; fattr.valid |= NFS_ATTR_WCC;
} }
/* Force an attribute cache update */
NFS_CACHEINV(inode);
error = nfs_refresh_inode(inode, &fattr); error = nfs_refresh_inode(inode, &fattr);
out: out:
return error; return error;
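Note the bracketing introduced above in the SETATTR path: queue all dirty pages, flush the NFS writes, then wait for completion, so no in-flight write can race with the attribute change on the server:

    filemap_fdatasync(inode->i_mapping);    /* start writeback of dirty pages */
    error = nfs_wb_all(inode);              /* flush outstanding NFS writes */
    filemap_fdatawait(inode->i_mapping);    /* wait for everything to complete */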
...@@ -965,6 +952,34 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) ...@@ -965,6 +952,34 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
return status; return status;
} }
/*
* nfs_fattr_obsolete - Test if attribute data is newer than cached data
* @inode: inode
* @fattr: attributes to test
*
* Avoid stuffing the attribute cache with obsolete information.
* We always accept updates if the attribute cache timed out, or if
* fattr->ctime is newer than our cached value.
* If fattr->ctime matches the cached value, we still accept the update
* if it increases the file size.
*/
static inline
int nfs_fattr_obsolete(struct inode *inode, struct nfs_fattr *fattr)
{
s64 cdif;
if (time_after(jiffies, NFS_READTIME(inode)+NFS_ATTRTIMEO(inode)))
goto out_valid;
if ((cdif = (s64)fattr->ctime - (s64)NFS_CACHE_CTIME(inode)) > 0)
goto out_valid;
/* Ugh... */
if (cdif == 0 && fattr->size > NFS_CACHE_ISIZE(inode))
goto out_valid;
return -1;
out_valid:
return 0;
}
/* /*
* Many nfs protocol calls return the new file attributes after * Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state * an operation. Here we update the inode to reflect the state
...@@ -982,6 +997,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) ...@@ -982,6 +997,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
{ {
__u64 new_size, new_mtime; __u64 new_size, new_mtime;
loff_t new_isize; loff_t new_isize;
time_t new_atime;
int invalid = 0; int invalid = 0;
dfprintk(VFS, "NFS: refresh_inode(%x/%ld ct=%d info=0x%x)\n", dfprintk(VFS, "NFS: refresh_inode(%x/%ld ct=%d info=0x%x)\n",
...@@ -1007,6 +1023,11 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) ...@@ -1007,6 +1023,11 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
new_size = fattr->size; new_size = fattr->size;
new_isize = nfs_size_to_loff_t(fattr->size); new_isize = nfs_size_to_loff_t(fattr->size);
new_atime = nfs_time_to_secs(fattr->atime);
/* Avoid races */
if (nfs_fattr_obsolete(inode, fattr))
goto out_nochange;
/* /*
* Update the read time so we don't revalidate too often. * Update the read time so we don't revalidate too often.
*/ */
...@@ -1056,7 +1077,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) ...@@ -1056,7 +1077,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
NFS_CACHE_CTIME(inode) = fattr->ctime; NFS_CACHE_CTIME(inode) = fattr->ctime;
inode->i_ctime = nfs_time_to_secs(fattr->ctime); inode->i_ctime = nfs_time_to_secs(fattr->ctime);
inode->i_atime = nfs_time_to_secs(fattr->atime); inode->i_atime = new_atime;
NFS_CACHE_MTIME(inode) = new_mtime; NFS_CACHE_MTIME(inode) = new_mtime;
inode->i_mtime = nfs_time_to_secs(new_mtime); inode->i_mtime = nfs_time_to_secs(new_mtime);
...@@ -1093,7 +1114,10 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) ...@@ -1093,7 +1114,10 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
if (invalid) if (invalid)
nfs_zap_caches(inode); nfs_zap_caches(inode);
return 0; return 0;
out_nochange:
if (new_atime - inode->i_atime > 0)
inode->i_atime = new_atime;
return 0;
out_changed: out_changed:
/* /*
* Big trouble! The inode has become a different object. * Big trouble! The inode has become a different object.
......
...@@ -270,14 +270,12 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res) ...@@ -270,14 +270,12 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
count = ntohl(*p++); count = ntohl(*p++);
hdrlen = (u8 *) p - (u8 *) iov->iov_base; hdrlen = (u8 *) p - (u8 *) iov->iov_base;
recvd = req->rq_rlen - hdrlen; if (iov->iov_len > hdrlen) {
if (p != iov[req->rq_rnr-1].iov_base) { dprintk("NFS: READ header is short. iovec will be shifted.\n");
/* Unexpected reply header size. Punt. xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
* XXX: Move iovec contents to align data on page
* boundary and adjust RPC header size guess */
printk(KERN_WARNING "NFS: Odd RPC header size in read reply: %d\n", hdrlen);
return -errno_NFSERR_IO;
} }
recvd = req->rq_rlen - hdrlen;
if (count > recvd) { if (count > recvd) {
printk(KERN_WARNING "NFS: server cheating in read reply: " printk(KERN_WARNING "NFS: server cheating in read reply: "
"count %d > recvd %d\n", count, recvd); "count %d > recvd %d\n", count, recvd);
...@@ -448,27 +446,23 @@ static int ...@@ -448,27 +446,23 @@ static int
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res) nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res)
{ {
struct iovec *iov = req->rq_rvec; struct iovec *iov = req->rq_rvec;
int hdrlen;
int status, nr; int status, nr;
u32 *end, *entry, len; u32 *end, *entry, len;
if ((status = ntohl(*p++))) if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status); return -nfs_stat_to_errno(status);
if ((void *) p != ((u8 *) iov->iov_base+iov->iov_len)) {
/* Unexpected reply header size. Punt. */ hdrlen = (u8 *) p - (u8 *) iov->iov_base;
printk(KERN_WARNING "NFS: Odd RPC header size in readdirres reply\n"); if (iov->iov_len > hdrlen) {
return -errno_NFSERR_IO; dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
} }
/* Get start and end address of XDR data */ /* Get start and end address of XDR data */
p = (u32 *) iov[1].iov_base; p = (u32 *) iov[1].iov_base;
end = (u32 *) ((u8 *) p + iov[1].iov_len); end = (u32 *) ((u8 *) p + iov[1].iov_len);
/* Get start and end of dirent buffer */
if (res->buffer != p) {
printk(KERN_ERR "NFS: Bad result buffer in readdir\n");
return -errno_NFSERR_IO;
}
for (nr = 0; *p++; nr++) { for (nr = 0; *p++; nr++) {
entry = p - 1; entry = p - 1;
if (p + 2 > end) if (p + 2 > end)
...@@ -598,13 +592,21 @@ nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args ...@@ -598,13 +592,21 @@ nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args
static int static int
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_readlinkres *res) nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_readlinkres *res)
{ {
struct iovec *iov = req->rq_rvec;
u32 *strlen; u32 *strlen;
char *string; char *string;
int hdrlen;
int status; int status;
unsigned int len; unsigned int len;
if ((status = ntohl(*p++))) if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status); return -nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
xdr_shift_iovec(iov, req->rq_rnr, iov->iov_len - hdrlen);
}
strlen = (u32*)res->buffer; strlen = (u32*)res->buffer;
/* Convert length of symlink */ /* Convert length of symlink */
len = ntohl(*strlen); len = ntohl(*strlen);
......
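
The three hunks above (READ, READDIR and READLINK) share one fixup, which is worth spelling out with concrete, made-up numbers:

/*
 * Illustration with invented sizes: iov[0] was sized for a worst-case
 * RPC reply header of, say, 128 bytes, but this reply's header is only
 * 96 bytes.  Then hdrlen == 96 and the last iov->iov_len - hdrlen == 32
 * bytes sitting in iov[0] are really payload.  xdr_shift_iovec()
 * realigns the received bytes so the payload lines up with the data
 * iovecs again and can be parsed in place, where the old code simply
 * gave up with -errno_NFSERR_IO.
 */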
...@@ -397,7 +397,7 @@ nfs_readpage_result(struct rpc_task *task) ...@@ -397,7 +397,7 @@ nfs_readpage_result(struct rpc_task *task)
{ {
struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata; struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
struct inode *inode = data->inode; struct inode *inode = data->inode;
int count = data->res.count; unsigned int count = data->res.count;
dprintk("NFS: %4d nfs_readpage_result, (status %d)\n", dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
task->tk_pid, task->tk_status); task->tk_pid, task->tk_status);
...@@ -408,9 +408,15 @@ nfs_readpage_result(struct rpc_task *task) ...@@ -408,9 +408,15 @@ nfs_readpage_result(struct rpc_task *task)
struct page *page = req->wb_page; struct page *page = req->wb_page;
nfs_list_remove_request(req); nfs_list_remove_request(req);
if (task->tk_status >= 0 && count >= 0) { if (task->tk_status >= 0) {
SetPageUptodate(page); if (count < PAGE_CACHE_SIZE) {
char *p = kmap(page);
memset(p + count, 0, PAGE_CACHE_SIZE - count);
kunmap(page);
count = 0;
} else
count -= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE;
SetPageUptodate(page);
} else } else
SetPageError(page); SetPageError(page);
flush_dcache_page(page); flush_dcache_page(page);
......
...@@ -213,6 +213,7 @@ nfs_writepage_async(struct file *file, struct inode *inode, struct page *page, ...@@ -213,6 +213,7 @@ nfs_writepage_async(struct file *file, struct inode *inode, struct page *page,
unsigned int offset, unsigned int count) unsigned int offset, unsigned int count)
{ {
struct nfs_page *req; struct nfs_page *req;
loff_t end;
int status; int status;
req = nfs_update_request(file, inode, page, offset, count); req = nfs_update_request(file, inode, page, offset, count);
...@@ -223,6 +224,10 @@ nfs_writepage_async(struct file *file, struct inode *inode, struct page *page, ...@@ -223,6 +224,10 @@ nfs_writepage_async(struct file *file, struct inode *inode, struct page *page,
req->wb_cred = get_rpccred(NFS_I(inode)->mm_cred); req->wb_cred = get_rpccred(NFS_I(inode)->mm_cred);
nfs_unlock_request(req); nfs_unlock_request(req);
nfs_strategy(inode); nfs_strategy(inode);
end = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + (loff_t)(offset + count);
if (inode->i_size < end)
inode->i_size = end;
out: out:
return status; return status;
} }
...@@ -781,6 +786,7 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign ...@@ -781,6 +786,7 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
struct dentry *dentry = file->f_dentry; struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode; struct inode *inode = dentry->d_inode;
struct nfs_page *req; struct nfs_page *req;
loff_t end;
int status = 0; int status = 0;
dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n", dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
...@@ -812,6 +818,10 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign ...@@ -812,6 +818,10 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
goto done; goto done;
status = 0; status = 0;
end = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + (loff_t)(offset + count);
if (inode->i_size < end)
inode->i_size = end;
/* If we wrote past the end of the page. /* If we wrote past the end of the page.
* Call the strategy routine so it can send out a bunch * Call the strategy routine so it can send out a bunch
* of requests. * of requests.
......
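
Both hunks in this file extend i_size the same way; a worked example of the offset arithmetic, assuming 4K pages (PAGE_CACHE_SHIFT == 12):

/*
 * A 412-byte write at offset 100 into page index 3 ends at
 *      end = (3 << 12) + (100 + 412) = 12288 + 512 = 12800,
 * so a file shorter than 12800 bytes grows to 12800.  The loff_t casts
 * keep the shift and the sum from overflowing 32 bits on large files.
 */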
...@@ -20,7 +20,6 @@ extern void add_blkdev_randomness(int major); ...@@ -20,7 +20,6 @@ extern void add_blkdev_randomness(int major);
#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
extern unsigned long initrd_start,initrd_end; extern unsigned long initrd_start,initrd_end;
extern int mount_initrd; /* zero if initrd should not be mounted */
extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */ extern int initrd_below_start_ok; /* 1 if it is not an error if initrd_start < memory_start */
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */ extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
...@@ -55,6 +54,9 @@ extern inline struct request *elv_next_request(request_queue_t *q) ...@@ -55,6 +54,9 @@ extern inline struct request *elv_next_request(request_queue_t *q)
while ((rq = __elv_next_request(q))) { while ((rq = __elv_next_request(q))) {
rq->flags |= REQ_STARTED; rq->flags |= REQ_STARTED;
if (&rq->queuelist == q->last_merge)
q->last_merge = NULL;
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break; break;
......
...@@ -129,17 +129,18 @@ enum blk_queue_state { ...@@ -129,17 +129,18 @@ enum blk_queue_state {
struct request_queue struct request_queue
{ {
/*
* the queue request freelist, one for reads and one for writes
*/
struct request_list rq[2];
/* /*
* Together with queue_head for cacheline sharing * Together with queue_head for cacheline sharing
*/ */
struct list_head queue_head; struct list_head queue_head;
struct list_head *last_merge;
elevator_t elevator; elevator_t elevator;
/*
* the queue request freelist, one for reads and one for writes
*/
struct request_list rq[2];
request_fn_proc *request_fn; request_fn_proc *request_fn;
merge_request_fn *back_merge_fn; merge_request_fn *back_merge_fn;
merge_request_fn *front_merge_fn; merge_request_fn *front_merge_fn;
...@@ -213,27 +214,25 @@ struct request_queue ...@@ -213,27 +214,25 @@ struct request_queue
extern unsigned long blk_max_low_pfn, blk_max_pfn; extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT) /*
#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT) * standard bounce addresses:
*
* BLK_BOUNCE_HIGH : bounce all highmem pages
* BLK_BOUNCE_ANY : don't bounce anything
* BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
*/
#define BLK_BOUNCE_HIGH ((blk_max_low_pfn + 1) << PAGE_SHIFT)
#define BLK_BOUNCE_ANY ((blk_max_pfn + 1) << PAGE_SHIFT)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
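
A sketch of how a driver might choose between these limits at init time. The mydrv_* name is invented, and blk_queue_bounce_limit() is assumed from ll_rw_blk.c rather than shown in this hunk; treat both as illustrative:

#include <linux/blkdev.h>

static void mydrv_setup_queue(request_queue_t *q, int isa_only)
{
        if (isa_only)
                /* old ISA DMA engine: bounce pages above the 16MB boundary */
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
        else
                /* 32-bit PCI master: bounce only highmem pages */
                blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
        /* a fully 64-bit capable device would pass BLK_BOUNCE_ANY */
}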
#ifdef CONFIG_HIGHMEM extern int init_emergency_isa_pool(void);
extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig); extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
extern void init_emergency_isa_pool(void);
extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{ {
create_bounce(q->bounce_pfn, q->bounce_gfp, bio); create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
} }
#else /* CONFIG_HIGHMEM */
#define blk_queue_bounce(q, bio) do { } while (0)
#define init_emergency_isa_pool() do { } while (0)
#endif /* CONFIG_HIGHMEM */
#define rq_for_each_bio(bio, rq) \ #define rq_for_each_bio(bio, rq) \
if ((rq->bio)) \ if ((rq->bio)) \
for (bio = (rq)->bio; bio; bio = bio->bi_next) for (bio = (rq)->bio; bio; bio = bio->bi_next)
...@@ -275,9 +274,8 @@ extern void blk_plug_device(request_queue_t *); ...@@ -275,9 +274,8 @@ extern void blk_plug_device(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *); extern void blk_recount_segments(request_queue_t *, struct bio *);
extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *); extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *); extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long); extern int block_ioctl(kdev_t, unsigned int, unsigned long);
extern int ll_10byte_cmd_build(request_queue_t *, struct request *);
/* /*
* Access functions for manipulating queue properties * Access functions for manipulating queue properties
...@@ -292,6 +290,9 @@ extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short); ...@@ -292,6 +290,9 @@ extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int); extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short); extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long); extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *); extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *); extern void generic_unplug_device(void *);
......
#ifndef _LINUX_ELEVATOR_H #ifndef _LINUX_ELEVATOR_H
#define _LINUX_ELEVATOR_H #define _LINUX_ELEVATOR_H
typedef void (elevator_fn) (struct request *, elevator_t *,
struct list_head *,
struct list_head *, int);
typedef int (elevator_merge_fn) (request_queue_t *, struct request **, typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
struct list_head *, struct bio *); struct bio *);
typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int); typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
...@@ -21,8 +17,7 @@ typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *); ...@@ -21,8 +17,7 @@ typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *);
struct elevator_s struct elevator_s
{ {
int read_latency; int latency[2];
int write_latency;
elevator_merge_fn *elevator_merge_fn; elevator_merge_fn *elevator_merge_fn;
elevator_merge_cleanup_fn *elevator_merge_cleanup_fn; elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
...@@ -35,11 +30,11 @@ struct elevator_s ...@@ -35,11 +30,11 @@ struct elevator_s
elevator_exit_fn *elevator_exit_fn; elevator_exit_fn *elevator_exit_fn;
}; };
int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct bio *); int elevator_noop_merge(request_queue_t *, struct request **, struct bio *);
void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int); void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_noop_merge_req(struct request *, struct request *); void elevator_noop_merge_req(struct request *, struct request *);
int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct bio *); int elevator_linus_merge(request_queue_t *, struct request **, struct bio *);
void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int); void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_linus_merge_req(struct request *, struct request *); void elevator_linus_merge_req(struct request *, struct request *);
int elv_linus_init(request_queue_t *, elevator_t *); int elv_linus_init(request_queue_t *, elevator_t *);
...@@ -69,32 +64,7 @@ extern void elevator_exit(request_queue_t *, elevator_t *); ...@@ -69,32 +64,7 @@ extern void elevator_exit(request_queue_t *, elevator_t *);
#define ELEVATOR_FRONT_MERGE 1 #define ELEVATOR_FRONT_MERGE 1
#define ELEVATOR_BACK_MERGE 2 #define ELEVATOR_BACK_MERGE 2
/* #define elevator_request_latency(e, rw) ((e)->latency[(rw) & 1])
* This is used in the elevator algorithm. We don't prioritise reads
* over writes any more --- although reads are more time-critical than
* writes, by treating them equally we increase filesystem throughput.
* This turns out to give better overall performance. -- sct
*/
#define IN_ORDER(s1,s2) \
((((s1)->rq_dev == (s2)->rq_dev && \
(s1)->sector < (s2)->sector)) || \
(s1)->rq_dev < (s2)->rq_dev)
#define BHRQ_IN_ORDER(bh, rq) \
((((bh)->b_rdev == (rq)->rq_dev && \
(bh)->b_rsector < (rq)->sector)) || \
(bh)->b_rdev < (rq)->rq_dev)
static inline int elevator_request_latency(elevator_t * elevator, int rw)
{
int latency;
latency = elevator->read_latency;
if (rw != READ)
latency = elevator->write_latency;
return latency;
}
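
The replacement macro leans on READ being 0 and WRITE being 1, so indexing with "(rw) & 1" returns exactly what the deleted inline computed with a branch. For instance (a sketch, not code from this patch):

        elevator_t e = ELEVATOR_LINUS;                  /* latency = { 8192, 16384 } */

        int rlat = elevator_request_latency(&e, READ);  /* 8192 */
        int wlat = elevator_request_latency(&e, WRITE); /* 16384 */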
/* /*
* will change once we move to a more complex data structure than a simple * will change once we move to a more complex data structure than a simple
...@@ -116,9 +86,7 @@ struct elv_linus_data { ...@@ -116,9 +86,7 @@ struct elv_linus_data {
#define ELEVATOR_NOOP \ #define ELEVATOR_NOOP \
((elevator_t) { \ ((elevator_t) { \
0, /* read_latency */ \ { 0, 0}, \
0, /* write_latency */ \
\
elevator_noop_merge, /* elevator_merge_fn */ \ elevator_noop_merge, /* elevator_merge_fn */ \
elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \ elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_noop_merge_req, /* elevator_merge_req_fn */ \ elevator_noop_merge_req, /* elevator_merge_req_fn */ \
...@@ -130,9 +98,7 @@ struct elv_linus_data { ...@@ -130,9 +98,7 @@ struct elv_linus_data {
#define ELEVATOR_LINUS \ #define ELEVATOR_LINUS \
((elevator_t) { \ ((elevator_t) { \
8192, /* read passovers */ \ { 8192, 16384 }, \
16384, /* write passovers */ \
\
elevator_linus_merge, /* elevator_merge_fn */ \ elevator_linus_merge, /* elevator_merge_fn */ \
elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \ elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \
elevator_linus_merge_req, /* elevator_merge_req_fn */ \ elevator_linus_merge_req, /* elevator_merge_req_fn */ \
......
...@@ -240,7 +240,7 @@ struct mdk_personality_s ...@@ -240,7 +240,7 @@ struct mdk_personality_s
int (*stop_resync)(mddev_t *mddev); int (*stop_resync)(mddev_t *mddev);
int (*restart_resync)(mddev_t *mddev); int (*restart_resync)(mddev_t *mddev);
int (*sync_request)(mddev_t *mddev, sector_t sector_nr); int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster);
}; };
......
...@@ -9,8 +9,8 @@ struct mirror_info { ...@@ -9,8 +9,8 @@ struct mirror_info {
int number; int number;
int raid_disk; int raid_disk;
kdev_t dev; kdev_t dev;
int sect_limit; sector_t head_position;
int head_position; atomic_t nr_pending;
/* /*
* State bits: * State bits:
...@@ -31,23 +31,21 @@ struct r1_private_data_s { ...@@ -31,23 +31,21 @@ struct r1_private_data_s {
int raid_disks; int raid_disks;
int working_disks; int working_disks;
int last_used; int last_used;
sector_t next_sect; sector_t next_seq_sect;
int sect_count;
mdk_thread_t *thread, *resync_thread; mdk_thread_t *thread, *resync_thread;
int resync_mirrors; int resync_mirrors;
mirror_info_t *spare; mirror_info_t *spare;
spinlock_t device_lock; spinlock_t device_lock;
/* for use when syncing mirrors: */ /* for use when syncing mirrors: */
unsigned long start_active, start_ready,
start_pending, start_future; spinlock_t resync_lock;
int cnt_done, cnt_active, cnt_ready, int nr_pending;
cnt_pending, cnt_future; int barrier;
int phase; sector_t next_resync;
int window;
wait_queue_head_t wait_done; wait_queue_head_t wait_idle;
wait_queue_head_t wait_ready; wait_queue_head_t wait_resume;
spinlock_t segment_lock;
mempool_t *r1bio_pool; mempool_t *r1bio_pool;
mempool_t *r1buf_pool; mempool_t *r1buf_pool;
...@@ -62,7 +60,8 @@ typedef struct r1_private_data_s conf_t; ...@@ -62,7 +60,8 @@ typedef struct r1_private_data_s conf_t;
#define mddev_to_conf(mddev) ((conf_t *) mddev->private) #define mddev_to_conf(mddev) ((conf_t *) mddev->private)
/* /*
* this is our 'private' 'collective' RAID1 buffer head. * this is our 'private' RAID1 bio.
*
* it contains information about what kind of IO operations were started * it contains information about what kind of IO operations were started
* for this RAID1 operation, and about their status: * for this RAID1 operation, and about their status:
*/ */
...@@ -83,6 +82,7 @@ struct r1bio_s { ...@@ -83,6 +82,7 @@ struct r1bio_s {
* if the IO is in READ direction, then this bio is used: * if the IO is in READ direction, then this bio is used:
*/ */
struct bio *read_bio; struct bio *read_bio;
int read_disk;
/* /*
* if the IO is in WRITE direction, then multiple bios are used: * if the IO is in WRITE direction, then multiple bios are used:
*/ */
...@@ -94,5 +94,5 @@ struct r1bio_s { ...@@ -94,5 +94,5 @@ struct r1bio_s {
/* bits for r1bio.state */ /* bits for r1bio.state */
#define R1BIO_Uptodate 1 #define R1BIO_Uptodate 1
#define R1BIO_SyncPhase 2
#endif #endif
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/vt.h> #include <linux/vt.h>
#include <linux/kd.h> #include <linux/kd.h>
#include <linux/tty.h>
/* /*
* Presently, a lot of graphics programs do not restore the contents of * Presently, a lot of graphics programs do not restore the contents of
......
...@@ -3,12 +3,8 @@ ...@@ -3,12 +3,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/devfs_fs_kernel.h> #include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h> #include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/blk.h> #include <linux/blk.h>
#include <linux/tty.h>
#include <linux/fd.h> #include <linux/fd.h>
#include <linux/nfs_fs.h> #include <linux/nfs_fs.h>
...@@ -18,8 +14,6 @@ ...@@ -18,8 +14,6 @@
#include <linux/ext2_fs.h> #include <linux/ext2_fs.h>
#include <linux/romfs_fs.h> #include <linux/romfs_fs.h>
#include <asm/uaccess.h>
#define BUILD_CRAMDISK #define BUILD_CRAMDISK
extern int get_filesystem_list(char * buf); extern int get_filesystem_list(char * buf);
...@@ -38,12 +32,21 @@ asmlinkage long sys_ioctl(int fd, int cmd, unsigned long arg); ...@@ -38,12 +32,21 @@ asmlinkage long sys_ioctl(int fd, int cmd, unsigned long arg);
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */ unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */
#endif static int __initdata mount_initrd = 1;
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; static int __init no_initrd(char *str)
{
mount_initrd = 0;
return 1;
}
__setup("noinitrd", no_initrd);
#else #else
static int rd_doload = 0; static int __initdata mount_initrd = 0;
#endif #endif
int __initdata rd_doload; /* 1 = load RAM disk, 0 = don't load */
int root_mountflags = MS_RDONLY | MS_VERBOSE; int root_mountflags = MS_RDONLY | MS_VERBOSE;
static char root_device_name[64]; static char root_device_name[64];
...@@ -52,6 +55,13 @@ kdev_t ROOT_DEV; ...@@ -52,6 +55,13 @@ kdev_t ROOT_DEV;
static int do_devfs = 0; static int do_devfs = 0;
static int __init load_ramdisk(char *str)
{
rd_doload = simple_strtol(str,NULL,0) & 3;
return 1;
}
__setup("load_ramdisk=", load_ramdisk);
static int __init readonly(char *str) static int __init readonly(char *str)
{ {
if (*str) if (*str)
...@@ -371,6 +381,24 @@ static void __init change_floppy(char *fmt, ...) ...@@ -371,6 +381,24 @@ static void __init change_floppy(char *fmt, ...)
#ifdef CONFIG_BLK_DEV_RAM #ifdef CONFIG_BLK_DEV_RAM
int __initdata rd_prompt = 1; /* 1 = prompt for RAM disk, 0 = don't prompt */
static int __init prompt_ramdisk(char *str)
{
rd_prompt = simple_strtol(str,NULL,0) & 1;
return 1;
}
__setup("prompt_ramdisk=", prompt_ramdisk);
int __initdata rd_image_start; /* starting block # of image */
static int __init ramdisk_start_setup(char *str)
{
rd_image_start = simple_strtol(str,NULL,0);
return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);
static int __init crd_load(int in_fd, int out_fd); static int __init crd_load(int in_fd, int out_fd);
/* /*
...@@ -588,7 +616,6 @@ static int __init rd_load_image(char *from) ...@@ -588,7 +616,6 @@ static int __init rd_load_image(char *from)
static int __init rd_load_disk(int n) static int __init rd_load_disk(int n)
{ {
#ifdef CONFIG_BLK_DEV_RAM #ifdef CONFIG_BLK_DEV_RAM
extern int rd_prompt;
if (rd_prompt) if (rd_prompt)
change_floppy("root floppy disk to be loaded into RAM disk"); change_floppy("root floppy disk to be loaded into RAM disk");
create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n), NULL); create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n), NULL);
...@@ -715,13 +742,10 @@ static int __init initrd_load(void) ...@@ -715,13 +742,10 @@ static int __init initrd_load(void)
*/ */
void prepare_namespace(void) void prepare_namespace(void)
{ {
int do_initrd = 0;
int is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR; int is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR;
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start) if (!initrd_start)
mount_initrd = 0; mount_initrd = 0;
if (mount_initrd)
do_initrd = 1;
real_root_dev = ROOT_DEV; real_root_dev = ROOT_DEV;
#endif #endif
sys_mkdir("/dev", 0700); sys_mkdir("/dev", 0700);
...@@ -732,7 +756,7 @@ void prepare_namespace(void) ...@@ -732,7 +756,7 @@ void prepare_namespace(void)
#endif #endif
create_dev("/dev/root", ROOT_DEV, NULL); create_dev("/dev/root", ROOT_DEV, NULL);
if (do_initrd) { if (mount_initrd) {
if (initrd_load() && ROOT_DEV != MKDEV(RAMDISK_MAJOR, 0)) { if (initrd_load() && ROOT_DEV != MKDEV(RAMDISK_MAJOR, 0)) {
handle_initrd(); handle_initrd();
goto out; goto out;
......
...@@ -116,11 +116,11 @@ EXPORT_SYMBOL(vmtruncate); ...@@ -116,11 +116,11 @@ EXPORT_SYMBOL(vmtruncate);
EXPORT_SYMBOL(find_vma); EXPORT_SYMBOL(find_vma);
EXPORT_SYMBOL(get_unmapped_area); EXPORT_SYMBOL(get_unmapped_area);
EXPORT_SYMBOL(init_mm); EXPORT_SYMBOL(init_mm);
EXPORT_SYMBOL(create_bounce);
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
EXPORT_SYMBOL(kmap_high); EXPORT_SYMBOL(kmap_high);
EXPORT_SYMBOL(kunmap_high); EXPORT_SYMBOL(kunmap_high);
EXPORT_SYMBOL(highmem_start_page); EXPORT_SYMBOL(highmem_start_page);
EXPORT_SYMBOL(create_bounce);
EXPORT_SYMBOL(kmap_prot); EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte); EXPORT_SYMBOL(kmap_pte);
#endif #endif
......
...@@ -649,10 +649,8 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid) ...@@ -649,10 +649,8 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
/* /*
* kill_something_info() interprets pid in interesting ways just like kill(2). * kill_something_info() interprets pid in interesting ways just like kill(2).
* *
* POSIX (2001) specifies "If pid is -1, sig shall be sent to all processes * POSIX specifies that kill(-1,sig) is unspecified, but what we have
* (excluding an unspecified set of system processes) for which the process * is probably wrong. Should make it like BSD or SYSV.
* has permission to send that signal."
* So, probably the process should also signal itself.
*/ */
static int kill_something_info(int sig, struct siginfo *info, int pid) static int kill_something_info(int sig, struct siginfo *info, int pid)
...@@ -665,7 +663,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) ...@@ -665,7 +663,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_task(p) { for_each_task(p) {
if (p->pid > 1) { if (p->pid > 1 && p != current) {
int err = send_sig_info(sig, info, p); int err = send_sig_info(sig, info, p);
++count; ++count;
if (err != -EPERM) if (err != -EPERM)
......
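
From userspace, the reverted behaviour looks like this minimal sketch (illustrative; whether other processes actually receive the signal still depends on permissions):

#include <signal.h>
#include <stdio.h>

int main(void)
{
        /* Signals every process we may signal, except init (pid 1) and,
         * with the "p != current" check restored, the caller itself. */
        if (kill(-1, SIGTERM) < 0)
                perror("kill");
        puts("caller still running");
        return 0;
}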
...@@ -14,8 +14,6 @@ export-objs := shmem.o filemap.o mempool.o ...@@ -14,8 +14,6 @@ export-objs := shmem.o filemap.o mempool.o
obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \ obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
shmem.o mempool.o shmem.o highmem.o mempool.o
obj-$(CONFIG_HIGHMEM) += highmem.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
...@@ -19,6 +19,19 @@ ...@@ -19,6 +19,19 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/mempool.h> #include <linux/mempool.h>
#include <linux/blkdev.h>
static mempool_t *page_pool, *isa_page_pool;
static void *page_pool_alloc(int gfp_mask, void *data)
{
return alloc_page(gfp_mask);
}
static void page_pool_free(void *page, void *data)
{
__free_page(page);
}
/* /*
* Virtual_count is not a pure "count". * Virtual_count is not a pure "count".
...@@ -28,6 +41,7 @@ ...@@ -28,6 +41,7 @@
* since the last TLB flush - so we can't use it. * since the last TLB flush - so we can't use it.
* n means that there are (n-1) current users of it. * n means that there are (n-1) current users of it.
*/ */
#ifdef CONFIG_HIGHMEM
static int pkmap_count[LAST_PKMAP]; static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr; static unsigned int last_pkmap_nr;
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED; static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;
...@@ -185,19 +199,6 @@ void kunmap_high(struct page *page) ...@@ -185,19 +199,6 @@ void kunmap_high(struct page *page)
} }
#define POOL_SIZE 64 #define POOL_SIZE 64
#define ISA_POOL_SIZE 16
static mempool_t *page_pool, *isa_page_pool;
static void *page_pool_alloc(int gfp_mask, void *data)
{
return alloc_page(gfp_mask);
}
static void page_pool_free(void *page, void *data)
{
__free_page(page);
}
static __init int init_emergency_pool(void) static __init int init_emergency_pool(void)
{ {
...@@ -211,11 +212,37 @@ static __init int init_emergency_pool(void) ...@@ -211,11 +212,37 @@ static __init int init_emergency_pool(void)
page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
if (!page_pool) if (!page_pool)
BUG(); BUG();
printk("highmem bounce pool size: %d pages and bhs.\n", POOL_SIZE); printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
return 0; return 0;
} }
__initcall(init_emergency_pool);
/*
* highmem version, map in to vec
*/
static inline void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
unsigned long flags;
unsigned char *vto;
local_irq_save(flags);
vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto, KM_BOUNCE_READ);
local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */
#define bounce_copy_vec(to, vfrom) \
memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
#endif
#define ISA_POOL_SIZE 16
/* /*
* gets called "every" time someone init's a queue with BLK_BOUNCE_ISA * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
* as the max address, so check if the pool has already been created. * as the max address, so check if the pool has already been created.
...@@ -233,8 +260,6 @@ int init_emergency_isa_pool(void) ...@@ -233,8 +260,6 @@ int init_emergency_isa_pool(void)
return 0; return 0;
} }
__initcall(init_emergency_pool);
/* /*
* Simple bounce buffer support for highmem pages. Depending on the * Simple bounce buffer support for highmem pages. Depending on the
* queue gfp mask set, *to may or may not be a highmem page. kmap it * queue gfp mask set, *to may or may not be a highmem page. kmap it
...@@ -242,8 +267,7 @@ __initcall(init_emergency_pool); ...@@ -242,8 +267,7 @@ __initcall(init_emergency_pool);
*/ */
static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from) static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{ {
unsigned char *vto, *vfrom; unsigned char *vfrom;
unsigned long flags;
struct bio_vec *tovec, *fromvec; struct bio_vec *tovec, *fromvec;
int i; int i;
...@@ -258,11 +282,7 @@ static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from) ...@@ -258,11 +282,7 @@ static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
vfrom = page_address(fromvec->bv_page) + fromvec->bv_offset; vfrom = page_address(fromvec->bv_page) + fromvec->bv_offset;
local_irq_save(flags); bounce_copy_vec(tovec, vfrom);
vto = kmap_atomic(tovec->bv_page, KM_BOUNCE_READ);
memcpy(vto + tovec->bv_offset, vfrom, tovec->bv_len);
kunmap_atomic(vto, KM_BOUNCE_READ);
local_irq_restore(flags);
} }
} }
...@@ -336,10 +356,25 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig) ...@@ -336,10 +356,25 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
BUG_ON((*bio_orig)->bi_idx); BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(gfp & GFP_DMA)) { if (!(gfp & GFP_DMA)) {
if (pfn >= blk_max_pfn)
return;
#ifndef CONFIG_HIGHMEM
/*
* should not hit for non-highmem case
*/
BUG();
#endif
bio_gfp = GFP_NOHIGHIO; bio_gfp = GFP_NOHIGHIO;
pool = page_pool; pool = page_pool;
} else { } else {
BUG_ON(!isa_page_pool);
bio_gfp = GFP_NOIO; bio_gfp = GFP_NOIO;
pool = isa_page_pool; pool = isa_page_pool;
} }
......
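
One hedged reading of how the "+ 1" added to the BLK_BOUNCE_* limits pairs with this early return, assuming a queue's bounce_pfn is derived as limit >> PAGE_SHIFT:

/*
 * Assumption for illustration: bounce_pfn == limit >> PAGE_SHIFT.
 * A queue set up with BLK_BOUNCE_ANY then arrives here with
 *      pfn == blk_max_pfn + 1 >= blk_max_pfn,
 * so the "pfn >= blk_max_pfn" test fires and create_bounce() returns
 * without walking a single bio segment.
 */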