Commit e73dff56 authored by Linus Torvalds

v2.4.0.5 -> v2.4.0.6

  - blk-14 from Jens Axboe
parent 950a2b0b
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION =-pre5
+EXTRAVERSION =-pre6
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
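(With SUBLEVEL = 1 and the new EXTRAVERSION, KERNELRELEASE expands to 2.4.1-pre6, the pre-release that the v2.4.0.6 tag in the headline denotes.)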
@@ -23,6 +23,7 @@
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/highmem.h>

 #ifdef CONFIG_MCA
 #include <linux/mca.h>
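Note: several hunks in this patch add #include <linux/highmem.h>. Presumably the blk-14 highmem rework stopped these files from picking the header up indirectly, so each now includes it explicitly.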
(One large diff is collapsed in the original view and not shown here.)
@@ -362,6 +362,7 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
 	struct request *req;
 	struct buffer_head *bh;
 	Scsi_Device * SDpnt;
+	int nsect;

 	ASSERT_LOCK(&io_request_lock, 0);
@@ -373,11 +374,13 @@ static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
 	}
 	do {
 		if ((bh = req->bh) != NULL) {
+			nsect = bh->b_size >> 9;
+			blk_finished_io(nsect);
 			req->bh = bh->b_reqnext;
-			req->nr_sectors -= bh->b_size >> 9;
-			req->sector += bh->b_size >> 9;
+			req->nr_sectors -= nsect;
+			req->sector += nsect;
 			bh->b_reqnext = NULL;
-			sectors -= bh->b_size >> 9;
+			sectors -= nsect;
 			bh->b_end_io(bh, uptodate);
 			if ((bh = req->bh) != NULL) {
 				req->current_nr_sectors = bh->b_size >> 9;
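In the hunk above, nsect caches bh->b_size >> 9 once per buffer head: the value feeds the new blk_finished_io() accounting, replaces three repeated shifts, and is captured before bh->b_end_io() hands the buffer head back. Sectors here are 512 bytes, so a byte count converts to sectors with a right shift by 9; a standalone sketch of the arithmetic (user-space C, illustrative values only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long b_size = 4096;	/* a typical buffer_head size in bytes */
		int nsect = b_size >> 9;	/* 4096 / 512 = 8 sectors */

		printf("%lu bytes = %d sectors\n", b_size, nsect);
		return 0;
	}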
@@ -134,12 +134,17 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block * sb = inode->i_sb;

 	if (sb) {
+		/* Don't do this for I_DIRTY_PAGES - that doesn't actually dirty the inode itself */
+		if (flags & (I_DIRTY | I_DIRTY_SYNC)) {
 			if (sb->s_op && sb->s_op->dirty_inode)
 				sb->s_op->dirty_inode(inode);
+		}
+
+		/* avoid the locking if we can */
+		if ((inode->i_state & flags) == flags)
+			return;

 		spin_lock(&inode_lock);
 		if ((inode->i_state & flags) != flags) {
 			inode->i_state |= flags;
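The point of the new code is a lock-free fast path: if every requested flag is already set in i_state, the function returns without touching inode_lock; otherwise it takes the lock and re-tests before mutating. A minimal user-space sketch of that check-then-lock pattern (assumptions: a pthread mutex standing in for the spinlock, a plain int for i_state):

	#include <pthread.h>
	#include <stdio.h>

	#define I_DIRTY 1

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int state;

	static void mark_dirty(int flags)
	{
		if ((state & flags) == flags)
			return;			/* fast path: already set, skip the lock */
		pthread_mutex_lock(&lock);
		if ((state & flags) != flags)	/* re-test under the lock */
			state |= flags;
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		mark_dirty(I_DIRTY);
		mark_dirty(I_DIRTY);		/* second call returns on the fast path */
		printf("state = %d\n", state);
		return 0;
	}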
@@ -17,6 +17,7 @@
 #include <linux/elf.h>
 #include <linux/elfcore.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>

 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -64,9 +64,10 @@ typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
 typedef void (unplug_device_fn) (void *q);

 /*
- * Default nr free requests per queue
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
  */
-#define QUEUE_NR_REQUESTS 512
+#define QUEUE_NR_REQUESTS 8192

 struct request_queue
 {
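The rewritten comment promises init-time scaling: the pool starts at QUEUE_NR_REQUESTS and is trimmed on small-memory machines. A hedged sketch of what such scaling could look like (the threshold and halving policy here are assumptions for illustration, not the actual ll_rw_blk logic):

	#include <stdio.h>

	#define QUEUE_NR_REQUESTS 8192

	/* hypothetical policy: halve the pool until it is modest next to RAM */
	static int scale_nr_requests(unsigned long ram_mb)
	{
		int nr = QUEUE_NR_REQUESTS;

		while (nr > 128 && (unsigned long)nr > ram_mb * 16)
			nr >>= 1;
		return nr;
	}

	int main(void)
	{
		printf("32 MB machine: %d requests\n", scale_nr_requests(32));
		printf("1 GB machine:  %d requests\n", scale_nr_requests(1024));
		return 0;
	}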
@@ -176,6 +177,8 @@ extern int * max_sectors[MAX_BLKDEV];
 extern int * max_segments[MAX_BLKDEV];

+extern atomic_t queued_sectors;
+
 #define MAX_SEGMENTS 128
 #define MAX_SECTORS (MAX_SEGMENTS*8)
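With MAX_SEGMENTS at 128, MAX_SECTORS evaluates to 1024 sectors, i.e. 512 KiB of I/O at the 512-byte sector size used throughout.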
@@ -203,5 +206,14 @@ static inline int get_hardsect_size(kdev_t dev)
 	return 512;
 }

+#define blk_finished_io(nsects)				\
+	atomic_sub(nsects, &queued_sectors);		\
+	if (atomic_read(&queued_sectors) < 0) {		\
+		printk("block: queued_sectors < 0\n");	\
+		atomic_set(&queued_sectors, 0);		\
+	}
+
+#define blk_started_io(nsects)				\
+	atomic_add(nsects, &queued_sectors);

 #endif
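One aside on the macro as merged: blk_finished_io() expands to two statements (the atomic_sub plus the if), so using it as the sole body of an unbraced if or else would splice incorrectly. The conventional guard is a do { ... } while (0) wrapper; a self-contained user-space analog (a plain int and printf standing in for atomic_t and printk) shows the shape:

	#include <stdio.h>

	static int queued_sectors;

	/* do/while(0) keeps the two statements glued together even when
	 * the macro is the body of an unbraced if or else */
	#define blk_finished_io(nsects)					\
		do {							\
			queued_sectors -= (nsects);			\
			if (queued_sectors < 0) {			\
				printf("block: queued_sectors < 0\n");	\
				queued_sectors = 0;			\
			}						\
		} while (0)

	int main(void)
	{
		queued_sectors = 8;
		if (queued_sectors)
			blk_finished_io(16);	/* parses as one statement */
		printf("queued_sectors = %d\n", queued_sectors);
		return 0;
	}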
@@ -825,20 +825,17 @@ struct super_operations {
 extern void __mark_inode_dirty(struct inode *, int);

 static inline void mark_inode_dirty(struct inode *inode)
 {
-	if ((inode->i_state & I_DIRTY) != I_DIRTY)
-		__mark_inode_dirty(inode, I_DIRTY);
+	__mark_inode_dirty(inode, I_DIRTY);
 }

 static inline void mark_inode_dirty_sync(struct inode *inode)
 {
-	if (!(inode->i_state & I_DIRTY_SYNC))
-		__mark_inode_dirty(inode, I_DIRTY_SYNC);
+	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 }

 static inline void mark_inode_dirty_pages(struct inode *inode)
 {
-	if (inode && !(inode->i_state & I_DIRTY_PAGES))
-		__mark_inode_dirty(inode, I_DIRTY_PAGES);
+	__mark_inode_dirty(inode, I_DIRTY_PAGES);
 }

 struct dquot_operations {
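With the fast path now inside __mark_inode_dirty() itself, the inline wrappers no longer need their own unlocked peeks at i_state. The NULL-inode guard that mark_inode_dirty_pages() used to carry moves to its caller, __set_page_dirty(), in the hunk that follows.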
@@ -143,7 +143,8 @@ void __set_page_dirty(struct page *page)
 	list_add(&page->list, &mapping->dirty_pages);
 	spin_unlock(&pagecache_lock);

-	mark_inode_dirty_pages(mapping->host);
+	if (mapping->host)
+		mark_inode_dirty_pages(mapping->host);
 }

 /**
@@ -9,6 +9,7 @@
 #include <linux/malloc.h>
 #include <linux/vmalloc.h>
 #include <linux/spinlock.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>

 #include <asm/uaccess.h>