Commit d51c905a authored by Linus Torvalds

v2.4.10.0.1 -> v2.4.10.0.2

  - more buffers-in-pagecache coherency
parent a41cd6e4
VERSION = 2
PATCHLEVEL = 4
-SUBLEVEL = 10
+SUBLEVEL = 11
-EXTRAVERSION =
+EXTRAVERSION =-pre1
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
...
@@ -261,18 +261,14 @@ static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value
    u32 data;
    result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
                            PCI_FUNC(dev->devfn), where, 2, &data);
-   *value = (u8)data;
+   *value = (u16)data;
    return result;
}
static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{
-   int result;
-   u32 data;
-   result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
-                           PCI_FUNC(dev->devfn), where, 4, &data);
-   *value = (u8)data;
-   return result;
+   return pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+                         PCI_FUNC(dev->devfn), where, 4, value);
}
static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value)
...
@@ -2105,13 +2105,12 @@ ppp_register_compressor(struct compressor *cp)
{
    struct compressor_entry *ce;
    int ret;
    spin_lock(&compressor_list_lock);
    ret = -EEXIST;
    if (find_comp_entry(cp->compress_proto) != 0)
        goto out;
    ret = -ENOMEM;
-   ce = kmalloc(sizeof(struct compressor_entry), GFP_KERNEL);
+   ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
    if (ce == 0)
        goto out;
    ret = 0;
@@ -2216,11 +2215,11 @@ ppp_create_interface(int unit, int *retp)
    /* Create a new ppp structure and link it before `list'. */
    ret = -ENOMEM;
-   ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+   ppp = kmalloc(sizeof(struct ppp), GFP_ATOMIC);
    if (ppp == 0)
        goto out;
    memset(ppp, 0, sizeof(struct ppp));
-   dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+   dev = kmalloc(sizeof(struct net_device), GFP_ATOMIC);
    if (dev == 0) {
        kfree(ppp);
        goto out;
@@ -2285,6 +2284,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
static void ppp_destroy_interface(struct ppp *ppp)
{
    struct net_device *dev;
+   int n_channels ;
    spin_lock(&all_ppp_lock);
    list_del(&ppp->file.list);
@@ -2314,6 +2314,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
#endif /* CONFIG_PPP_FILTER */
    dev = ppp->dev;
    ppp->dev = 0;
+   n_channels = ppp->n_channels ;
    ppp_unlock(ppp);
    if (dev) {
@@ -2329,7 +2330,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
     * ppp structure. Otherwise we leave it around until the
     * last channel disconnects from it.
     */
-   if (ppp->n_channels == 0)
+   if (n_channels == 0)
        kfree(ppp);
    spin_unlock(&all_ppp_lock);
...
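Note on the ppp_generic.c hunks above: the kmalloc() calls run while compressor_list_lock (or all_ppp_lock) is held, and a GFP_KERNEL allocation may sleep, which is not allowed under a spinlock, hence the switch to GFP_ATOMIC; likewise ppp->n_channels is copied into a local before ppp_unlock(), so the structure is not read after the point where it may already have been freed. A minimal sketch of the allocation rule (illustrative only, not part of this commit; demo_lock and demo_alloc_under_lock are hypothetical names):

#include <linux/slab.h>
#include <linux/spinlock.h>

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

/* Illustrative sketch: an allocation made while a spinlock is held
 * must not sleep, so it has to use GFP_ATOMIC. */
static void *demo_alloc_under_lock(size_t size)
{
    void *p;

    spin_lock(&demo_lock);
    p = kmalloc(size, GFP_ATOMIC);  /* never sleeps; may return NULL */
    spin_unlock(&demo_lock);
    return p;
}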
@@ -541,12 +541,16 @@ int pppoe_release(struct socket *sock)
    sk->state = PPPOX_DEAD;
    po = sk->protinfo.pppox;
-   if (po->pppoe_pa.sid)
+   if (po->pppoe_pa.sid) {
        delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+       po->pppoe_pa.sid = 0 ;
+   }
    if (po->pppoe_dev)
        dev_put(po->pppoe_dev);
+   po->pppoe_dev = NULL ;
    sock_orphan(sk);
    sock->sk = NULL;
...
@@ -2528,7 +2528,7 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
    int i;
    int ret = 0;
    urb_priv_t *urb_priv = urb->hcpriv;
-   struct list_head *p = urb_priv->desc_list.next;
+   struct list_head *p = urb_priv->desc_list.next, *p_tmp;
    uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);
    dbg("urb contains iso request");
@@ -2578,8 +2578,9 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
        dbg("process_iso: %i: len:%d %08x status:%x",
            i, urb->iso_frame_desc[i].actual_length, le32_to_cpu(desc->hw.td.status),urb->iso_frame_desc[i].status);
-       list_del (p);
+       p_tmp = p;
        p = p->next;
+       list_del (p_tmp);
        delete_desc (s, desc);
    }
...
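The usb-uhci change above fixes a walk-while-deleting bug: the old code did list_del(p) and then p = p->next, following pointers of a node that had just been unlinked (and, with the old list_del, zeroed). The new code saves the cursor, advances, and only then unlinks. A rough sketch of the idiom (illustrative only; demo_purge is a hypothetical name):

#include <linux/list.h>

/* Illustrative sketch, not part of this commit: advance the cursor
 * before unlinking the node it points at. */
static void demo_purge(struct list_head *head)
{
    struct list_head *p = head->next, *p_tmp;

    while (p != head) {
        p_tmp = p;
        p = p->next;        /* step first ...           */
        list_del(p_tmp);    /* ... then unlink the node */
    }
}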
@@ -67,6 +67,17 @@ static unsigned int max_block(kdev_t dev)
    return retval;
}
+static loff_t blkdev_size(kdev_t dev)
+{
+   unsigned int blocks = ~0U;
+   int major = MAJOR(dev);
+   if (blk_size[major]) {
+       int minor = MINOR(dev);
+       blocks = blk_size[major][minor];
+   }
+   return (loff_t) blocks << BLOCK_SIZE_BITS;
+}
static inline int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh_result)
{
@@ -308,7 +319,6 @@ static int __blkdev_commit_write(struct inode *inode, struct page *page,
        set_bit(BH_Uptodate, &bh->b_state);
        if (!atomic_set_buffer_dirty(bh)) {
            __mark_dirty(bh);
-           buffer_insert_inode_data_queue(bh, inode);
            need_balance_dirty = 1;
        }
    }
@@ -404,6 +414,7 @@ static struct super_block *bd_read_super(struct super_block *sb, void *data, int
    root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
    root->i_uid = root->i_gid = 0;
    root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+   sb->s_maxbytes = ~0ULL;
    sb->s_blocksize = 1024;
    sb->s_blocksize_bits = 10;
    sb->s_magic = 0x62646576;
@@ -521,9 +532,11 @@ struct block_device *bdget(dev_t dev)
        new_bdev->bd_dev = dev;
        new_bdev->bd_op = NULL;
        new_bdev->bd_inode = inode;
+       inode->i_size = blkdev_size(dev);
        inode->i_rdev = to_kdev_t(dev);
        inode->i_bdev = new_bdev;
        inode->i_data.a_ops = &def_blk_aops;
+       inode->i_data.gfp_mask = GFP_USER;
        spin_lock(&bdev_lock);
        bdev = bdfind(dev, head);
        if (!bdev) {
@@ -810,22 +823,7 @@ int blkdev_put(struct block_device *bdev, int kind)
    down(&bdev->bd_sem);
    lock_kernel();
    if (kind == BDEV_FILE) {
-       struct super_block * sb;
        __block_fsync(bd_inode);
-       /* Janitorianism: this shit must go away */
-       sb = get_super(bd_inode->i_rdev);
-       if (sb) {
-           if (sb->s_flags & MS_RDONLY) {
-               shrink_dcache_sb(sb);
-               invalidate_inodes(sb);
-               invalidate_buffers(bd_inode->i_rdev);
-           }
-           lock_super(sb);
-           unlock_super(sb);
-           drop_super(sb);
-       }
    } else if (kind == BDEV_FS)
        fsync_no_super(rdev);
    if (!--bdev->bd_openers) {
...
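The new blkdev_size() above reads the driver-maintained blk_size[major][minor] table, which is kept in 1 KB units, and shifts it by BLOCK_SIZE_BITS (10) to get a byte count; a device with no entry reports the largest possible size. The result can exceed 32 bits, which is why it is built in a loff_t and why bd_read_super() now sets s_maxbytes to ~0ULL. A worked example under the assumption of a 4 GB disk (hypothetical numbers, illustrative only):

#include <linux/fs.h>

/* Illustrative arithmetic, not part of this commit: a hypothetical 4 GB
 * disk registers blk_size[major][minor] == 4194304 (KB); shifted by
 * BLOCK_SIZE_BITS this is 4294967296 bytes, which already overflows a
 * 32-bit size. */
static loff_t demo_device_bytes(unsigned int kb_blocks)
{
    return (loff_t) kb_blocks << BLOCK_SIZE_BITS;   /* e.g. 4194304 << 10 */
}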
@@ -52,22 +52,13 @@
#include <asm/bitops.h>
#include <asm/mmu_context.h>
-#define NR_SIZES 7
-static char buffersize_index[65] =
-{-1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
-  4, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
-  5, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1,-1, -1, -1, -1, -1, -1, -1,
-  6};
-#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
#define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
                                             number of unused buffer heads */
/* Anti-deadlock ordering:
- *  lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock
+ *  lru_list_lock > hash_table_lock > unused_list_lock
 */
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)
@@ -80,6 +71,11 @@ static unsigned int bh_hash_shift;
static struct buffer_head **hash_table;
static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1   /* Buffers scheduled for write */
+#define BUF_DIRTY 2    /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 3
static struct buffer_head *lru_list[NR_LIST];
static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_buffers_type[NR_LIST];
@@ -90,14 +86,8 @@ static int nr_unused_buffer_heads;
static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-struct bh_free_head {
-   struct buffer_head *list;
-   spinlock_t lock;
-};
-static struct bh_free_head free_list[NR_SIZES];
static void truncate_buffers(kdev_t dev);
-static int grow_buffers(kdev_t dev, int block, int size);
+static int grow_buffers(kdev_t dev, unsigned long block, int size);
static void __refile_buffer(struct buffer_head *);
/* This is used by some architectures to estimate available memory. */
@@ -482,12 +472,16 @@ asmlinkage long sys_fdatasync(unsigned int fd)
            ((block) << (bh_hash_shift - 12))))
#define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]
-static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
+static inline void __insert_into_hash_list(struct buffer_head *bh)
{
-   if ((bh->b_next = *head) != NULL)
-       bh->b_next->b_pprev = &bh->b_next;
+   struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+   struct buffer_head *next = *head;
    *head = bh;
    bh->b_pprev = head;
+   bh->b_next = next;
+   if (next != NULL)
+       next->b_pprev = &bh->b_next;
}
static __inline__ void __hash_unlink(struct buffer_head *bh)
@@ -504,6 +498,8 @@ static void __insert_into_lru_list(struct buffer_head * bh, int blist)
{
    struct buffer_head **bhp = &lru_list[blist];
+   if (bh->b_prev_free || bh->b_next_free) BUG();
    if(!*bhp) {
        *bhp = bh;
        bh->b_prev_free = bh;
@@ -531,19 +527,6 @@ static void __remove_from_lru_list(struct buffer_head * bh, int blist)
    }
}
-static void __remove_from_free_list(struct buffer_head * bh, int index)
-{
-   if(bh->b_next_free == bh)
-       free_list[index].list = NULL;
-   else {
-       bh->b_prev_free->b_next_free = bh->b_next_free;
-       bh->b_next_free->b_prev_free = bh->b_prev_free;
-       if (free_list[index].list == bh)
-           free_list[index].list = bh->b_next_free;
-   }
-   bh->b_next_free = bh->b_prev_free = NULL;
-}
/* must be called with both the hash_table_lock and the lru_list_lock
   held */
static void __remove_from_queues(struct buffer_head *bh)
@@ -552,14 +535,6 @@ static void __remove_from_queues(struct buffer_head *bh)
    __remove_from_lru_list(bh, bh->b_list);
}
-static void __insert_into_queues(struct buffer_head *bh)
-{
-   struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
-   __hash_link(bh, head);
-   __insert_into_lru_list(bh, bh->b_list);
-}
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
    struct buffer_head *bh, **p = &hash(dev, block);
@@ -1214,6 +1189,7 @@ static __inline__ void __put_unused_buffer_head(struct buffer_head * bh)
    if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
        kmem_cache_free(bh_cachep, bh);
    } else {
+       bh->b_dev = B_FREE;
        bh->b_blocknr = -1;
        bh->b_this_page = NULL;
@@ -1320,7 +1296,7 @@ static struct buffer_head * create_buffers(struct page * page, unsigned long siz
        if (!bh)
            goto no_grow;
-       bh->b_dev = B_FREE;  /* Flag as unused */
+       bh->b_dev = NODEV;
        bh->b_this_page = head;
        head = bh;
@@ -1376,15 +1352,18 @@ static struct buffer_head * create_buffers(struct page * page, unsigned long siz
/*
 * Called when truncating a buffer on a page completely.
- *
- * We can avoid IO by marking it clean.
- * FIXME!! FIXME!! FIXME!! We need to unmap it too,
- * so that the filesystem won't write to it. There's
- * some bug somewhere..
 */
static void discard_buffer(struct buffer_head * bh)
{
+   if (buffer_mapped(bh)) {
        mark_buffer_clean(bh);
+       lock_buffer(bh);
+       clear_bit(BH_Uptodate, &bh->b_state);
+       clear_bit(BH_Mapped, &bh->b_state);
+       clear_bit(BH_Req, &bh->b_state);
+       clear_bit(BH_New, &bh->b_state);
+       unlock_buffer(bh);
+   }
}
/*
@@ -2120,7 +2099,6 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
            }
            tmp = bhs[bhind++];
-           tmp->b_dev = B_FREE;
            tmp->b_size = size;
            set_bh_page(tmp, map, offset);
            tmp->b_this_page = tmp;
@@ -2304,7 +2282,6 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size
    if (Page_Uptodate(page))
        uptodate |= 1 << BH_Uptodate;
-   spin_lock(&lru_list_lock);
    write_lock(&hash_table_lock);
    do {
        if (!(bh->b_state & (1 << BH_Mapped))) {
@@ -2314,23 +2291,21 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size
            bh->b_state = uptodate;
        }
-       /* Insert the buffer into the regular lists */
-       if (!bh->b_pprev) {
-           __insert_into_queues(bh);
-       }
+       /* Insert the buffer into the hash lists if necessary */
+       if (!bh->b_pprev)
+           __insert_into_hash_list(bh);
        block++;
        bh = bh->b_this_page;
    } while (bh != head);
    write_unlock(&hash_table_lock);
-   spin_unlock(&lru_list_lock);
}
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
-static int grow_buffers(kdev_t dev, int block, int size)
+static int grow_buffers(kdev_t dev, unsigned long block, int size)
{
    struct page * page;
    struct block_device *bdev;
@@ -2389,7 +2364,7 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
            ll_rw_block(WRITE, 1, &p);
            tryagain = 0;
        } else if (buffer_locked(p)) {
-           if (gfp_mask & __GFP_WAIT) {
+           if (gfp_mask & __GFP_WAITBUF) {
                wait_on_buffer(p);
                tryagain = 1;
            } else
@@ -2424,12 +2399,10 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
{
    struct buffer_head * tmp, * bh = page->buffers;
-   int index = BUFSIZE_INDEX(bh->b_size);
cleaned_buffers_try_again:
    spin_lock(&lru_list_lock);
    write_lock(&hash_table_lock);
-   spin_lock(&free_list[index].lock);
    tmp = bh;
    do {
        if (buffer_busy(tmp))
@@ -2443,14 +2416,10 @@ int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
        struct buffer_head * p = tmp;
        tmp = tmp->b_this_page;
-       /* The buffer can be either on the regular
-        * queues or on the free list..
-        */
-       if (p->b_dev != B_FREE) {
+       if (p->b_dev == B_FREE) BUG();
        remove_inode_queue(p);
        __remove_from_queues(p);
-       } else
-           __remove_from_free_list(p, index);
        __put_unused_buffer_head(p);
    } while (tmp != bh);
    spin_unlock(&unused_list_lock);
@@ -2461,14 +2430,12 @@ int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
    /* And free the page */
    page->buffers = NULL;
    page_cache_release(page);
-   spin_unlock(&free_list[index].lock);
    write_unlock(&hash_table_lock);
    spin_unlock(&lru_list_lock);
    return 1;
busy_buffer_page:
    /* Uhhuh, start writeback so that we don't end up with all dirty pages */
-   spin_unlock(&free_list[index].lock);
    write_unlock(&hash_table_lock);
    spin_unlock(&lru_list_lock);
    if (gfp_mask & __GFP_IO) {
@@ -2581,12 +2548,6 @@ void __init buffer_init(unsigned long mempages)
    for(i = 0; i < nr_hash; i++)
        hash_table[i] = NULL;
-   /* Setup free lists. */
-   for(i = 0; i < NR_SIZES; i++) {
-       free_list[i].list = NULL;
-       free_list[i].lock = SPIN_LOCK_UNLOCKED;
-   }
    /* Setup lru lists. */
    for(i = 0; i < NR_LIST; i++)
        lru_list[i] = NULL;
...
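The fs/buffer.c hunks above remove the per-size free lists entirely (buffer heads now live only on the hash and LRU lists), which is why free_list_lock drops out of the anti-deadlock ordering. That ordering means nested acquisition always goes lru_list_lock, then hash_table_lock, then unused_list_lock, as try_to_free_buffers() does. A minimal sketch of the rule (illustrative only, not part of this commit; the demo locks are hypothetical stand-ins):

#include <linux/spinlock.h>

static spinlock_t demo_outer = SPIN_LOCK_UNLOCKED;
static rwlock_t demo_inner = RW_LOCK_UNLOCKED;

/* Illustrative sketch: honour one documented order (outer before
 * inner) everywhere, and release in the reverse order. */
static void demo_nested_locks(void)
{
    spin_lock(&demo_outer);     /* e.g. lru_list_lock   */
    write_lock(&demo_inner);    /* e.g. hash_table_lock */
    /* ... work on the hashed/LRU buffer heads ... */
    write_unlock(&demo_inner);
    spin_unlock(&demo_outer);
}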
@@ -1086,11 +1086,6 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
/* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ;
-#define BUF_CLEAN 0
-#define BUF_LOCKED 1   /* Buffers scheduled for write */
-#define BUF_DIRTY 2    /* Dirty buffers, not yet scheduled for write */
-#define NR_LIST 3
static inline void get_bh(struct buffer_head * bh)
{
    atomic_inc(&(bh)->b_count);
...
@@ -92,7 +92,6 @@ static __inline__ void __list_del(struct list_head * prev,
static __inline__ void list_del(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
-   entry->next = entry->prev = 0;
}
/**
...
@@ -550,16 +550,17 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
#define __GFP_IO 0x40       /* Can start low memory physical IO? */
#define __GFP_HIGHIO 0x80   /* Can start high mem physical IO? */
#define __GFP_FS 0x100      /* Can call down to low-level FS? */
+#define __GFP_WAITBUF 0x200 /* Can we wait for buffers to complete? */
#define GFP_NOHIGHIO (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO     (__GFP_HIGH | __GFP_WAIT)
-#define GFP_NOFS     (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define GFP_NOFS     (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF)
#define GFP_ATOMIC   (__GFP_HIGH)
-#define GFP_USER     ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_USER     ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL   (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_KERNEL   (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_NFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
-#define GFP_KSWAPD   ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_KSWAPD   ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */
...
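__GFP_WAITBUF is a new, separate permission bit: __GFP_WAIT still means the allocator may sleep, while __GFP_WAITBUF additionally allows it to block on locked buffers (sync_page_buffers() in the fs/buffer.c hunk above now tests it instead of __GFP_WAIT). GFP_KERNEL, GFP_USER, GFP_NOFS and friends carry it; GFP_NOIO and GFP_ATOMIC do not. A small sketch of how a reclaim path would consult it under this patch (illustrative only; the demo name is hypothetical):

#include <linux/mm.h>

/* Illustrative sketch, not part of this commit: a reclaim helper asks
 * the gfp mask whether waiting on buffer I/O is permitted. */
static int demo_may_wait_on_buffer(unsigned int gfp_mask)
{
    return (gfp_mask & __GFP_WAITBUF) != 0;
}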
@@ -24,7 +24,7 @@ typedef struct kmem_cache_s kmem_cache_t;
#define SLAB_NFS GFP_NFS
#define SLAB_DMA GFP_DMA
-#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
+#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_WAITBUF|__GFP_FS)
#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
/* flags to pass to kmem_cache_create().
...
@@ -873,6 +873,13 @@ struct page * find_or_create_page(struct address_space *mapping, unsigned long i
    return page;
}
+/*
+ * Returns locked page at given index in given cache, creating it if needed.
+ */
+struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
+{
+   return find_or_create_page(mapping, index, mapping->gfp_mask);
+}
#if 0
@@ -1005,24 +1012,6 @@ static inline int get_max_readahead(struct inode * inode)
    return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
}
-static inline unsigned long calc_end_index(struct inode * inode)
-{
-   unsigned long end_index;
-   end_index = inode->i_size >> PAGE_CACHE_SHIFT;
-   return end_index;
-}
-static inline loff_t calc_rsize(struct inode * inode)
-{
-   loff_t rsize;
-   rsize = inode->i_size;
-   return rsize;
-}
static void generic_file_readahead(int reada_ok,
    struct file * filp, struct inode * inode,
    struct page * page)
@@ -1033,7 +1022,7 @@ static void generic_file_readahead(int reada_ok,
    unsigned long raend;
    int max_readahead = get_max_readahead(inode);
-   end_index = calc_end_index(inode);
+   end_index = inode->i_size >> PAGE_CACHE_SHIFT;
    raend = filp->f_raend;
    max_ahead = 0;
@@ -1157,8 +1146,8 @@ void mark_page_accessed(struct page *page)
 */
void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
-   struct inode *inode = filp->f_dentry->d_inode;
-   struct address_space *mapping = inode->i_mapping;
+   struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
+   struct inode *inode = mapping->host;
    unsigned long index, offset;
    struct page *cached_page;
    int reada_ok;
@@ -1212,13 +1201,13 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
        struct page *page, **hash;
        unsigned long end_index, nr, ret;
-       end_index = calc_end_index(inode);
+       end_index = inode->i_size >> PAGE_CACHE_SHIFT;
        if (index > end_index)
            break;
        nr = PAGE_CACHE_SIZE;
        if (index == end_index) {
-           nr = calc_rsize(inode) & ~PAGE_CACHE_MASK;
+           nr = inode->i_size & ~PAGE_CACHE_MASK;
            if (nr <= offset)
                break;
        }
@@ -1595,7 +1584,6 @@ struct page * filemap_nopage(struct vm_area_struct * area,
    struct address_space *mapping = inode->i_mapping;
    struct page *page, **hash, *old_page;
    unsigned long size, pgoff;
-   loff_t rsize;
    pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
@@ -1604,8 +1592,7 @@ struct page * filemap_nopage(struct vm_area_struct * area,
     * An external ptracer can access pages that normally aren't
     * accessible..
     */
-   rsize = calc_rsize(inode);
-   size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+   size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    if ((pgoff >= size) && (area->vm_mm == current->mm))
        return NULL;
@@ -2104,14 +2091,13 @@ static long madvise_willneed(struct vm_area_struct * vma,
    long error = -EBADF;
    struct file * file;
    unsigned long size, rlim_rss;
-   loff_t rsize;
    /* Doesn't work if there's no mapped file. */
    if (!vma->vm_file)
        return error;
    file = vma->vm_file;
-   rsize = calc_rsize(file->f_dentry->d_inode);
-   size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+   size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >>
+       PAGE_CACHE_SHIFT;
    start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
    if (end > vma->vm_end)
@@ -2549,19 +2535,6 @@ static inline struct page * __grab_cache_page(struct address_space *mapping,
    return page;
}
-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
-{
-   struct page *cached_page = NULL;
-   struct page *page = __grab_cache_page(mapping,index,&cached_page);
-   if (cached_page)
-       page_cache_release(cached_page);
-   return page;
-}
inline void remove_suid(struct inode *inode)
{
    unsigned int mode;
@@ -2595,8 +2568,8 @@ inline void remove_suid(struct inode *inode)
ssize_t
generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
{
-   struct inode *inode = file->f_dentry->d_inode;
-   struct address_space *mapping = inode->i_mapping;
+   struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+   struct inode *inode = mapping->host;
    unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
    loff_t pos;
    struct page *page, *cached_page;
@@ -2628,8 +2601,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
    written = 0;
-   /* FIXME: this is for backwards compatibility with 2.4 */
-   if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND)
+   if (file->f_flags & O_APPEND)
        pos = inode->i_size;
    /*
@@ -2690,17 +2662,15 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
            err = -EPERM;
            goto out;
        }
-       if (pos >= calc_rsize(inode)) {
-           if (count || pos > calc_rsize(inode)) {
-               /* FIXME: this is for backwards compatibility with 2.4 */
+       if (pos >= inode->i_size) {
+           if (count || pos > inode->i_size) {
                err = -ENOSPC;
                goto out;
            }
-           /* zero-length writes at blkdev end are OK */
        }
-       if (pos + count > calc_rsize(inode))
-           count = calc_rsize(inode) - pos;
+       if (pos + count > inode->i_size)
+           count = inode->i_size - pos;
    }
    err = 0;
...
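With calc_end_index()/calc_rsize() gone, the read and write paths above compute everything directly from inode->i_size, and the inode itself now comes from mapping->host, so block-device files (whose pages live on the bdev inode's mapping) take the same path as regular files. The page-cache arithmetic they rely on, roughly (illustrative only, not part of this commit; the demo name is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Illustrative sketch: how many bytes of a given page-cache page lie
 * inside the file, per the i_size checks in do_generic_file_read(). */
static unsigned long demo_bytes_in_page(struct inode *inode, unsigned long index)
{
    unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;

    if (index < end_index)
        return PAGE_CACHE_SIZE;                  /* page fully inside the file */
    if (index > end_index)
        return 0;                                /* page entirely past EOF     */
    return inode->i_size & ~PAGE_CACHE_MASK;     /* last, partial page         */
}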
@@ -319,6 +319,8 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
        if (pte_none(pte))
            continue;
        if (pte_present(pte)) {
+           struct page *page = pte_page(pte);
+           if (VALID_PAGE(page) && !PageReserved(page))
                freed ++;
            /* This will eventually call __free_pte on the pte. */
            tlb_remove_page(tlb, ptep, address + offset);
...
@@ -480,7 +480,7 @@ unsigned int nr_free_buffer_pages (void)
    zone_t **zonep, *zone;
    do {
-       zonelist = pgdat->node_zonelists + __GFP_HIGHMEM;
+       zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
        zonep = zonelist->zones;
        for (zone = *zonep++; zone; zone = *zonep++)
...
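On the mm/page_alloc.c change above: per-node zonelists are indexed by the low zone-selection bits of a gfp mask, so nr_free_buffer_pages() should look at the zonelist that buffer-cache (GFP_USER) allocations actually use rather than hard-coding the __GFP_HIGHMEM index. Roughly, in this tree (illustrative only; the demo name is hypothetical):

#include <linux/mm.h>

/* Illustrative sketch, not part of this commit: pick the zonelist a
 * given gfp mask would allocate from on one node. */
static zonelist_t *demo_zonelist_for(pg_data_t *pgdat, unsigned int gfp_mask)
{
    return pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
}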