Commit 01663d94 authored by Linus Torvalds

Import 2.3.26pre2

parent faf76fb3
......@@ -50,8 +50,7 @@ static int ext2_secrm_seed = 152; /* Random generator base */
* there's no need to test for changes during the operation.
*/
#define DIRECT_BLOCK(inode) \
((inode->i_size + inode->i_sb->s_blocksize - 1) / \
inode->i_sb->s_blocksize)
((unsigned long) ((inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))
#define INDIRECT_BLOCK(inode,offset) ((int)DIRECT_BLOCK(inode) - offset)
#define DINDIRECT_BLOCK(inode,offset) \
(INDIRECT_BLOCK(inode,offset) / addr_per_block)
......
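The ext2 DIRECT_BLOCK() change above replaces a division by the block size with a right shift by s_blocksize_bits, which is valid because the block size is always a power of two, and casts the result to unsigned long. A minimal userspace sketch of the same arithmetic, with made-up values (size, block_size and block_bits are illustrative, not the kernel's fields):

/* Sketch of the rounded-up "size in blocks" calculation; block_size is
 * assumed to be a power of two, so dividing by it is the same as shifting
 * right by log2(block_size). */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 5000;		/* hypothetical file size in bytes */
	unsigned long block_size = 1024;	/* hypothetical block size */
	unsigned int block_bits = 10;		/* log2(block_size) */

	unsigned long by_div   = (size + block_size - 1) / block_size;
	unsigned long by_shift = (size + block_size - 1) >> block_bits;

	printf("blocks: div=%lu shift=%lu\n", by_div, by_shift);	/* both print 5 */
	return 0;
}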
......@@ -1009,7 +1009,7 @@ int isofs_get_block(struct inode *inode, long iblock,
abort_beyond_end:
printk("_isofs_bmap: block >= EOF (%ld, %ld)\n",
iblock, inode->i_size);
iblock, (unsigned long) inode->i_size);
goto abort;
abort_too_many_sections:
......
......@@ -680,7 +680,7 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error);
*/
if (attr->ia_valid & ATTR_SIZE) {
if (attr->ia_size != fattr.size)
printk("nfs_notify_change: attr=%ld, fattr=%d??\n",
printk("nfs_notify_change: attr=%Ld, fattr=%d??\n",
attr->ia_size, fattr.size);
inode->i_size = attr->ia_size;
inode->i_mtime = fattr.mtime.seconds;
......
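Both printk fixes above are about format/argument agreement once the printed size is (or is about to become) a 64-bit loff_t: isofs casts it down to the unsigned long that its "%ld" style format expects, while NFS switches to the kernel's "%Ld" long long conversion. A small userspace sketch of the same two options using standard printf (the 5 GiB value is hypothetical):

/* Sketch (userspace): once a size field becomes a 64-bit type, the old
 * "%ld" format no longer matches the argument; either a 64-bit conversion
 * (here "%lld") or an explicit cast to the printed type is needed,
 * mirroring the %Ld / cast fixes in the isofs and NFS hunks above. */
#include <stdio.h>

int main(void)
{
	long long size = 5368709120LL;	/* hypothetical 5 GiB file size */

	printf("size=%lld\n", size);			/* 64-bit conversion */
	printf("size=%lu\n", (unsigned long) size);	/* explicit cast; may truncate on 32-bit */
	return 0;
}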
......@@ -83,12 +83,9 @@ extern int * max_segments[MAX_BLKDEV];
#define MAX_SEGMENTS MAX_SECTORS
#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
#if 0 /* small readahead */
#define MAX_READAHEAD PageAlignSize(4096*7)
#define MIN_READAHEAD PageAlignSize(4096*2)
#else /* large readahead */
#define MAX_READAHEAD PageAlignSize(4096*31)
#define MIN_READAHEAD PageAlignSize(4096*3)
#endif
/* read-ahead in pages.. */
#define MAX_READAHEAD 31
#define MIN_READAHEAD 3
#endif
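MAX_READAHEAD and MIN_READAHEAD are now counted in pages rather than in bytes. The byte budget is recovered by shifting the page count by the page-size log2 (PAGE_CACHE_SHIFT in the kernel); the sketch below assumes 4 KiB pages and uses illustrative macro names:

/* Sketch: converting the page-based read-ahead limits back to bytes.
 * PAGE_SHIFT_SKETCH and the page size are assumptions for illustration. */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12		/* hypothetical: 4096-byte pages */
#define MAX_READAHEAD_PAGES 31
#define MIN_READAHEAD_PAGES 3

int main(void)
{
	printf("max read-ahead: %d bytes\n", MAX_READAHEAD_PAGES << PAGE_SHIFT_SKETCH);
	printf("min read-ahead: %d bytes\n", MIN_READAHEAD_PAGES << PAGE_SHIFT_SKETCH);
	return 0;
}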
......@@ -302,7 +302,7 @@ struct iattr {
umode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
off_t ia_size;
loff_t ia_size;
time_t ia_atime;
time_t ia_mtime;
time_t ia_ctime;
......@@ -347,7 +347,7 @@ struct inode {
uid_t i_uid;
gid_t i_gid;
kdev_t i_rdev;
off_t i_size;
loff_t i_size;
time_t i_atime;
time_t i_mtime;
time_t i_ctime;
......
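ia_size and i_size widen from off_t to the 64-bit loff_t, lifting the 2 GiB ceiling that a 32-bit signed size imposes. A tiny sketch of why the narrower type is insufficient (plain userspace types, hypothetical 3 GiB value):

/* Sketch: a 32-bit signed size field tops out below 2 GiB, which is why
 * ia_size/i_size move to the 64-bit loff_t.  Userspace types are used
 * here purely for illustration. */
#include <stdio.h>

int main(void)
{
	long long big = 3221225472LL;	/* hypothetical 3 GiB file size */
	int as_32bit = (int) big;	/* what a 32-bit signed off_t would hold */

	printf("64-bit size: %lld\n", big);			/* 3221225472 */
	printf("truncated to 32 bits: %d\n", as_32bit);		/* typically wraps negative */
	return 0;
}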
......@@ -208,14 +208,12 @@ static int newseg (key_t key, int shmflg, size_t size)
int id, err;
unsigned int shmall, shmmni;
lock_kernel();
shmall = shm_prm[1];
shmmni = shm_prm[2];
if (shmmni > IPCMNI) {
printk ("shmmni reset to max of %u\n", IPCMNI);
shmmni = shm_prm[2] = IPCMNI;
}
unlock_kernel();
if (shmmni < used_segs)
return -ENOSPC;
......@@ -282,10 +280,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
int err, id = 0;
size_t shmmax;
lock_kernel();
shmmax = shm_prm[0];
unlock_kernel();
if (size > shmmax)
return -EINVAL;
......@@ -387,11 +382,11 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
err = -EFAULT;
if (!buf)
goto out;
lock_kernel();
shminfo.shmmni = shminfo.shmseg = shm_prm[2];
shminfo.shmmax = shm_prm[0];
shminfo.shmall = shm_prm[1];
unlock_kernel();
shminfo.shmmin = SHMMIN;
if(copy_to_user (buf, &shminfo, sizeof(struct shminfo)))
goto out_unlocked;
......
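The shm hunks drop the lock_kernel()/unlock_kernel() pairs around reads of the shm_prm[] tunables; the single word-sized loads are snapshotted into locals and validated afterwards. A minimal userspace sketch of that snapshot-then-validate pattern (array name, values and IPCMNI_SKETCH are illustrative, not the kernel's):

/* Sketch: snapshot tunables into locals once, then validate the copies,
 * so no lock needs to be held around the reads. */
#include <stdio.h>

#define IPCMNI_SKETCH 128

static unsigned int shm_prm_sketch[3] = { 0x2000000, 0x1000000, 4096 };

int main(void)
{
	unsigned int shmall = shm_prm_sketch[1];	/* one-shot reads of word-sized values */
	unsigned int shmmni = shm_prm_sketch[2];

	if (shmmni > IPCMNI_SKETCH) {
		printf("shmmni reset to max of %u\n", IPCMNI_SKETCH);
		shmmni = shm_prm_sketch[2] = IPCMNI_SKETCH;
	}
	printf("shmall=%u shmmni=%u\n", shmall, shmmni);
	return 0;
}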
......@@ -567,7 +567,7 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset)
break;
}
return;
return error;
}
/*
......@@ -837,13 +837,14 @@ static inline int get_max_readahead(struct inode * inode)
static void generic_file_readahead(int reada_ok,
struct file * filp, struct inode * inode,
unsigned long ppos, struct page * page)
struct page * page)
{
unsigned long index = page->index;
unsigned long max_ahead, ahead;
unsigned long raend;
int max_readahead = get_max_readahead(inode);
raend = filp->f_raend & PAGE_CACHE_MASK;
raend = filp->f_raend;
max_ahead = 0;
/*
......@@ -855,14 +856,14 @@ static void generic_file_readahead(int reada_ok,
* page only.
*/
if (PageLocked(page)) {
if (!filp->f_ralen || ppos >= raend || ppos + filp->f_ralen < raend) {
raend = ppos;
if (raend < inode->i_size)
if (!filp->f_ralen || index >= raend || index + filp->f_ralen < raend) {
raend = index;
if (raend < (unsigned long) (inode->i_size >> PAGE_CACHE_SHIFT))
max_ahead = filp->f_ramax;
filp->f_rawin = 0;
filp->f_ralen = PAGE_CACHE_SIZE;
if (!max_ahead) {
filp->f_raend = ppos + filp->f_ralen;
filp->f_raend = index + filp->f_ralen;
filp->f_rawin += filp->f_ralen;
}
}
......@@ -876,7 +877,7 @@ static void generic_file_readahead(int reada_ok,
* We will later force unplug device in order to force asynchronous read IO.
*/
else if (reada_ok && filp->f_ramax && raend >= PAGE_CACHE_SIZE &&
ppos <= raend && ppos + filp->f_ralen >= raend) {
index <= raend && index + filp->f_ralen >= raend) {
/*
* Add ONE page to max_ahead in order to try to have about the same IO max size
* as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
......@@ -952,17 +953,16 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
{
struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode;
unsigned long pos, pgpos;
unsigned long index, offset;
struct page *cached_page;
int reada_ok;
int error;
int max_readahead = get_max_readahead(inode);
unsigned long pgoff;
cached_page = NULL;
pos = *ppos;
pgpos = pos & PAGE_CACHE_MASK;
pgoff = pos >> PAGE_CACHE_SHIFT;
index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
/*
* If the current position is outside the previous read-ahead window,
* we reset the current read-ahead context and set read ahead max to zero
......@@ -970,7 +970,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* otherwise, we assume that the file accesses are sequential enough to
* continue read-ahead.
*/
if (pgpos > filp->f_raend || pgpos + filp->f_rawin < filp->f_raend) {
if (index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
reada_ok = 0;
filp->f_raend = 0;
filp->f_ralen = 0;
......@@ -986,12 +986,12 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* Then, at least MIN_READAHEAD if read ahead is ok,
* and at most MAX_READAHEAD in all cases.
*/
if (pos + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
if (!index && offset + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
filp->f_ramax = 0;
} else {
unsigned long needed;
needed = ((pos + desc->count) & PAGE_CACHE_MASK) - pgpos;
needed = ((offset + desc->count) >> PAGE_CACHE_SHIFT) + 1;
if (filp->f_ramax < needed)
filp->f_ramax = needed;
......@@ -1004,17 +1004,27 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
for (;;) {
struct page *page, **hash;
unsigned long end_index, nr;
if (pos >= inode->i_size)
end_index = inode->i_size >> PAGE_CACHE_SHIFT;
if (index > end_index)
break;
nr = PAGE_CACHE_SIZE;
if (index == end_index) {
nr = inode->i_size & ~PAGE_CACHE_MASK;
if (nr <= offset)
break;
}
nr = nr - offset;
/*
* Try to find the data in the page cache..
*/
hash = page_hash(&inode->i_data, pgoff);
hash = page_hash(&inode->i_data, index);
spin_lock(&pagecache_lock);
page = __find_page_nolock(&inode->i_data, pgoff, *hash);
page = __find_page_nolock(&inode->i_data, index, *hash);
if (!page)
goto no_cached_page;
found_page:
......@@ -1024,19 +1034,10 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
if (!Page_Uptodate(page))
goto page_not_up_to_date;
page_ok:
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*/
{
unsigned long offset, nr;
offset = pos & ~PAGE_CACHE_MASK;
nr = PAGE_CACHE_SIZE - offset;
if (nr > inode->i_size - pos)
nr = inode->i_size - pos;
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
* we filled up (we may be padding etc), so we can only update
......@@ -1044,20 +1045,20 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* pointers and the remaining count).
*/
nr = actor(desc, page, offset, nr);
pos += nr;
pgoff = pos >> PAGE_CACHE_SHIFT;
offset += nr;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
page_cache_release(page);
if (nr && desc->count)
continue;
break;
}
/*
* Ok, the page was not immediately readable, so let's try to read ahead while we're at it..
*/
page_not_up_to_date:
generic_file_readahead(reada_ok, filp, inode,
pos & PAGE_CACHE_MASK, page);
generic_file_readahead(reada_ok, filp, inode, page);
if (Page_Uptodate(page))
goto page_ok;
......@@ -1078,8 +1079,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
goto page_ok;
/* Again, try some read-ahead while waiting for the page to finish.. */
generic_file_readahead(reada_ok, filp, inode,
pos & PAGE_CACHE_MASK, page);
generic_file_readahead(reada_ok, filp, inode, page);
wait_on_page(page);
if (Page_Uptodate(page))
goto page_ok;
......@@ -1111,7 +1111,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* dropped the page cache lock. Check for that.
*/
spin_lock(&pagecache_lock);
page = __find_page_nolock(&inode->i_data, pgoff, *hash);
page = __find_page_nolock(&inode->i_data, index, *hash);
if (page)
goto found_page;
}
......@@ -1120,14 +1120,14 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
* Ok, add the new page to the hash-queues...
*/
page = cached_page;
__add_to_page_cache(page, &inode->i_data, pgoff, hash);
__add_to_page_cache(page, &inode->i_data, index, hash);
spin_unlock(&pagecache_lock);
cached_page = NULL;
goto readpage;
}
*ppos = pos;
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
filp->f_reada = 1;
if (cached_page)
page_cache_free(cached_page);
......
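The filemap.c rework replaces the single byte position (pos/pgpos/pgoff) in do_generic_file_read() with a page index plus an intra-page offset, clips the last, possibly partial, page against i_size, and rebuilds *ppos from (index, offset) at the end. A self-contained userspace sketch of that bookkeeping, with hypothetical sizes and an nr = min(nr, count) stand-in for the actor() copy:

/* Sketch of the (index, offset) bookkeeping: walk page by page, clip the
 * last partial page at i_size, then fold consumed bytes back into the
 * index/offset pair.  All sizes below are illustrative. */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12
#define PAGE_SIZE_SKETCH  (1UL << PAGE_SHIFT_SKETCH)

int main(void)
{
	unsigned long long ppos = 5000;		/* hypothetical starting file position */
	unsigned long long i_size = 13000;	/* hypothetical file size */
	unsigned long long count = 9000;	/* bytes requested */

	unsigned long index  = ppos >> PAGE_SHIFT_SKETCH;
	unsigned long offset = ppos & (PAGE_SIZE_SKETCH - 1);
	unsigned long end_index = i_size >> PAGE_SHIFT_SKETCH;

	while (count) {
		unsigned long nr = PAGE_SIZE_SKETCH;

		if (index > end_index)
			break;
		if (index == end_index) {		/* last, possibly partial, page */
			nr = i_size & (PAGE_SIZE_SKETCH - 1);
			if (nr <= offset)
				break;
		}
		nr -= offset;
		if (nr > count)
			nr = count;			/* stand-in for the actor() copy */

		printf("copy %lu bytes from page %lu at offset %lu\n", nr, index, offset);

		offset += nr;
		index  += offset >> PAGE_SHIFT_SKETCH;
		offset &= PAGE_SIZE_SKETCH - 1;
		count  -= nr;
	}

	printf("new file position: %llu\n",
	       ((unsigned long long) index << PAGE_SHIFT_SKETCH) + offset);
	return 0;
}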