Commit 98b88030 authored by Linus Torvalds

v2.4.10.0.4 -> v2.4.10.1

  - Chris Mason: fix ppp race conditions
  - Al Viro: block device cleanups/fixes
  - Anton Altaparmakov: NTFS 1.1.20 update
  - Andrea Arcangeli: VM tweaks
parent a356c406
...@@ -98,6 +98,14 @@ list at sourceforge: linux-ntfs-dev@lists.sourceforge.net ...@@ -98,6 +98,14 @@ list at sourceforge: linux-ntfs-dev@lists.sourceforge.net
ChangeLog ChangeLog
========= =========
NTFS 1.1.20:
- Fixed two bugs in ntfs_readwrite_attr(). Thanks to Jan Kara for
spotting the out of bounds one.
- Check return value of set_blocksize() in ntfs_read_super() and make
use of get_hardsect_size() to determine the minimum block size.
- Fix return values of ntfs_vcn_to_lcn(). This should stop
people's start of partition being overwritten at random.
NTFS 1.1.19: NTFS 1.1.19:
- Fixed ntfs_getdir_unsorted(), ntfs_readdir() and ntfs_printcb() to - Fixed ntfs_getdir_unsorted(), ntfs_readdir() and ntfs_printcb() to
cope with arbitrary cluster sizes. Very important for Win2k+. Also, cope with arbitrary cluster sizes. Very important for Win2k+. Also,
......
...@@ -620,6 +620,7 @@ ENTRY(sys_call_table) ...@@ -620,6 +620,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_getdents64) /* 220 */ .long SYMBOL_NAME(sys_getdents64) /* 220 */
.long SYMBOL_NAME(sys_fcntl64) .long SYMBOL_NAME(sys_fcntl64)
.long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */ .long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
.long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
.rept NR_syscalls-(.-sys_call_table)/4 .rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall) .long SYMBOL_NAME(sys_ni_syscall)
......
...@@ -579,6 +579,7 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, int kind) ...@@ -579,6 +579,7 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, int kind)
if (!ret) { if (!ret) {
bdev->bd_openers++; bdev->bd_openers++;
bdev->bd_inode->i_size = blkdev_size(rdev); bdev->bd_inode->i_size = blkdev_size(rdev);
bdev->bd_inode->i_blkbits = blksize_bits(block_size(rdev));
} else if (!bdev->bd_openers) } else if (!bdev->bd_openers)
bdev->bd_op = NULL; bdev->bd_op = NULL;
} }
......
...@@ -71,11 +71,6 @@ static unsigned int bh_hash_shift; ...@@ -71,11 +71,6 @@ static unsigned int bh_hash_shift;
static struct buffer_head **hash_table; static struct buffer_head **hash_table;
static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED; static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
#define BUF_CLEAN 0
#define BUF_LOCKED 1 /* Buffers scheduled for write */
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
#define NR_LIST 3
static struct buffer_head *lru_list[NR_LIST]; static struct buffer_head *lru_list[NR_LIST];
static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED; static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_buffers_type[NR_LIST]; static int nr_buffers_type[NR_LIST];
...@@ -672,10 +667,6 @@ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) ...@@ -672,10 +667,6 @@ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
if (!atomic_read(&bh->b_count)) { if (!atomic_read(&bh->b_count)) {
if (destroy_dirty_buffers || !buffer_dirty(bh)) { if (destroy_dirty_buffers || !buffer_dirty(bh)) {
remove_inode_queue(bh); remove_inode_queue(bh);
#if 0
__remove_from_queues(bh);
put_last_free(bh);
#endif
} }
} else } else
printk("invalidate: busy buffer\n"); printk("invalidate: busy buffer\n");
...@@ -1228,20 +1219,6 @@ static struct buffer_head * get_unused_buffer_head(int async) ...@@ -1228,20 +1219,6 @@ static struct buffer_head * get_unused_buffer_head(int async)
} }
spin_unlock(&unused_list_lock); spin_unlock(&unused_list_lock);
} }
#if 0
/*
* (Pending further analysis ...)
* Ordinary (non-async) requests can use a different memory priority
* to free up pages. Any swapping thus generated will use async
* buffer heads.
*/
if(!async &&
(bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
memset(bh, 0, sizeof(*bh));
init_waitqueue_head(&bh->b_wait);
return bh;
}
#endif
return NULL; return NULL;
} }
...@@ -1395,11 +1372,8 @@ int discard_bh_page(struct page *page, unsigned long offset, int drop_pagecache) ...@@ -1395,11 +1372,8 @@ int discard_bh_page(struct page *page, unsigned long offset, int drop_pagecache)
* instead. * instead.
*/ */
if (!offset) { if (!offset) {
if (!try_to_free_buffers(page, 0)) { if (!try_to_free_buffers(page, 0))
if (drop_pagecache)
atomic_inc(&buffermem_pages);
return 0; return 0;
}
} }
return 1; return 1;
...@@ -2228,12 +2202,27 @@ int block_symlink(struct inode *inode, const char *symname, int len) ...@@ -2228,12 +2202,27 @@ int block_symlink(struct inode *inode, const char *symname, int len)
return err; return err;
} }
/*
 * Close a freshly created chain of buffer_heads into a ring and attach
 * it to @page.  @head is the first buffer of a NULL-terminated
 * b_this_page chain; on return the chain is circular, page->buffers
 * points at @head, and the page holds an extra reference for as long
 * as the buffers exist.
 */
static inline void link_dev_buffers(struct page * page, struct buffer_head *head)
{
	struct buffer_head *curr, *last;

	/* Walk to the final buffer_head of the singly linked chain. */
	for (last = head, curr = head->b_this_page; curr; curr = curr->b_this_page)
		last = curr;
	last->b_this_page = head;	/* close the ring */
	page->buffers = head;
	page_cache_get(page);		/* buffers pin the page */
}
/* /*
* Create the page-cache page that contains the requested block * Create the page-cache page that contains the requested block
*/ */
static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size) static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size)
{ {
struct page * page; struct page * page;
struct buffer_head *bh;
page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS); page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS);
if (IS_ERR(page)) if (IS_ERR(page))
...@@ -2242,22 +2231,18 @@ static struct page * grow_dev_page(struct block_device *bdev, unsigned long inde ...@@ -2242,22 +2231,18 @@ static struct page * grow_dev_page(struct block_device *bdev, unsigned long inde
if (!PageLocked(page)) if (!PageLocked(page))
BUG(); BUG();
if (!page->buffers) { bh = page->buffers;
struct buffer_head *bh, *tail; if (bh) {
struct buffer_head *head = create_buffers(page, size, 0); if (bh->b_size == size)
if (!head) return page;
if (!try_to_free_buffers(page, GFP_NOFS))
goto failed; goto failed;
bh = head;
do {
tail = bh;
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
page->buffers = head;
page_cache_get(page);
atomic_inc(&buffermem_pages);
} }
bh = create_buffers(page, size, 0);
if (!bh)
goto failed;
link_dev_buffers(page, bh);
return page; return page;
failed: failed:
...@@ -2336,6 +2321,9 @@ static int grow_buffers(kdev_t dev, unsigned long block, int size) ...@@ -2336,6 +2321,9 @@ static int grow_buffers(kdev_t dev, unsigned long block, int size)
hash_page_buffers(page, dev, block, size); hash_page_buffers(page, dev, block, size);
UnlockPage(page); UnlockPage(page);
page_cache_release(page); page_cache_release(page);
/* We hashed up this page, so increment buffermem */
atomic_inc(&buffermem_pages);
return 1; return 1;
} }
......
...@@ -5,7 +5,7 @@ O_TARGET := ntfs.o ...@@ -5,7 +5,7 @@ O_TARGET := ntfs.o
obj-y := fs.o sysctl.o support.o util.o inode.o dir.o super.o attr.o unistr.o obj-y := fs.o sysctl.o support.o util.o inode.o dir.o super.o attr.o unistr.o
obj-m := $(O_TARGET) obj-m := $(O_TARGET)
# New version format started 3 February 2001. # New version format started 3 February 2001.
EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.19\" #-DDEBUG EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.20\" #-DDEBUG
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/locks.h> #include <linux/locks.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <asm/page.h> #include <asm/page.h>
#include <linux/nls.h> #include <linux/nls.h>
#include <linux/ntfs_fs.h> #include <linux/ntfs_fs.h>
...@@ -1012,22 +1013,27 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options, ...@@ -1012,22 +1013,27 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
{ {
ntfs_volume *vol; ntfs_volume *vol;
struct buffer_head *bh; struct buffer_head *bh;
int i, to_read; int i, to_read, blocksize;
ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n"); ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n");
vol = NTFS_SB2VOL(sb); vol = NTFS_SB2VOL(sb);
init_ntfs_super_block(vol); init_ntfs_super_block(vol);
if (!parse_options(vol, (char*)options)) if (!parse_options(vol, (char*)options))
goto ntfs_read_super_vol; goto ntfs_read_super_vol;
/* Assume a 512 bytes block device for now. */ blocksize = get_hardsect_size(sb->s_dev);
set_blocksize(sb->s_dev, 512); if (blocksize < 512)
blocksize = 512;
if (set_blocksize(sb->s_dev, blocksize) < 0) {
ntfs_error("Unable to set blocksize %d.\n", blocksize);
goto ntfs_read_super_vol;
}
/* Read the super block (boot block). */ /* Read the super block (boot block). */
if (!(bh = bread(sb->s_dev, 0, 512))) { if (!(bh = bread(sb->s_dev, 0, blocksize))) {
ntfs_error("Reading super block failed\n"); ntfs_error("Reading super block failed\n");
goto ntfs_read_super_unl; goto ntfs_read_super_unl;
} }
ntfs_debug(DEBUG_OTHER, "Done reading boot block\n"); ntfs_debug(DEBUG_OTHER, "Done reading boot block\n");
/* Check for 'NTFS' magic number */ /* Check for valid 'NTFS' boot sector. */
if (!is_boot_sector_ntfs(bh->b_data)) { if (!is_boot_sector_ntfs(bh->b_data)) {
ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n"); ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n");
bforget(bh); bforget(bh);
...@@ -1040,7 +1046,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options, ...@@ -1040,7 +1046,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
goto ntfs_read_super_unl; goto ntfs_read_super_unl;
} }
ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn); ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn);
bforget(bh); brelse(bh);
NTFS_SB(vol) = sb; NTFS_SB(vol) = sb;
if (vol->cluster_size > PAGE_SIZE) { if (vol->cluster_size > PAGE_SIZE) {
ntfs_error("Partition cluster size is not supported yet (it " ntfs_error("Partition cluster size is not supported yet (it "
...@@ -1050,9 +1056,12 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options, ...@@ -1050,9 +1056,12 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
ntfs_debug(DEBUG_OTHER, "Done to init volume\n"); ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
/* Inform the kernel that a device block is a NTFS cluster. */ /* Inform the kernel that a device block is a NTFS cluster. */
sb->s_blocksize = vol->cluster_size; sb->s_blocksize = vol->cluster_size;
for (i = sb->s_blocksize, sb->s_blocksize_bits = 0; i != 1; i >>= 1) sb->s_blocksize_bits = vol->cluster_size_bits;
sb->s_blocksize_bits++; if (blocksize != vol->cluster_size &&
set_blocksize(sb->s_dev, sb->s_blocksize); set_blocksize(sb->s_dev, sb->s_blocksize) < 0) {
ntfs_error("Cluster size too small for device.\n");
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "set_blocksize\n"); ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
/* Allocate an MFT record (MFT record can be smaller than a cluster). */ /* Allocate an MFT record (MFT record can be smaller than a cluster). */
i = vol->cluster_size; i = vol->cluster_size;
......
...@@ -592,18 +592,23 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset, ...@@ -592,18 +592,23 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset,
* If write extends beyond _allocated_ size, extend attribute, * If write extends beyond _allocated_ size, extend attribute,
* updating attr->allocated and attr->size in the process. (AIA) * updating attr->allocated and attr->size in the process. (AIA)
*/ */
if (offset + l > attr->allocated) { if ((!attr->resident && offset + l > attr->allocated) ||
(attr->resident && offset + l > attr->size)) {
error = ntfs_resize_attr(ino, attr, offset + l); error = ntfs_resize_attr(ino, attr, offset + l);
if (error) if (error)
return error; return error;
} else if (offset + l > attr->size) }
/* If amount of data has increased: update. */ if (!attr->resident) {
attr->size = offset + l; /* Has amount of data increased? */
/* If amount of initialised data has increased: update. */ if (offset + l > attr->size)
if (offset + l > attr->initialized) { attr->size = offset + l;
/* FIXME: Zero-out the section between the old /* Has amount of initialised data increased? */
* initialised length and the write start. (AIA) */ if (offset + l > attr->initialized) {
attr->initialized = offset + l; /* FIXME: Clear the section between the old
* initialised length and the write start.
* (AIA) */
attr->initialized = offset + l;
}
} }
} }
if (attr->resident) { if (attr->resident) {
...@@ -619,10 +624,11 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset, ...@@ -619,10 +624,11 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset,
if (offset >= attr->initialized) if (offset >= attr->initialized)
return ntfs_read_zero(dest, l); return ntfs_read_zero(dest, l);
if (offset + l > attr->initialized) { if (offset + l > attr->initialized) {
dest->size = chunk = offset + l - attr->initialized; dest->size = chunk = attr->initialized - offset;
error = ntfs_readwrite_attr(ino, attr, offset, dest); error = ntfs_readwrite_attr(ino, attr, offset, dest);
if (error) if (error || (dest->size != chunk && (error = -EIO, 1)))
return error; return error;
dest->size += l - chunk;
return ntfs_read_zero(dest, l - chunk); return ntfs_read_zero(dest, l - chunk);
} }
if (attr->flags & ATTR_IS_COMPRESSED) if (attr->flags & ATTR_IS_COMPRESSED)
...@@ -707,31 +713,25 @@ int ntfs_write_attr(ntfs_inode *ino, int type, char *name, __s64 offset, ...@@ -707,31 +713,25 @@ int ntfs_write_attr(ntfs_inode *ino, int type, char *name, __s64 offset,
return ntfs_readwrite_attr(ino, attr, offset, buf); return ntfs_readwrite_attr(ino, attr, offset, buf);
} }
/* -2 = error, -1 = hole, >= 0 means real disk cluster (lcn). */
int ntfs_vcn_to_lcn(ntfs_inode *ino, int vcn) int ntfs_vcn_to_lcn(ntfs_inode *ino, int vcn)
{ {
int rnum; int rnum;
ntfs_attribute *data; ntfs_attribute *data;
data = ntfs_find_attr(ino, ino->vol->at_data, 0); data = ntfs_find_attr(ino, ino->vol->at_data, 0);
/* It's hard to give an error code. */
if (!data || data->resident || data->flags & (ATTR_IS_COMPRESSED | if (!data || data->resident || data->flags & (ATTR_IS_COMPRESSED |
ATTR_IS_ENCRYPTED)) ATTR_IS_ENCRYPTED))
return -1; return -2;
if (data->size <= (__s64)vcn << ino->vol->cluster_size_bits) if (data->size <= (__s64)vcn << ino->vol->cluster_size_bits)
return -1; return -2;
/*
* For Linux, block number 0 represents a hole. - No problem as we do
* not support bmap in any form whatsoever. The FIBMAP sys call is
* deprecated anyway and NTFS is not a block based file system so
* allowing bmapping is complete and utter garbage IMO. Use mmap once
* we implement it... (AIA)
*/
if (data->initialized <= (__s64)vcn << ino->vol->cluster_size_bits) if (data->initialized <= (__s64)vcn << ino->vol->cluster_size_bits)
return 0; return -1;
for (rnum = 0; rnum < data->d.r.len && for (rnum = 0; rnum < data->d.r.len &&
vcn >= data->d.r.runlist[rnum].len; rnum++) vcn >= data->d.r.runlist[rnum].len; rnum++)
vcn -= data->d.r.runlist[rnum].len; vcn -= data->d.r.runlist[rnum].len;
/* We need to cope with sparse runs. (AIA) */ if (data->d.r.runlist[rnum].lcn >= 0)
return data->d.r.runlist[rnum].lcn + vcn;
return data->d.r.runlist[rnum].lcn + vcn; return data->d.r.runlist[rnum].lcn + vcn;
} }
......
...@@ -227,6 +227,7 @@ ...@@ -227,6 +227,7 @@
#define __NR_madvise1 219 /* delete when C lib stub is removed */ #define __NR_madvise1 219 /* delete when C lib stub is removed */
#define __NR_getdents64 220 #define __NR_getdents64 220
#define __NR_fcntl64 221 #define __NR_fcntl64 221
#define __NR_security 223 /* syscall for security modules */
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */ /* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
......
...@@ -1087,6 +1087,11 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); ...@@ -1087,6 +1087,11 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
/* reiserfs_writepage needs this */ /* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ; extern void set_buffer_async_io(struct buffer_head *bh) ;
#define BUF_CLEAN 0
#define BUF_LOCKED 1 /* Buffers scheduled for write */
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
#define NR_LIST 3
static inline void get_bh(struct buffer_head * bh) static inline void get_bh(struct buffer_head * bh)
{ {
atomic_inc(&(bh)->b_count); atomic_inc(&(bh)->b_count);
......
...@@ -67,8 +67,8 @@ int vm_enough_memory(long pages) ...@@ -67,8 +67,8 @@ int vm_enough_memory(long pages)
if (sysctl_overcommit_memory) if (sysctl_overcommit_memory)
return 1; return 1;
free = atomic_read(&buffermem_pages); /* The page cache contains buffer pages these days.. */
free += atomic_read(&page_cache_size); free = atomic_read(&page_cache_size);
free += nr_free_pages(); free += nr_free_pages();
free += nr_swap_pages; free += nr_swap_pages;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment