Commit 98b88030 authored by Linus Torvalds

v2.4.10.0.4 -> v2.4.10.1

  - Chris Mason: fix ppp race conditions
  - Al Viro: block device cleanups/fixes
  - Anton Altaparmakov: NTFS 1.1.20 update
  - Andrea Arcangeli: VM tweaks
parent a356c406
@@ -98,6 +98,14 @@ list at sourceforge: linux-ntfs-dev@lists.sourceforge.net
ChangeLog
=========
NTFS 1.1.20:
- Fixed two bugs in ntfs_readwrite_attr(). Thanks to Jan Kara for
spotting the out of bounds one.
- Check return value of set_blocksize() in ntfs_read_super() and make
use of get_hardsect_size() to determine the minimum block size.
- Fix return values of ntfs_vcn_to_lcn(). This should stop
people's start of partition being overwritten at random.
NTFS 1.1.19:
- Fixed ntfs_getdir_unsorted(), ntfs_readdir() and ntfs_printcb() to
cope with arbitrary cluster sizes. Very important for Win2k+. Also,
......
@@ -620,6 +620,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_getdents64) /* 220 */
.long SYMBOL_NAME(sys_fcntl64)
.long SYMBOL_NAME(sys_ni_syscall) /* reserved for TUX */
.long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)
......
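Both reserved slots route to the kernel's catch-all stub, which fails with -ENOSYS until a real handler claims the number. A minimal sketch of that stub, matching what the 2.4-era kernel/sys.c does as far as I know:

#include <linux/errno.h>	/* -ENOSYS */
#include <linux/linkage.h>	/* asmlinkage */

asmlinkage long sys_ni_syscall(void)
{
	/* Every unimplemented or reserved syscall lands here. */
	return -ENOSYS;
}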
@@ -579,6 +579,7 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags, int kind)
if (!ret) {
bdev->bd_openers++;
bdev->bd_inode->i_size = blkdev_size(rdev);
bdev->bd_inode->i_blkbits = blksize_bits(block_size(rdev));
} else if (!bdev->bd_openers)
bdev->bd_op = NULL;
}
......
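The added line derives i_blkbits from the device's current soft block size. blksize_bits() is the log2 helper; a sketch of its 2.4-style shift loop (the real definition lives in linux/fs.h):

static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;

	/* Block sizes are powers of two between 512 bytes and
	 * PAGE_SIZE, so count how many shifts it takes to get
	 * down to 256: 512 -> 9, 1024 -> 10, 4096 -> 12. */
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}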
@@ -71,11 +71,6 @@ static unsigned int bh_hash_shift;
static struct buffer_head **hash_table;
static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;
#define BUF_CLEAN 0
#define BUF_LOCKED 1 /* Buffers scheduled for write */
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
#define NR_LIST 3
static struct buffer_head *lru_list[NR_LIST];
static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
static int nr_buffers_type[NR_LIST];
@@ -672,10 +667,6 @@ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
if (!atomic_read(&bh->b_count)) {
if (destroy_dirty_buffers || !buffer_dirty(bh)) {
remove_inode_queue(bh);
#if 0
__remove_from_queues(bh);
put_last_free(bh);
#endif
}
} else
printk("invalidate: busy buffer\n");
@@ -1228,20 +1219,6 @@ static struct buffer_head * get_unused_buffer_head(int async)
}
spin_unlock(&unused_list_lock);
}
#if 0
/*
* (Pending further analysis ...)
* Ordinary (non-async) requests can use a different memory priority
* to free up pages. Any swapping thus generated will use async
* buffer heads.
*/
if(!async &&
(bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
memset(bh, 0, sizeof(*bh));
init_waitqueue_head(&bh->b_wait);
return bh;
}
#endif
return NULL;
}
@@ -1395,11 +1372,8 @@ int discard_bh_page(struct page *page, unsigned long offset, int drop_pagecache)
* instead.
*/
if (!offset) {
if (!try_to_free_buffers(page, 0)) {
if (drop_pagecache)
atomic_inc(&buffermem_pages);
if (!try_to_free_buffers(page, 0))
return 0;
}
}
return 1;
@@ -2228,12 +2202,27 @@ int block_symlink(struct inode *inode, const char *symname, int len)
return err;
}
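/*
 * Close the singly-linked list built by create_buffers() into a ring
 * via b_this_page and attach it to the page, taking a page reference.
 */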
static inline void link_dev_buffers(struct page * page, struct buffer_head *head)
{
struct buffer_head *bh, *tail;
bh = head;
do {
tail = bh;
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
page->buffers = head;
page_cache_get(page);
}
/*
* Create the page-cache page that contains the requested block
*/
static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size)
{
struct page * page;
struct buffer_head *bh;
page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS);
if (IS_ERR(page))
@@ -2242,22 +2231,18 @@ static struct page * grow_dev_page(struct block_device *bdev, unsigned long inde
if (!PageLocked(page))
BUG();
if (!page->buffers) {
struct buffer_head *bh, *tail;
struct buffer_head *head = create_buffers(page, size, 0);
if (!head)
bh = page->buffers;
if (bh) {
if (bh->b_size == size)
return page;
if (!try_to_free_buffers(page, GFP_NOFS))
goto failed;
bh = head;
do {
tail = bh;
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
page->buffers = head;
page_cache_get(page);
atomic_inc(&buffermem_pages);
}
bh = create_buffers(page, size, 0);
if (!bh)
goto failed;
link_dev_buffers(page, bh);
return page;
failed:
@@ -2336,6 +2321,9 @@ static int grow_buffers(kdev_t dev, unsigned long block, int size)
hash_page_buffers(page, dev, block, size);
UnlockPage(page);
page_cache_release(page);
/* We hashed up this page, so increment buffermem */
atomic_inc(&buffermem_pages);
return 1;
}
......
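With link_dev_buffers() in place, page->buffers always holds a circular ring, and the buffermem accounting moves out to grow_buffers(), where the page actually gets hashed. A hypothetical traversal sketch, where do_something() stands in for real per-buffer work:

	struct buffer_head *bh = page->buffers;	/* head of the ring */
	do {
		do_something(bh);		/* hypothetical callback */
		bh = bh->b_this_page;
	} while (bh != page->buffers);		/* ring closes on the head */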
@@ -5,7 +5,7 @@ O_TARGET := ntfs.o
obj-y := fs.o sysctl.o support.o util.o inode.o dir.o super.o attr.o unistr.o
obj-m := $(O_TARGET)
# New version format started 3 February 2001.
EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.19\" #-DDEBUG
EXTRA_CFLAGS = -DNTFS_VERSION=\"1.1.20\" #-DDEBUG
include $(TOPDIR)/Rules.make
@@ -27,6 +27,7 @@
#include <linux/locks.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <asm/page.h>
#include <linux/nls.h>
#include <linux/ntfs_fs.h>
@@ -1012,22 +1013,27 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
{
ntfs_volume *vol;
struct buffer_head *bh;
int i, to_read;
int i, to_read, blocksize;
ntfs_debug(DEBUG_OTHER, "ntfs_read_super\n");
vol = NTFS_SB2VOL(sb);
init_ntfs_super_block(vol);
if (!parse_options(vol, (char*)options))
goto ntfs_read_super_vol;
/* Assume a 512 bytes block device for now. */
set_blocksize(sb->s_dev, 512);
blocksize = get_hardsect_size(sb->s_dev);
if (blocksize < 512)
blocksize = 512;
if (set_blocksize(sb->s_dev, blocksize) < 0) {
ntfs_error("Unable to set blocksize %d.\n", blocksize);
goto ntfs_read_super_vol;
}
/* Read the super block (boot block). */
if (!(bh = bread(sb->s_dev, 0, 512))) {
if (!(bh = bread(sb->s_dev, 0, blocksize))) {
ntfs_error("Reading super block failed\n");
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "Done reading boot block\n");
/* Check for 'NTFS' magic number */
/* Check for valid 'NTFS' boot sector. */
if (!is_boot_sector_ntfs(bh->b_data)) {
ntfs_debug(DEBUG_OTHER, "Not a NTFS volume\n");
bforget(bh);
@@ -1040,7 +1046,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "$Mft at cluster 0x%lx\n", vol->mft_lcn);
bforget(bh);
brelse(bh);
NTFS_SB(vol) = sb;
if (vol->cluster_size > PAGE_SIZE) {
ntfs_error("Partition cluster size is not supported yet (it "
@@ -1050,9 +1056,12 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
/* Inform the kernel that a device block is a NTFS cluster. */
sb->s_blocksize = vol->cluster_size;
for (i = sb->s_blocksize, sb->s_blocksize_bits = 0; i != 1; i >>= 1)
sb->s_blocksize_bits++;
set_blocksize(sb->s_dev, sb->s_blocksize);
sb->s_blocksize_bits = vol->cluster_size_bits;
if (blocksize != vol->cluster_size &&
set_blocksize(sb->s_dev, sb->s_blocksize) < 0) {
ntfs_error("Cluster size too small for device.\n");
goto ntfs_read_super_unl;
}
ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
/* Allocate an MFT record (MFT record can be smaller than a cluster). */
i = vol->cluster_size;
......
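The deleted shift loop computed log2 of the block size by hand; assigning the precomputed vol->cluster_size_bits is equivalent because NTFS cluster sizes are powers of two. A quick check of the old loop's arithmetic for a 4 KiB cluster:

	int i, bits;

	for (i = 4096, bits = 0; i != 1; i >>= 1)
		bits++;
	/* bits == 12 and 1 << 12 == 4096, i.e. cluster_size_bits. */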
@@ -592,18 +592,23 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset,
* If write extends beyond _allocated_ size, extend attribute,
* updating attr->allocated and attr->size in the process. (AIA)
*/
if (offset + l > attr->allocated) {
if ((!attr->resident && offset + l > attr->allocated) ||
(attr->resident && offset + l > attr->size)) {
error = ntfs_resize_attr(ino, attr, offset + l);
if (error)
return error;
} else if (offset + l > attr->size)
/* If amount of data has increased: update. */
attr->size = offset + l;
/* If amount of initialised data has increased: update. */
if (offset + l > attr->initialized) {
/* FIXME: Zero-out the section between the old
* initialised length and the write start. (AIA) */
attr->initialized = offset + l;
}
if (!attr->resident) {
/* Has amount of data increased? */
if (offset + l > attr->size)
attr->size = offset + l;
/* Has amount of initialised data increased? */
if (offset + l > attr->initialized) {
/* FIXME: Clear the section between the old
* initialised length and the write start.
* (AIA) */
attr->initialized = offset + l;
}
}
}
if (attr->resident) {
@@ -619,10 +624,11 @@ int ntfs_readwrite_attr(ntfs_inode *ino, ntfs_attribute *attr, __s64 offset,
if (offset >= attr->initialized)
return ntfs_read_zero(dest, l);
if (offset + l > attr->initialized) {
dest->size = chunk = offset + l - attr->initialized;
dest->size = chunk = attr->initialized - offset;
error = ntfs_readwrite_attr(ino, attr, offset, dest);
if (error)
if (error || (dest->size != chunk && (error = -EIO, 1)))
return error;
dest->size += l - chunk;
return ntfs_read_zero(dest, l - chunk);
}
if (attr->flags & ATTR_IS_COMPRESSED)
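This chunk computation is the out-of-bounds bug credited to Jan Kara in the changelog: the old expression yielded the length of the zero tail rather than of the remaining initialised data. With hypothetical values attr->initialized == 120, offset == 100 and l == 50:

	/* Old: chunk = offset + l - attr->initialized = 30, so the
	 * recursive read fetched 30 bytes from offset 100 and ran 10
	 * bytes past the initialised data.
	 * New: chunk = attr->initialized - offset = 20, so the read
	 * covers exactly the 20 initialised bytes and ntfs_read_zero()
	 * supplies the remaining l - chunk = 30 bytes. */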
@@ -707,31 +713,25 @@ int ntfs_write_attr(ntfs_inode *ino, int type, char *name, __s64 offset,
return ntfs_readwrite_attr(ino, attr, offset, buf);
}
/* -2 = error, -1 = hole, >= 0 means real disk cluster (lcn). */
int ntfs_vcn_to_lcn(ntfs_inode *ino, int vcn)
{
int rnum;
ntfs_attribute *data;
data = ntfs_find_attr(ino, ino->vol->at_data, 0);
/* It's hard to give an error code. */
if (!data || data->resident || data->flags & (ATTR_IS_COMPRESSED |
ATTR_IS_ENCRYPTED))
return -1;
return -2;
if (data->size <= (__s64)vcn << ino->vol->cluster_size_bits)
return -1;
/*
* For Linux, block number 0 represents a hole. - No problem as we do
* not support bmap in any form whatsoever. The FIBMAP sys call is
* deprecated anyway and NTFS is not a block based file system so
* allowing bmapping is complete and utter garbage IMO. Use mmap once
* we implement it... (AIA)
*/
return -2;
if (data->initialized <= (__s64)vcn << ino->vol->cluster_size_bits)
return 0;
return -1;
for (rnum = 0; rnum < data->d.r.len &&
vcn >= data->d.r.runlist[rnum].len; rnum++)
vcn -= data->d.r.runlist[rnum].len;
/* We need to cope with sparse runs. (AIA) */
if (data->d.r.runlist[rnum].lcn >= 0)
return data->d.r.runlist[rnum].lcn + vcn;
}
......
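With the new convention (-2 error, -1 hole, >= 0 a real lcn), callers can finally distinguish lookup failures from sparse runs. A hypothetical caller sketch, where supply_zeroes() and read_cluster() stand in for real handling:

	int lcn = ntfs_vcn_to_lcn(ino, vcn);

	if (lcn == -2)
		return -EIO;		/* lookup failed outright */
	if (lcn == -1)
		supply_zeroes();	/* sparse hole: no disk cluster */
	else
		read_cluster(lcn);	/* read from on-disk cluster lcn */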
@@ -227,6 +227,7 @@
#define __NR_madvise1 219 /* delete when C lib stub is removed */
#define __NR_getdents64 220
#define __NR_fcntl64 221
#define __NR_security 223 /* syscall for security modules */
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
......
@@ -1087,6 +1087,11 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
/* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ;
#define BUF_CLEAN 0
#define BUF_LOCKED 1 /* Buffers scheduled for write */
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
#define NR_LIST 3
static inline void get_bh(struct buffer_head * bh)
{
atomic_inc(&(bh)->b_count);
......
@@ -67,8 +67,8 @@ int vm_enough_memory(long pages)
if (sysctl_overcommit_memory)
return 1;
free = atomic_read(&buffermem_pages);
free += atomic_read(&page_cache_size);
/* The page cache contains buffer pages these days.. */
free = atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;
......
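Buffer pages now live in the page cache, so adding buffermem_pages on top of page_cache_size counted them twice. With hypothetical counters, say 2000 page-cache pages of which 500 back buffers, 1000 free pages and 4000 swap pages:

	/* Old estimate: 500 + 2000 + 1000 + 4000 = 7500 pages
	 * (the 500 buffer pages counted twice).
	 * New estimate:       2000 + 1000 + 4000 = 7000 pages. */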