Commit 0f9ee687 authored by Anton Altaparmakov

Merge cantab.net:/home/aia21/bklinux-2.5

into cantab.net:/home/aia21/ntfs-2.5
parents d1f880ab d73e26f5
@@ -247,6 +247,16 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.1.3:
+	- Major bug fixes for reading files and volumes in corner cases which
+	  were being hit by Windows 2k/XP users.
+2.1.2:
+	- Major bug fixes aleviating the hangs in statfs experienced by some
+	  users.
+2.1.1:
+	- Update handling of compressed files so people no longer get the
+	  frequently reported warning messages about initialized_size !=
+	  data_size.
 2.1.0:
 	- Add configuration option for developmental write support.
 	- Initial implementation of file overwriting. (Writes to resident files
...
@@ -20,6 +20,25 @@ ToDo:
 	  sufficient for synchronisation here. We then just need to make sure
 	  ntfs_readpage/writepage/truncate interoperate properly with us.
+2.1.3 - Important bug fixes in corner cases.
+	- super.c::parse_ntfs_boot_sector(): Correct the check for 64-bit
+	  clusters. (Philipp Thomas)
+	- attrib.c::load_attribute_list(): Fix bug when initialized_size is a
+	  multiple of the block_size but not the cluster size. (Szabolcs
+	  Szakacsits <szaka@sienet.hu>)
+2.1.2 - Important bug fixes aleviating the hangs in statfs.
+	- Fix buggy free cluster and free inode determination logic.
+2.1.1 - Minor updates.
+	- Add handling for initialized_size != data_size in compressed files.
+	- Reduce function local stack usage from 0x3d4 bytes to just noise in
+	  fs/ntfs/upcase.c. (Randy Dunlap <rddunlap@osdl.ord>)
+	- Remove compiler warnings for newer gcc.
 2.1.0 - First steps towards write support: implement file overwrite.
 	- Add configuration option for developmental write support with an
...
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
 	     mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.0\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.3\"
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
...
 /**
  * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1180,12 +1180,15 @@ BOOL find_attr(const ATTR_TYPES type, const uchar_t *name, const u32 name_len,
 			return TRUE;
 		/* @val is present; compare values. */
 		else {
+			u32 vl;
 			register int rc;
+			vl = le32_to_cpu(a->_ARA(value_length));
+			if (vl > val_len)
+				vl = val_len;
 			rc = memcmp(val, (u8*)a + le16_to_cpu(
-					a->_ARA(value_offset)),
-					min_t(const u32, val_len,
-					le32_to_cpu(a->_ARA(value_length))));
+					a->_ARA(value_offset)), vl);
 			/*
 			 * If @val collates before the current attribute's
 			 * value, there is no matching attribute.
@@ -1235,11 +1238,9 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	ntfs_debug("Entering.");
-#ifdef DEBUG
 	if (!vol || !run_list || !al || size <= 0 || initialized_size < 0 ||
 			initialized_size > size)
 		return -EINVAL;
-#endif
 	if (!initialized_size) {
 		memset(al, 0, size);
 		return 0;
@@ -1270,8 +1271,8 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 					"read attribute list.");
 			goto err_out;
 		}
-		if (al + block_size > al_end)
-			goto do_partial;
+		if (al + block_size >= al_end)
+			goto do_final;
 		memcpy(al, bh->b_data, block_size);
 		brelse(bh);
 		al += block_size;
@@ -1285,7 +1286,7 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 done:
 	up_read(&run_list->lock);
 	return err;
-do_partial:
+do_final:
 	if (al < al_end) {
 		/* Partial block. */
 		memcpy(al, bh->b_data, al_end - al);
...
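Note on the find_attr() hunk above: it replaces a min_t() expression with an explicit clamp of the memcmp() length to the smaller of @val_len and the attribute's value_length. A stand-alone sketch of that comparison pattern, using plain C types and hypothetical names rather than the driver's structures (the caller can then break ties on length):

#include <stdint.h>
#include <string.h>

/* Compare a search value against an attribute value over their common
 * prefix length only; ties are left to the caller to resolve. */
static int compare_value_prefix(const uint8_t *val, uint32_t val_len,
		const uint8_t *attr_val, uint32_t attr_val_len)
{
	uint32_t vl = attr_val_len;

	if (vl > val_len)
		vl = val_len;	/* clamp to the shorter value */
	return memcmp(val, attr_val, vl);
}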
@@ -2,8 +2,8 @@
  * compress.c - NTFS kernel compressed attributes handling.
  *		Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -44,7 +44,7 @@ typedef enum {
 	 * The maximum compression block size is by definition 16 * the cluster
 	 * size, with the maximum supported cluster size being 4kiB. Thus the
 	 * maximum compression buffer size is 64kiB, so we use this when
-	 * initializing the per-CPU buffers.
+	 * initializing the compression buffer.
 	 */
 	NTFS_MAX_CB_SIZE	= 64 * 1024,
 } ntfs_compression_constants;
@@ -88,6 +88,40 @@ void free_compression_buffers(void)
 	ntfs_compression_buffer = NULL;
 }
+
+/**
+ * zero_partial_compressed_page - zero out of bounds compressed page region
+ */
+static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+{
+	u8 *kp = page_address(page);
+	unsigned int kp_ofs;
+
+	ntfs_debug("Zeroing page region outside initialized size.");
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+		/*
+		 * FIXME: Using clear_page() will become wrong when we get
+		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
+		 */
+		clear_page(kp);
+		return;
+	}
+	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	return;
+}
+
+/**
+ * handle_bounds_compressed_page - test for&handle out of bounds compressed page
+ */
+static inline void handle_bounds_compressed_page(ntfs_inode *ni,
+		struct page *page)
+{
+	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(ni->initialized_size < VFS_I(ni)->i_size))
+		zero_partial_compressed_page(ni, page);
+	return;
+}
+
 /**
  * ntfs_decompress - decompress a compression block into an array of pages
  * @dest_pages:	destination array of pages
@@ -164,7 +198,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 			cb - cb_start);
 	/* Have we reached the end of the compression block? */
-	if (cb == cb_end || !le16_to_cpup(cb)) {
+	if (cb == cb_end || !le16_to_cpup((u16*)cb)) {
 		int i;
 		ntfs_debug("Completed. Returning success (0).");
@@ -173,19 +207,29 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		/* We can sleep from now on, so we drop lock. */
 		spin_unlock(&ntfs_cb_lock);
 		/* Second stage: finalize completed pages. */
-		for (i = 0; i < nr_completed_pages; i++) {
-			int di = completed_pages[i];
-
-			dp = dest_pages[di];
-			flush_dcache_page(dp);
-			kunmap(dp);
-			SetPageUptodate(dp);
-			unlock_page(dp);
-			if (di == xpage)
-				*xpage_done = 1;
-			else
-				page_cache_release(dp);
-			dest_pages[di] = NULL;
+		if (nr_completed_pages > 0) {
+			struct page *page = dest_pages[completed_pages[0]];
+			ntfs_inode *ni = NTFS_I(page->mapping->host);
+
+			for (i = 0; i < nr_completed_pages; i++) {
+				int di = completed_pages[i];
+
+				dp = dest_pages[di];
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, dp);
+				flush_dcache_page(dp);
+				kunmap(dp);
+				SetPageUptodate(dp);
+				unlock_page(dp);
+				if (di == xpage)
+					*xpage_done = 1;
+				else
+					page_cache_release(dp);
+				dest_pages[di] = NULL;
+			}
 		}
 		return err;
 	}
@@ -204,7 +248,8 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	/* Setup the current sub-block source pointers and validate range. */
 	cb_sb_start = cb;
-	cb_sb_end = cb_sb_start + (le16_to_cpup(cb) & NTFS_SB_SIZE_MASK) + 3;
+	cb_sb_end = cb_sb_start + (le16_to_cpup((u16*)cb) & NTFS_SB_SIZE_MASK)
+			+ 3;
 	if (cb_sb_end > cb_end)
 		goto return_overflow;
@@ -225,7 +270,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	dp_addr = (u8*)page_address(dp) + do_sb_start;
 	/* Now, we are ready to process the current sub-block (sb). */
-	if (!(le16_to_cpup(cb) & NTFS_SB_IS_COMPRESSED)) {
+	if (!(le16_to_cpup((u16*)cb) & NTFS_SB_IS_COMPRESSED)) {
 		ntfs_debug("Found uncompressed sub-block.");
 		/* This sb is not compressed, just copy it into destination. */
@@ -330,7 +375,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 			lg++;
 		/* Get the phrase token into i. */
-		pt = le16_to_cpup(cb);
+		pt = le16_to_cpup((u16*)cb);
 		/*
 		 * Calculate starting position of the byte sequence in
@@ -763,6 +808,11 @@ int ntfs_read_compressed_block(struct page *page)
 		for (; cur2_page < cb_max_page; cur2_page++) {
 			page = pages[cur2_page];
 			if (page) {
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, page);
 				flush_dcache_page(page);
 				kunmap(page);
 				SetPageUptodate(page);
...
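The new zero_partial_compressed_page()/handle_bounds_compressed_page() helpers above ensure that any part of a destination page lying at or beyond the inode's initialized_size reads back as zero. A rough user-space illustration of the same offset arithmetic, with a plain buffer and a hypothetical 4096-byte page standing in for struct page and the kernel constants:

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096UL	/* stand-in for PAGE_CACHE_SIZE */

/* Zero the part of one page-sized buffer that lies beyond
 * initialized_size; page_index says which page of the file the buffer
 * holds. Mirrors the logic of the helpers above using plain memory. */
static void zero_tail_beyond_initialized(uint8_t *page_buf,
		uint64_t page_index, int64_t initialized_size)
{
	uint64_t page_start = page_index * PAGE_SIZE;
	unsigned int ofs;

	if ((int64_t)page_start >= initialized_size) {
		/* Page lies entirely beyond the initialized data. */
		memset(page_buf, 0, PAGE_SIZE);
		return;
	}
	if (page_start + PAGE_SIZE <= (uint64_t)initialized_size)
		return;	/* Page is fully initialized, nothing to do. */
	/* Partial page: zero from initialized_size's in-page offset. */
	ofs = (unsigned int)(initialized_size & (PAGE_SIZE - 1));
	memset(page_buf + ofs, 0, PAGE_SIZE - ofs);
}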
 /**
  * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -996,16 +996,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
 			if (NInoCompressed(ni)) {
 				ni->_ICF(compressed_size) = sle64_to_cpu(
 						ctx->attr->_ANR(compressed_size));
-				if (vi->i_size != ni->initialized_size)
-					ntfs_warning(vi->i_sb, "BUG: Found "
-							"compressed file with "
-							"data_size not equal to "
-							"initialized_size. This will "
-							"probably cause problems when "
-							"trying to access the file. "
-							"Please notify linux-ntfs-dev@"
-							"lists.sf.net that you saw "
-							"this message. Thanks!");
 			}
 		} else { /* Resident attribute. */
 			/*
...
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (c) 2001,2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -619,9 +619,8 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
 	 * the same as it is much faster on 32-bit CPUs.
 	 */
 	ll = sle64_to_cpu(b->number_of_sectors) >> sectors_per_cluster_bits;
-	if ((u64)ll >= 1ULL << (sizeof(unsigned long) * 8)) {
-		ntfs_error(vol->sb, "Cannot handle %i-bit clusters. Sorry.",
-				sizeof(unsigned long) * 4);
+	if ((u64)ll >= 1ULL << 32) {
+		ntfs_error(vol->sb, "Cannot handle 64-bit clusters. Sorry.");
 		return FALSE;
 	}
 	vol->nr_clusters = ll;
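Context for the "Correct the check for 64-bit clusters" ChangeLog entry: the old test shifted 1ULL by sizeof(unsigned long) * 8, which is 32 on 32-bit builds but 64 on 64-bit builds, and shifting by the full width of the type is undefined in C. The new code spells out the 32-bit limit. A minimal stand-alone version of the corrected check, with a hypothetical helper name and plain C types:

#include <stdint.h>

/* Reject volumes whose cluster count does not fit in 32 bits, as the
 * corrected parse_ntfs_boot_sector() check above does. */
static int cluster_count_supported(int64_t nr_clusters)
{
	return (uint64_t)nr_clusters < (1ULL << 32);
}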
@@ -1060,78 +1059,93 @@ static void ntfs_put_super(struct super_block *vfs_sb)
  * get_nr_free_clusters - return the number of free clusters on a volume
  * @vol:	ntfs volume for which to obtain free cluster count
  *
- * Calculate the number of free clusters on the mounted NTFS volume @vol.
+ * Calculate the number of free clusters on the mounted NTFS volume @vol. We
+ * actually calculate the number of clusters in use instead because this
+ * allows us to not care about partial pages as these will be just zero filled
+ * and hence not be counted as allocated clusters.
  *
- * Errors are ignored and we just return the number of free clusters we have
- * found. This means we return an underestimate on error.
+ * The only particularity is that clusters beyond the end of the logical ntfs
+ * volume will be marked as allocated to prevent errors which means we have to
+ * discount those at the end. This is important as the cluster bitmap always
+ * has a size in multiples of 8 bytes, i.e. up to 63 clusters could be outside
+ * the logical volume and marked in use when they are not as they do not exist.
+ *
+ * If any pages cannot be read we assume all clusters in the erroring pages are
+ * in use. This means we return an underestimate on errors which is better than
+ * an overestimate.
  */
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
+	s64 nr_free = vol->nr_clusters;
+	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	unsigned long index, max_index;
-	unsigned int max_size, i;
-	s64 nr_free = 0LL;
-	u32 *b;
+	unsigned int max_size;
 	ntfs_debug("Entering.");
 	/* Serialize accesses to the cluster bitmap. */
 	down_read(&vol->lcnbmp_lock);
 	/*
 	 * Convert the number of bits into bytes rounded up, then convert into
-	 * multiples of PAGE_CACHE_SIZE.
+	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * full and one partial page max_index = 2.
 	 */
-	max_index = (vol->nr_clusters + 7) >> (3 + PAGE_CACHE_SHIFT);
+	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
 	/* Use multiples of 4 bytes. */
 	max_size = PAGE_CACHE_SIZE >> 2;
-	ntfs_debug("Reading $BITMAP, max_index = 0x%lx, max_size = 0x%x.",
+	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%x.",
 			max_index, max_size);
-	for (index = 0UL; index < max_index;) {
-handle_partial_page:
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index++, (filler_t*)readpage,
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
 				NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
 			ntfs_debug("Sync read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index - 1);
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
 		wait_on_page_locked(page);
+		/* Ignore pages which errored asynchronously. */
 		if (!PageUptodate(page)) {
 			ntfs_debug("Async read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index - 1);
-			/* Ignore pages which errored asynchronously. */
+					"page (index 0x%lx).", index);
 			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		b = (u32*)kmap(page);
-		/* For each 4 bytes, add up the number zero bits. */
-		for (i = 0; i < max_size; i++)
-			nr_free += (s64)(32 - hweight32(b[i]));
-		kunmap(page);
-		page_cache_release(page);
-	}
-	if (max_size == PAGE_CACHE_SIZE >> 2) {
-		/*
-		 * Get the multiples of 4 bytes in use in the final partial
-		 * page.
-		 */
-		max_size = ((((vol->nr_clusters + 7) >> 3) & ~PAGE_CACHE_MASK)
-				+ 3) >> 2;
-		/* If there is a partial page go back and do it. */
-		if (max_size) {
-			ntfs_debug("Handling partial page, max_size = 0x%x.",
-					max_size);
-			goto handle_partial_page;
-		}
-	}
-	ntfs_debug("Finished reading $BITMAP, last index = 0x%lx", index - 1);
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		/*
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
+		 */
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
+	}
+	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
+	/*
+	 * Fixup for eventual bits outside logical ntfs volume (see function
+	 * description above).
+	 */
+	if (vol->nr_clusters & 63)
+		nr_free += 64 - (vol->nr_clusters & 63);
 	up_read(&vol->lcnbmp_lock);
+	/* If errors occured we may well have gone below zero, fix this. */
+	if (nr_free < 0)
+		nr_free = 0;
 	ntfs_debug("Exiting.");
 	return nr_free;
 }
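The comment block above describes the new counting scheme: start from the total cluster count, subtract one for every set bit in the cluster bitmap, then credit back the padding bits past the end of the volume (the bitmap is sized in multiples of 8 bytes, so up to 63 such bits exist and are marked in use). A user-space sketch of that arithmetic, using the GCC/Clang __builtin_popcount() in place of the kernel's hweight32() and assuming the bitmap has already been read into memory:

#include <stdint.h>
#include <stddef.h>

/* words covers the whole on-disk bitmap in 32-bit units; bits beyond
 * nr_clusters are assumed to be set, as the driver comment states. */
static int64_t count_free_clusters(const uint32_t *bitmap, size_t words,
		int64_t nr_clusters)
{
	int64_t nr_free = nr_clusters;
	size_t i;

	for (i = 0; i < words; i++)	/* subtract allocated clusters */
		nr_free -= __builtin_popcount(bitmap[i]);
	if (nr_clusters & 63)		/* credit back the padding bits */
		nr_free += 64 - (nr_clusters & 63);
	return nr_free < 0 ? 0 : nr_free;	/* clamp, as the driver does */
}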
@@ -1141,64 +1155,81 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
  * @vol:	ntfs volume for which to obtain free inode count
  *
  * Calculate the number of free mft records (inodes) on the mounted NTFS
- * volume @vol.
+ * volume @vol. We actually calculate the number of mft records in use instead
+ * because this allows us to not care about partial pages as these will be just
+ * zero filled and hence not be counted as allocated mft record.
  *
- * Errors are ignored and we just return the number of free inodes we have
- * found. This means we return an underestimate on error.
+ * If any pages cannot be read we assume all mft records in the erroring pages
+ * are in use. This means we return an underestimate on errors which is better
+ * than an overestimate.
  *
  * NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
  */
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol)
 {
-	struct address_space *mapping;
+	s64 nr_free = vol->nr_mft_records;
+	u32 *kaddr;
+	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
+	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
-	unsigned long index, max_index, nr_free = 0;
-	unsigned int max_size, i;
-	u32 *b;
-	mapping = vol->mftbmp_ino->i_mapping;
+	unsigned long index, max_index;
+	unsigned int max_size;
+	ntfs_debug("Entering.");
 	/*
-	 * Convert the number of bits into bytes rounded up to a multiple of 8
-	 * bytes, then convert into multiples of PAGE_CACHE_SIZE.
+	 * Convert the number of bits into bytes rounded up, then convert into
+	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * full and one partial page max_index = 2.
 	 */
-	max_index = (((vol->nr_mft_records + 7) >> 3) + 7) >> PAGE_CACHE_SHIFT;
+	max_index = (((vol->nr_mft_records + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
 	/* Use multiples of 4 bytes. */
 	max_size = PAGE_CACHE_SIZE >> 2;
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%x.", max_index, max_size);
-	for (index = 0UL; index < max_index;) {
-handle_partial_page:
-		page = ntfs_map_page(mapping, index++);
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
+		/*
+		 * Read the page from page cache, getting it from backing store
+		 * if necessary, and increment the use count.
+		 */
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
+				NULL);
+		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("ntfs_map_page() error. Skipping page "
-					"(index 0x%lx).", index - 1);
+			ntfs_debug("Sync read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		b = (u32*)page_address(page);
-		/* For each 4 bytes, add up the number of zero bits. */
-		for (i = 0; i < max_size; i++)
-			nr_free += 32 - hweight32(b[i]);
-		ntfs_unmap_page(page);
-	}
-	if (index == max_index) {
+		wait_on_page_locked(page);
+		/* Ignore pages which errored asynchronously. */
+		if (!PageUptodate(page)) {
+			ntfs_debug("Async read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
-		 * Get the multiples of 4 bytes in use in the final partial
-		 * page.
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
 		 */
-		max_size = ((((((vol->nr_mft_records + 7) >> 3) + 7) & ~7) &
-				~PAGE_CACHE_MASK) + 3) >> 2;
-		/* If there is a partial page go back and do it. */
-		if (max_size) {
-			/* Compensate for out of bounds zero bits. */
-			if ((i = vol->nr_mft_records & 31))
-				nr_free -= 32 - i;
-			ntfs_debug("Handling partial page, max_size = 0x%x",
-					max_size);
-			goto handle_partial_page;
-		}
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
 	}
-	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx",
+	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
 			index - 1);
+	/* If errors occured we may well have gone below zero, fix this. */
+	if (nr_free < 0)
+		nr_free = 0;
+	ntfs_debug("Exiting.");
 	return nr_free;
 }
@@ -1761,7 +1792,7 @@ static void __exit exit_ntfs_fs(void)
 }
 MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2002 Anton Altaparmakov");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2003 Anton Altaparmakov");
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
 MODULE_PARM(debug_msgs, "i");
...
 /*
  * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -96,10 +96,12 @@ int ntfs_collate_names(const uchar_t *name1, const u32 name1_len,
 		const int err_val, const IGNORE_CASE_BOOL ic,
 		const uchar_t *upcase, const u32 upcase_len)
 {
-	u32 cnt;
-	const u32 min_len = min_t(const u32, name1_len, name2_len);
+	u32 cnt, min_len;
 	uchar_t c1, c2;
+	min_len = name1_len;
+	if (name1_len > name2_len)
+		min_len = name2_len;
 	for (cnt = 0; cnt < min_len; ++cnt) {
 		c1 = le16_to_cpu(*name1++);
 		c2 = le16_to_cpu(*name2++);
...
@@ -2,8 +2,8 @@
  * upcase.c - Generate the full NTFS Unicode upcase table in little endian.
  *	      Part of the Linux-NTFS project.
  *
- * Copyright (C) 2001 Richard Russon <ntfs@flatcap.org>
- * Copyright (c) 2001,2002 Anton Altaparmakov
+ * Copyright (c) 2001 Richard Russon <ntfs@flatcap.org>
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * Modified for mkntfs inclusion 9 June 2001 by Anton Altaparmakov.
  * Modified for kernel inclusion 10 September 2001 by Anton Altparmakov.
@@ -28,7 +28,7 @@
 uchar_t *generate_default_upcase(void)
 {
-	const int uc_run_table[][3] = { /* Start, End, Add */
+	static const int uc_run_table[][3] = { /* Start, End, Add */
 	{0x0061, 0x007B, -32}, {0x0451, 0x045D, -80}, {0x1F70, 0x1F72, 74},
 	{0x00E0, 0x00F7, -32}, {0x045E, 0x0460, -80}, {0x1F72, 0x1F76, 86},
 	{0x00F8, 0x00FF, -32}, {0x0561, 0x0587, -48}, {0x1F76, 0x1F78, 100},
@@ -45,7 +45,7 @@ uchar_t *generate_default_upcase(void)
 	{0}
 	};
-	const int uc_dup_table[][2] = { /* Start, End */
+	static const int uc_dup_table[][2] = { /* Start, End */
 	{0x0100, 0x012F}, {0x01A0, 0x01A6}, {0x03E2, 0x03EF}, {0x04CB, 0x04CC},
 	{0x0132, 0x0137}, {0x01B3, 0x01B7}, {0x0460, 0x0481}, {0x04D0, 0x04EB},
 	{0x0139, 0x0149}, {0x01CD, 0x01DD}, {0x0490, 0x04BF}, {0x04EE, 0x04F5},
@@ -55,7 +55,7 @@ uchar_t *generate_default_upcase(void)
 	{0}
 	};
-	const int uc_word_table[][2] = { /* Offset, Value */
+	static const int uc_word_table[][2] = { /* Offset, Value */
 	{0x00FF, 0x0178}, {0x01AD, 0x01AC}, {0x01F3, 0x01F1}, {0x0269, 0x0196},
 	{0x0183, 0x0182}, {0x01B0, 0x01AF}, {0x0253, 0x0181}, {0x026F, 0x019C},
 	{0x0185, 0x0184}, {0x01B9, 0x01B8}, {0x0254, 0x0186}, {0x0272, 0x019D},
...