Commit eeb382a1 authored by Anton Altaparmakov

NTFS 2.0.3: Small bug fixes, cleanups, and performance improvements.

- Remove some dead code from mft.c.
- Optimize readpage and read_block functions throughout aops.c so that
  only initialized blocks are read. Non-initialized ones have their
  buffer head mapped, zeroed, and set up to date, without scheduling
  any i/o; see the first sketch after this list. Thanks to Al Viro for
  advice on how to avoid the device i/o.
Thanks go to Andrew Morton for spotting the below:
- Fix buglet in allocate_compression_buffers() error code path; see the
  second sketch after this list.
- Call flush_dcache_page() after modifying page cache page contents in
  ntfs_file_readpage().
- Check for existence of page buffers throughout aops.c before calling
  create_empty_buffers(). This happens when an I/O error occurs and the
  read is retried. (It will also happen once writing is implemented, so it
  needed doing anyway; I had just left it for later...)
- Don't BUG_ON() uptodate and/or mapped buffers throughout aops.c in
  readpage and read_block functions. Reasoning same as above (i.e. I/O
  error retries and future write code paths).
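
First sketch: a condensed, simplified view of the per-buffer logic the aops.c
read paths now share. This is not verbatim driver code; the run-list lookup
and the loop tail are elided or paraphrased, and marking the zeroed buffer up
to date is shown in the same b_state style the driver uses. Names follow the
diff below.

    lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
    zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
    do {
            if (unlikely(buffer_uptodate(bh)))
                    continue;       /* E.g. a retried read: nothing to do. */
            if (unlikely(buffer_mapped(bh))) {
                    arr[nr++] = bh; /* Already mapped: just read it. */
                    continue;
            }
            /* ... map the buffer from the run list (elided) ... */
            bh->b_state |= (1UL << BH_Mapped);
            /* Only schedule i/o for blocks holding initialized data. */
            if (iblock < zblock) {
                    arr[nr++] = bh;
                    continue;
            }
            /* Beyond initialized size: zero in place, no device i/o. */
            memset(kmap(page) + i * blocksize, 0, blocksize);
            flush_dcache_page(page);
            kunmap(page);
            bh->b_state |= (1UL << BH_Uptodate);
    } while (i++, iblock++, (bh = bh->b_this_page) != head);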
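
Second sketch: the allocate_compression_buffers() buglet spelled out. The old
cleanup loop tested "i < j", which is false on entry (j starts at 0), so the
per-CPU buffers that had already been allocated were never freed on the error
path. The corrected error path, condensed from the diff below:

    /* Allocation of buffer i failed: free the i buffers that did succeed,
       then the pointer array itself, and return the error. */
    for (j = 0; j < i; j++)
            vfree(ntfs_compression_buffers[j]);
    kfree(ntfs_compression_buffers);
    return -ENOMEM;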
parent 43b706e9
@@ -168,6 +168,8 @@ ChangeLog
 Note that a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.0.3:
+- Small bug fixes, cleanups, and performance improvements.
 2.0.2:
 - Use default fmask of 0177 so that files are no executable by default.
   If you want owner executable files, just use fmask=0077.
@@ -177,7 +179,6 @@ Note that a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 2.0.1:
 - Minor updates, primarily set the executable bit by default on files
   so they can be executed.
 2.0.0:
 - Started ChangeLog.
@@ -27,6 +27,25 @@ ToDo:
   quite big. Modularising them a bit, e.g. a-la get_block(), will make
   them cleaner and make code reuse easier.
+2.0.3 - Small bug fixes, cleanups, and performance improvements.
+- Remove some dead code from mft.c.
+- Optimize readpage and read_block functions throughout aops.c so that
+  only initialized blocks are read. Non-initialized ones have their
+  buffer head mapped, zeroed, and set up to date, without scheduling
+  any i/o. Thanks to Al Viro for advice on how to avoid the device i/o.
+Thanks go to Andrew Morton for spotting the below:
+- Fix buglet in allocate_compression_buffers() error code path.
+- Call flush_dcache_page() after modifying page cache page contents in
+  ntfs_file_readpage().
+- Check for existence of page buffers throughout aops.c before calling
+  create_empty_buffers(). This happens when an I/O error occurs and the
+  read is retried. (It also happens once writing is implemented so that
+  needed doing anyway but I had left it for later...)
+- Don't BUG_ON() uptodate and/or mapped buffers throughout aops.c in
+  readpage and read_block functions. Reasoning same as above (i.e. I/O
+  error retries and future write code paths.)
 2.0.2 - Minor updates and cleanups.
 - Cleanup: rename mst.c::__post_read_mst_fixup to post_write_mst_fixup
...
@@ -7,7 +7,7 @@ obj-y := time.o unistr.o inode.o file.o mft.o super.o debug.o aops.o \
 obj-m := $(O_TARGET)
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.2\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.3\"
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
...
-/*
+/**
  * aops.c - NTFS kernel address space operations and page cache handling.
  * Part of the Linux-NTFS project.
  *
@@ -35,7 +35,9 @@
 #define page_buffers(page) (page)->buffers
 #endif
-/*
+/**
+ * end_buffer_read_file_async -
+ *
  * Async io completion handler for accessing files. Adapted from
  * end_buffer_read_mst_async().
  */
@@ -94,7 +96,11 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
 return;
 }
-/* NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage(). */
+/**
+ * ntfs_file_read_block -
+ *
+ * NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage().
+ */
 static int ntfs_file_read_block(struct page *page)
 {
 VCN vcn;
@@ -102,7 +108,7 @@ static int ntfs_file_read_block(struct page *page)
 ntfs_inode *ni;
 ntfs_volume *vol;
 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
-sector_t iblock, lblock;
+sector_t iblock, lblock, zblock;
 unsigned int blocksize, blocks, vcn_ofs;
 int i, nr;
 unsigned char blocksize_bits;
@@ -113,6 +119,7 @@ static int ntfs_file_read_block(struct page *page)
 blocksize_bits = VFS_I(ni)->i_blkbits;
 blocksize = 1 << blocksize_bits;
+if (!page_has_buffers(page))
 create_empty_buffers(page, blocksize);
 bh = head = page_buffers(page);
 if (!bh)
@@ -121,6 +128,7 @@ static int ntfs_file_read_block(struct page *page)
 blocks = PAGE_CACHE_SIZE >> blocksize_bits;
 iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
+zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
 #ifdef DEBUG
 if (unlikely(!ni->mft_no)) {
@@ -133,7 +141,12 @@ static int ntfs_file_read_block(struct page *page)
 /* Loop through all the buffers in the page. */
 nr = i = 0;
 do {
-BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+if (unlikely(buffer_uptodate(bh)))
+continue;
+if (unlikely(buffer_mapped(bh))) {
+arr[nr++] = bh;
+continue;
+}
 bh->b_dev = VFS_I(ni)->i_dev;
 /* Is the block within the allowed limits? */
 if (iblock < lblock) {
@@ -155,9 +168,14 @@ static int ntfs_file_read_block(struct page *page)
 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 + vcn_ofs) >> blocksize_bits;
 bh->b_state |= (1UL << BH_Mapped);
+/* Only read initialized data blocks. */
+if (iblock < zblock) {
 arr[nr++] = bh;
 continue;
 }
+/* Fully non-initialized data block, zero it. */
+goto handle_zblock;
+}
 /* It is a hole, need to zero it. */
 if (lcn == LCN_HOLE)
 goto handle_hole;
@@ -183,6 +201,7 @@ static int ntfs_file_read_block(struct page *page)
 handle_hole:
 bh->b_blocknr = -1UL;
 bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
 memset(kmap(page) + i * blocksize, 0, blocksize);
 flush_dcache_page(page);
 kunmap(page);
@@ -301,6 +320,7 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
 bytes);
 } else
 memset(addr, 0, PAGE_CACHE_SIZE);
+flush_dcache_page(page);
 kunmap(page);
 SetPageUptodate(page);
@@ -313,7 +333,9 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
 return err;
 }
-/*
+/**
+ * end_buffer_read_mftbmp_async -
+ *
  * Async io completion handler for accessing mft bitmap. Adapted from
  * end_buffer_read_mst_async().
  */
@@ -373,13 +395,17 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
 return;
 }
-/* Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage(). */
+/**
+ * ntfs_mftbmp_readpage -
+ *
+ * Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage().
+ */
 static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 {
 VCN vcn;
 LCN lcn;
 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
-sector_t iblock, lblock;
+sector_t iblock, lblock, zblock;
 unsigned int blocksize, blocks, vcn_ofs;
 int nr, i;
 unsigned char blocksize_bits;
@@ -390,6 +416,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 blocksize = vol->sb->s_blocksize;
 blocksize_bits = vol->sb->s_blocksize_bits;
+if (!page_has_buffers(page))
 create_empty_buffers(page, blocksize);
 bh = head = page_buffers(page);
 if (!bh)
@@ -398,11 +425,18 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 blocks = PAGE_CACHE_SIZE >> blocksize_bits;
 iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 lblock = (vol->mftbmp_allocated_size + blocksize - 1) >> blocksize_bits;
+zblock = (vol->mftbmp_initialized_size + blocksize - 1) >>
+blocksize_bits;
 /* Loop through all the buffers in the page. */
 nr = i = 0;
 do {
-BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+if (unlikely(buffer_uptodate(bh)))
+continue;
+if (unlikely(buffer_mapped(bh))) {
+arr[nr++] = bh;
+continue;
+}
 bh->b_dev = vol->mft_ino->i_dev;
 /* Is the block within the allowed limits? */
 if (iblock < lblock) {
@@ -421,9 +455,14 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 + vcn_ofs) >> blocksize_bits;
 bh->b_state |= (1UL << BH_Mapped);
+/* Only read initialized data blocks. */
+if (iblock < zblock) {
 arr[nr++] = bh;
 continue;
 }
+/* Fully non-initialized data block, zero it. */
+goto handle_zblock;
+}
 if (lcn != LCN_HOLE) {
 /* Hard error, zero out region. */
 SetPageError(page);
@@ -442,6 +481,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 */
 bh->b_blocknr = -1UL;
 bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
 memset(kmap(page) + i * blocksize, 0, blocksize);
 flush_dcache_page(page);
 kunmap(page);
@@ -593,15 +633,6 @@ static void end_buffer_read_mst_async(struct buffer_head *bh, int uptodate)
 * the page before finally marking it uptodate and unlocking it.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
- *
- * TODO:/FIXME: The current implementation is simple but wasteful as we perform
- * actual i/o from disk for all data up to allocated size completely ignoring
- * the fact that initialized size, and data size for that matter, may well be
- * lower and hence there is no point in reading them in. We can just zero the
- * page range, which is what is currently done in our async i/o completion
- * handler anyway, once the read from disk completes. However, I am not sure how
- * to setup the buffer heads in that case, so for now we do the pointless i/o.
- * Any help with this would be appreciated...
 */
 int ntfs_mst_readpage(struct file *dir, struct page *page)
 {
@@ -610,7 +641,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 ntfs_inode *ni;
 ntfs_volume *vol;
 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
-sector_t iblock, lblock;
+sector_t iblock, lblock, zblock;
 unsigned int blocksize, blocks, vcn_ofs;
 int i, nr;
 unsigned char blocksize_bits;
@@ -624,6 +655,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 blocksize_bits = VFS_I(ni)->i_blkbits;
 blocksize = 1 << blocksize_bits;
+if (!page_has_buffers(page))
 create_empty_buffers(page, blocksize);
 bh = head = page_buffers(page);
 if (!bh)
@@ -632,6 +664,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 blocks = PAGE_CACHE_SIZE >> blocksize_bits;
 iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
+zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
 #ifdef DEBUG
 if (unlikely(!ni->run_list.rl && !ni->mft_no))
@@ -642,7 +675,12 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 /* Loop through all the buffers in the page. */
 nr = i = 0;
 do {
-BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
+if (unlikely(buffer_uptodate(bh)))
+continue;
+if (unlikely(buffer_mapped(bh))) {
+arr[nr++] = bh;
+continue;
+}
 bh->b_dev = VFS_I(ni)->i_dev;
 /* Is the block within the allowed limits? */
 if (iblock < lblock) {
@@ -664,9 +702,14 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 + vcn_ofs) >> blocksize_bits;
 bh->b_state |= (1UL << BH_Mapped);
+/* Only read initialized data blocks. */
+if (iblock < zblock) {
 arr[nr++] = bh;
 continue;
 }
+/* Fully non-initialized data block, zero it. */
+goto handle_zblock;
+}
 /* It is a hole, need to zero it. */
 if (lcn == LCN_HOLE)
 goto handle_hole;
@@ -692,6 +735,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 handle_hole:
 bh->b_blocknr = -1UL;
 bh->b_state &= ~(1UL << BH_Mapped);
+handle_zblock:
 memset(kmap(page) + i * blocksize, 0, blocksize);
 flush_dcache_page(page);
 kunmap(page);
@@ -721,7 +765,9 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 return nr;
 }
-/* Address space operations for accessing normal file data. */
+/**
+ * ntfs_file_aops - address space operations for accessing normal file data
+ */
 struct address_space_operations ntfs_file_aops = {
 writepage: NULL, /* Write dirty page to disk. */
 readpage: ntfs_file_readpage, /* Fill page with data. */
@@ -733,7 +779,9 @@ struct address_space_operations ntfs_file_aops = {
 typedef int readpage_t(struct file *, struct page *);
-/* Address space operations for accessing mftbmp. */
+/**
+ * ntfs_mftbmp_aops - address space operations for accessing mftbmp
+ */
 struct address_space_operations ntfs_mftbmp_aops = {
 writepage: NULL, /* Write dirty page to disk. */
 readpage: (readpage_t*)ntfs_mftbmp_readpage, /* Fill page with
@@ -744,7 +792,9 @@ struct address_space_operations ntfs_mftbmp_aops = {
 commit_write: NULL, /* . */
 };
-/*
+/**
+ * ntfs_dir_aops -
+ *
 * Address space operations for accessing normal directory data (i.e. index
 * allocation attribute). We can't just use the same operations as for files
 * because 1) the attribute is different and even more importantly 2) the index
...
@@ -69,7 +69,7 @@ int allocate_compression_buffers(void)
 BUG_ON(ntfs_compression_buffers);
-ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8 *),
+ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8*),
 GFP_KERNEL);
 if (!ntfs_compression_buffers)
 return -ENOMEM;
@@ -81,7 +81,7 @@ int allocate_compression_buffers(void)
 if (i == smp_num_cpus)
 return 0;
 /* Allocation failed, cleanup and return error. */
-for (j = 0; i < j; j++)
+for (j = 0; j < i; j++)
 vfree(ntfs_compression_buffers[j]);
 kfree(ntfs_compression_buffers);
 return -ENOMEM;
...
@@ -69,44 +69,6 @@ static void __format_mft_record(MFT_RECORD *m, const int size,
 a->length = cpu_to_le32(0);
 }
-/**
- * format_mft_record2 - initialize an empty mft record
- * @vfs_sb: vfs super block of volume
- * @inum: mft record number / inode number to format
- * @mft_rec: mapped, pinned and locked mft record (optional)
- *
- * Initialize an empty mft record. This is used when extending the MFT.
- *
- * If @mft_rec is NULL, we call map_mft_record() to obtain the record and we
- * unmap it again when finished.
- *
- * We return 0 on success or -errno on error.
- */
-#if 0
-// Can't do this as iget_map_mft_record no longer exists...
-int format_mft_record2(struct super_block *vfs_sb, const unsigned long inum,
-MFT_RECORD *mft_rec)
-{
-MFT_RECORD *m;
-ntfs_inode *ni;
-if (mft_rec)
-m = mft_rec;
-else {
-m = iget_map_mft_record(WRITE, vfs_sb, inum, &ni);
-if (IS_ERR(m))
-return PTR_ERR(m);
-}
-__format_mft_record(m, NTFS_SB(vfs_sb)->mft_record_size, inum);
-if (!mft_rec) {
-// TODO: dirty mft record
-unmap_mft_record(WRITE, ni);
-// TODO: Do stuff to get rid of the ntfs_inode
-}
-return 0;
-}
-#endif
 /**
 * format_mft_record - initialize an empty mft record
 * @ni: ntfs inode of mft record
@@ -340,71 +302,6 @@ MFT_RECORD *map_mft_record(const int rw, ntfs_inode *ni)
 return m;
 }
-/**
- * iget_map_mft_record - iget, map, pin, lock an mft record
- * @rw: map for read (rw = READ) or write (rw = WRITE)
- * @vfs_sb: vfs super block of mounted volume
- * @inum: inode number / MFT record number whose mft record to map
- * @vfs_ino: output parameter which we set to the inode on successful return
- *
- * Does the same as map_mft_record(), except that it starts out only with the
- * knowledge of the super block (@vfs_sb) and the mft record number which is of
- * course the same as the inode number (@inum).
- *
- * On success, *@vfs_ino will contain a pointer to the inode structure of the
- * mft record on return. On error return, *@vfs_ino is undefined.
- *
- * See map_mft_record() description for details and for a description of how
- * errors are returned and what error codes are defined.
- *
- * IMPROTANT: The caller is responsible for calling iput(@vfs_ino) when
- * finished with the inode, i.e. after unmap_mft_record() has been called. If
- * that is omitted you will get busy inodes upon umount...
- */
-#if 0
-// this is no longer possible. iget() cannot be called as we may be loading
-// an ntfs inode which will never have a corresponding vfs inode counter part.
-// this is not going to be pretty. )-:
-// we need our own hash for ntfs inodes now, ugh. )-:
-// not having vfs inodes associated with all ntfs inodes is a bad mistake I am
-// getting the impression. this will in the end turn out uglier than just
-// having iget_no_wait().
-// my only hope is that we can get away without this functionality in the driver
-// altogether. we are ok for extent inodes already because we only handle them
-// via map_extent_mft_record().
-// if we really need it, we could have a list or hash of "pure ntfs inodes"
-// to cope with this situation, so the lookup would be:
-// look for the inode and if not present look for pure ntfs inode and if not
-// present add a new pure ntfs inode. under this scheme extent inodes have to
-// also be added to the list/hash of pure inodes.
-MFT_RECORD *iget_map_mft_record(const int rw, struct super_block *vfs_sb,
-const unsigned long inum, struct inode **vfs_ino)
-{
-struct inode *inode;
-MFT_RECORD *mrec;
-/*
- * The corresponding iput() happens when clear_inode() is called on the
- * base mft record of this extent mft record.
- * When used on base mft records, caller has to perform the iput().
- */
-inode = iget(vfs_sb, inum);
-if (inode && !is_bad_inode(inode)) {
-mrec = map_mft_record(rw, inode);
-if (!IS_ERR(mrec)) {
-ntfs_debug("Success for i_ino 0x%lx.", inum);
-*vfs_ino = inode;
-return mrec;
-}
-} else
-mrec = ERR_PTR(-EIO);
-if (inode)
-iput(inode);
-ntfs_debug("Failed for i_ino 0x%lx.", inum);
-return mrec;
-}
-#endif
 /**
 * unmap_mft_record - release a mapped mft record
 * @rw: unmap from read (@rw = READ) or write (@rw = WRITE)
...