Commit 0f9bfc48 authored by Anton Altaparmakov

NTFS: - Add mapping of unmapped buffers to all remaining code paths, i.e.

        fs/ntfs/aops.c::ntfs_write_mst_block(), mft.c::ntfs_sync_mft_mirror(),
        and write_mft_record_nolock().  From now on we require that the
        complete runlist for the mft mirror is always mapped into memory.
      - Add creation of buffers to fs/ntfs/mft.c::ntfs_sync_mft_mirror().
      - Do not check for the page being uptodate in mark_ntfs_record_dirty()
        as we now call this after marking the page not uptodate during mft
        mirror synchronisation (fs/ntfs/mft.c::ntfs_sync_mft_mirror()).
      - Improve error handling in fs/ntfs/aops.c::ntfs_{read,write}_block().
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
parent 453b5e0c
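
The heart of this change is the remapping logic added to both mft write paths in the diff below: when a buffer head covering part of a record is found unmapped, the code translates the record's position into a vcn, looks that vcn up in the (already mapped) runlist to get an lcn, and from that computes the device block number for the buffer. The stand-alone sketch that follows models only that arithmetic; the {vcn, lcn, length} runlist layout and the shift/mask computation follow the patch, while the sample geometry, the sample runlist and the simplified rl_vcn_to_lcn() helper are invented for illustration (the driver uses ntfs_rl_vcn_to_lcn() and its own LCN_* error codes).

/*
 * Illustrative, self-contained model of the vcn -> lcn -> device block
 * computation the patch adds when it maps a previously unmapped buffer.
 * The runlist layout ({vcn, lcn, length}, terminated by length == 0) and
 * the shift/mask arithmetic mirror the patch; the sample geometry and the
 * sample runlist below are made up for the example.
 */
#include <stdio.h>

typedef long long VCN;
typedef long long LCN;

typedef struct {
	VCN vcn;		/* First virtual cluster of this run. */
	LCN lcn;		/* First on-disk cluster, or < 0 if not allocated. */
	long long length;	/* Run length in clusters; 0 terminates the list. */
} runlist_element;

/* Walk the runlist and translate a vcn to an lcn (simplified lookup). */
static LCN rl_vcn_to_lcn(const runlist_element *rl, VCN vcn)
{
	while (rl->length && rl[1].vcn <= vcn)
		rl++;
	if (!rl->length || rl->lcn < 0)
		return -1;	/* Hole or unmapped: caller treats this as an error. */
	return rl->lcn + (vcn - rl->vcn);
}

int main(void)
{
	/* Example geometry: 1024-byte mft records, 4096-byte clusters, 512-byte blocks. */
	const unsigned mft_record_size_bits = 10;
	const unsigned cluster_size_bits = 12;
	const unsigned cluster_size_mask = (1u << cluster_size_bits) - 1;
	const unsigned blocksize_bits = 9;
	/* Example runlist: vcn 0-3 at lcn 100, vcn 4-7 at lcn 200. */
	const runlist_element rl[] = {
		{ 0, 100, 4 }, { 4, 200, 4 }, { 8, -1, 0 },
	};
	unsigned long mft_no = 17;	/* Record whose buffers we want to map. */
	unsigned ofs;

	/* One device block at a time, exactly like the buffer head loop. */
	for (ofs = 0; ofs < (1u << mft_record_size_bits); ofs += 1u << blocksize_bits) {
		VCN vcn = ((VCN)mft_no << mft_record_size_bits) + ofs;
		unsigned vcn_ofs = vcn & cluster_size_mask;
		LCN lcn;

		vcn >>= cluster_size_bits;
		lcn = rl_vcn_to_lcn(rl, vcn);
		if (lcn < 0) {
			fprintf(stderr, "record 0x%lx: cannot map block at offset %u\n",
					mft_no, ofs);
			return 1;
		}
		printf("offset %4u -> device block %lld\n", ofs,
				((lcn << cluster_size_bits) + vcn_ofs) >> blocksize_bits);
	}
	return 0;
}

Running it prints the device block each 512-byte buffer of the example record would be assigned, which is the same value the patch stores in bh->b_blocknr before setting the buffer mapped.
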
@@ -84,6 +84,15 @@ ToDo/Notes:
 	- Fix error handling in fs/ntfs/quota.c::ntfs_mark_quotas_out_of_date()
 	  where we failed to release i_sem on the $Quota/$Q attribute inode.
 	- Fix bug in handling of bad inodes in fs/ntfs/namei.c::ntfs_lookup().
+	- Add mapping of unmapped buffers to all remaining code paths, i.e.
+	  fs/ntfs/aops.c::ntfs_write_mst_block(), mft.c::ntfs_sync_mft_mirror(),
+	  and write_mft_record_nolock().  From now on we require that the
+	  complete runlist for the mft mirror is always mapped into memory.
+	- Add creation of buffers to fs/ntfs/mft.c::ntfs_sync_mft_mirror().
+	- Do not check for the page being uptodate in mark_ntfs_record_dirty()
+	  as we now call this after marking the page not uptodate during mft
+	  mirror synchronisation (fs/ntfs/mft.c::ntfs_sync_mft_mirror()).
+	- Improve error handling in fs/ntfs/aops.c::ntfs_{read,write}_block().
 
 2.1.21 - Fix some races and bugs, rewrite mft write code, add mft allocator.
...
This diff is collapsed.
@@ -2358,8 +2358,8 @@ int ntfs_truncate(struct inode *vi)
 done:
 	ntfs_attr_put_search_ctx(ctx);
 	unmap_mft_record(ni);
-	ntfs_debug("Done.");
 	NInoClearTruncateFailed(ni);
+	ntfs_debug("Done.");
 	return 0;
 err_out:
 	if (err != -ENOMEM) {
@@ -2608,6 +2608,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
 		ntfs_error(vi->i_sb, "Failed (error code %i): Marking inode "
 				"as bad. You should run chkdsk.", -err);
 		make_bad_inode(vi);
+		NVolSetErrors(ni->vol);
 	}
 	return err;
 }
...
@@ -466,8 +466,10 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	struct buffer_head *bhs[max_bhs];
 	struct buffer_head *bh, *head;
 	u8 *kmirr;
-	unsigned int block_start, block_end, m_start, m_end;
+	runlist_element *rl;
+	unsigned int block_start, block_end, m_start, m_end, page_ofs;
 	int i_bhs, nr_bhs, err = 0;
+	unsigned char blocksize_bits = vol->mftmirr_ino->i_blkbits;
 
 	ntfs_debug("Entering for inode 0x%lx.", mft_no);
 	BUG_ON(!max_bhs);
@@ -486,24 +488,24 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 		err = PTR_ERR(page);
 		goto err_out;
 	}
-	/*
-	 * Exclusion against other writers. This should never be a problem
-	 * since the page in which the mft record @m resides is also locked and
-	 * hence any other writers would be held up there but it is better to
-	 * make sure no one is writing from elsewhere.
-	 */
 	lock_page(page);
 	BUG_ON(!PageUptodate(page));
 	ClearPageUptodate(page);
+	/* Offset of the mft mirror record inside the page. */
+	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
 	/* The address in the page of the mirror copy of the mft record @m. */
-	kmirr = page_address(page) + ((mft_no << vol->mft_record_size_bits) &
-			~PAGE_CACHE_MASK);
+	kmirr = page_address(page) + page_ofs;
 	/* Copy the mst protected mft record to the mirror. */
 	memcpy(kmirr, m, vol->mft_record_size);
-	/* Make sure we have mapped buffers. */
+	/*
+	 * Create buffers if not present and mark the ones belonging to the mft
+	 * mirror record dirty.
+	 */
+	mark_ntfs_record_dirty(page, page_ofs);
 	BUG_ON(!page_has_buffers(page));
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
+	rl = NULL;
 	nr_bhs = 0;
 	block_start = 0;
 	m_start = kmirr - (u8*)page_address(page);
@@ -511,15 +513,61 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 	do {
 		block_end = block_start + blocksize;
 		/* If the buffer is outside the mft record, skip it. */
-		if ((block_end <= m_start) || (block_start >= m_end))
+		if (block_end <= m_start)
 			continue;
-		BUG_ON(!buffer_mapped(bh));
+		if (unlikely(block_start >= m_end))
+			break;
+		/* Need to map the buffer if it is not mapped already. */
+		if (unlikely(!buffer_mapped(bh))) {
+			VCN vcn;
+			LCN lcn;
+			unsigned int vcn_ofs;
+
+			/* Obtain the vcn and offset of the current block. */
+			vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
+					(block_start - m_start);
+			vcn_ofs = vcn & vol->cluster_size_mask;
+			vcn >>= vol->cluster_size_bits;
+			if (!rl) {
+				down_read(&NTFS_I(vol->mftmirr_ino)->
+						runlist.lock);
+				rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
+				/*
+				 * $MFTMirr always has the whole of its runlist
+				 * in memory.
+				 */
+				BUG_ON(!rl);
+			}
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			/* For $MFTMirr, only lcn >= 0 is a successful remap. */
+			if (likely(lcn >= 0)) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn <<
+						vol->cluster_size_bits) +
+						vcn_ofs) >> blocksize_bits;
+				set_buffer_mapped(bh);
+			} else {
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Cannot write mft mirror "
+						"record 0x%lx because its "
+						"location on disk could not "
+						"be determined (error code "
+						"%lli).", mft_no,
+						(long long)lcn);
+				err = -EIO;
+			}
+		}
 		BUG_ON(!buffer_uptodate(bh));
 		BUG_ON(!nr_bhs && (m_start != block_start));
 		BUG_ON(nr_bhs >= max_bhs);
 		bhs[nr_bhs++] = bh;
 		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
 	} while (block_start = block_end, (bh = bh->b_this_page) != head);
+	if (unlikely(rl))
+		up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);
 	if (likely(!err)) {
 		/* Lock buffers and start synchronous write i/o on them. */
 		for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -528,8 +576,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 			if (unlikely(test_set_buffer_locked(tbh)))
 				BUG();
 			BUG_ON(!buffer_uptodate(tbh));
-			if (buffer_dirty(tbh))
-				clear_buffer_dirty(tbh);
+			clear_buffer_dirty(tbh);
 			get_bh(tbh);
 			tbh->b_end_io = end_buffer_write_sync;
 			submit_bh(WRITE, tbh);
@@ -613,13 +660,14 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 {
 	ntfs_volume *vol = ni->vol;
 	struct page *page = ni->page;
-	unsigned int blocksize = vol->sb->s_blocksize;
+	unsigned char blocksize_bits = vol->mft_ino->i_blkbits;
+	unsigned int blocksize = 1 << blocksize_bits;
 	int max_bhs = vol->mft_record_size / blocksize;
 	struct buffer_head *bhs[max_bhs];
 	struct buffer_head *bh, *head;
+	runlist_element *rl;
 	unsigned int block_start, block_end, m_start, m_end;
 	int i_bhs, nr_bhs, err = 0;
-	BOOL rec_is_dirty = TRUE;
 
 	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
 	BUG_ON(NInoAttr(ni));
@@ -636,6 +684,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 	BUG_ON(!page_has_buffers(page));
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
+	rl = NULL;
 	nr_bhs = 0;
 	block_start = 0;
 	m_start = ni->page_ofs;
@@ -647,31 +696,65 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 			continue;
 		if (unlikely(block_start >= m_end))
 			break;
+		/*
+		 * If this block is not the first one in the record, we ignore
+		 * the buffer's dirty state because we could have raced with a
+		 * parallel mark_ntfs_record_dirty().
+		 */
 		if (block_start == m_start) {
 			/* This block is the first one in the record. */
 			if (!buffer_dirty(bh)) {
+				BUG_ON(nr_bhs);
 				/* Clean records are not written out. */
-				rec_is_dirty = FALSE;
-				continue;
+				break;
 			}
-			rec_is_dirty = TRUE;
-		} else {
-			/*
-			 * This block is not the first one in the record. We
-			 * ignore the buffer's dirty state because we could
-			 * have raced with a parallel mark_ntfs_record_dirty().
-			 */
-			if (!rec_is_dirty)
-				continue;
 		}
-		BUG_ON(!buffer_mapped(bh));
+		/* Need to map the buffer if it is not mapped already. */
+		if (unlikely(!buffer_mapped(bh))) {
+			VCN vcn;
+			LCN lcn;
+			unsigned int vcn_ofs;
+
+			/* Obtain the vcn and offset of the current block. */
+			vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
+					(block_start - m_start);
+			vcn_ofs = vcn & vol->cluster_size_mask;
+			vcn >>= vol->cluster_size_bits;
+			if (!rl) {
+				down_read(&NTFS_I(vol->mft_ino)->runlist.lock);
+				rl = NTFS_I(vol->mft_ino)->runlist.rl;
+				BUG_ON(!rl);
+			}
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
+			/* For $MFT, only lcn >= 0 is a successful remap. */
+			if (likely(lcn >= 0)) {
+				/* Setup buffer head to correct block. */
+				bh->b_blocknr = ((lcn <<
+						vol->cluster_size_bits) +
+						vcn_ofs) >> blocksize_bits;
+				set_buffer_mapped(bh);
+			} else {
+				bh->b_blocknr = -1;
+				ntfs_error(vol->sb, "Cannot write mft record "
+						"0x%lx because its location "
+						"on disk could not be "
+						"determined (error code %lli).",
+						ni->mft_no, (long long)lcn);
+				err = -EIO;
+			}
+		}
 		BUG_ON(!buffer_uptodate(bh));
 		BUG_ON(!nr_bhs && (m_start != block_start));
 		BUG_ON(nr_bhs >= max_bhs);
 		bhs[nr_bhs++] = bh;
 		BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
 	} while (block_start = block_end, (bh = bh->b_this_page) != head);
-	if (!rec_is_dirty)
+	if (unlikely(rl))
+		up_read(&NTFS_I(vol->mft_ino)->runlist.lock);
+	if (!nr_bhs)
 		goto done;
 	if (unlikely(err))
 		goto cleanup_out;
@@ -745,7 +828,8 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
 				"Redirtying so the write is retried later.");
 		mark_mft_record_dirty(ni);
 		err = 0;
-	}
+	} else
+		NVolSetErrors(vol);
 	return err;
 }
...
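
For reference, the buffer-selection policy that write_mft_record_nolock() follows after the hunks above can be modelled in a few lines: only buffers overlapping the record's byte range [m_start, m_end) are collected, and the dirty bit of the record's first buffer alone decides whether the record is written at all, which is what lets the patch drop the old rec_is_dirty bookkeeping. The sketch below is a stand-alone model under assumed sizes; buffer heads are reduced to a plain dirty[] array, and locking and I/O submission are left out.

/*
 * Self-contained model of how one mft record's buffers are selected within a
 * page: blocks entirely before the record are skipped, the walk stops at the
 * first block past it, and if the record's first block is clean the whole
 * record is treated as clean and nothing is written.  The sizes below are
 * example values, not taken from a real volume.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define BLOCK_SIZE	512u
#define MFT_RECORD_SIZE	1024u

/* Return the number of blocks collected for writeout, 0 if the record is clean. */
static unsigned collect_record_blocks(const int dirty[PAGE_SIZE / BLOCK_SIZE],
		unsigned m_start, unsigned blocks[])
{
	unsigned m_end = m_start + MFT_RECORD_SIZE;
	unsigned block_start, nr = 0, i;

	for (i = 0, block_start = 0; block_start < PAGE_SIZE;
			i++, block_start += BLOCK_SIZE) {
		unsigned block_end = block_start + BLOCK_SIZE;

		if (block_end <= m_start)	/* Before the record: skip. */
			continue;
		if (block_start >= m_end)	/* Past the record: done. */
			break;
		/* Only the record's first block decides whether it is dirty. */
		if (block_start == m_start && !dirty[i])
			return 0;	/* Clean records are not written out. */
		blocks[nr++] = i;
	}
	return nr;
}

int main(void)
{
	/* Second 1024-byte record in the page; only its blocks are dirty. */
	int dirty[PAGE_SIZE / BLOCK_SIZE] = { 0, 0, 1, 1, 0, 0, 0, 0 };
	unsigned blocks[MFT_RECORD_SIZE / BLOCK_SIZE];
	unsigned nr, i;

	nr = collect_record_blocks(dirty, 1 * MFT_RECORD_SIZE, blocks);
	printf("%u block(s) to write:", nr);
	for (i = 0; i < nr; i++)
		printf(" %u", blocks[i]);
	printf("\n");
	return 0;
}
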
@@ -983,6 +983,10 @@ static BOOL load_and_init_mft_mirror(ntfs_volume *vol)
  * @vol:	ntfs super block describing device whose mft mirror to check
  *
  * Return TRUE on success or FALSE on error.
+ *
+ * Note, this function also results in the mft mirror runlist being completely
+ * mapped into memory. The mft mirror write code requires this and will BUG()
+ * should it find an unmapped runlist element.
  */
 static BOOL check_mft_mirror(ntfs_volume *vol)
 {
...
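
The kernel-doc note added above relies on an invariant established at mount time: check_mft_mirror() leaves the mft mirror's runlist mapped in full, so the write path can translate any vcn without ever meeting an unmapped element. A minimal stand-alone check of that invariant could look like the sketch below; the LCN_NOT_MAPPED sentinel, the runlist contents and the cluster count are placeholders for this illustration, not the driver's actual constants.

/*
 * Model of the "mft mirror runlist is completely mapped" invariant: every
 * cluster of the mirror must be covered by a run whose lcn is known, so a
 * writer never needs to map further runlist fragments.
 */
#include <stdbool.h>
#include <stdio.h>

typedef long long VCN;
typedef long long LCN;
#define LCN_NOT_MAPPED	((LCN)-2)	/* Sketch-local stand-in, not the driver constant. */

typedef struct {
	VCN vcn;
	LCN lcn;		/* >= 0, or a negative sentinel. */
	long long length;	/* 0 terminates the runlist. */
} runlist_element;

/* True if every cluster in [0, nr_clusters) is backed by a mapped run. */
static bool runlist_fully_mapped(const runlist_element *rl, VCN nr_clusters)
{
	VCN covered = 0;

	for (; rl->length; rl++) {
		if (rl->vcn != covered || rl->lcn == LCN_NOT_MAPPED)
			return false;	/* Gap or unmapped run: the writer would fail. */
		covered += rl->length;
	}
	return covered >= nr_clusters;
}

int main(void)
{
	/* A two-run mirror covering 4 clusters, fully mapped. */
	const runlist_element mirror_rl[] = {
		{ 0, 12, 2 }, { 2, 80, 2 }, { 4, 0, 0 },
	};

	printf("mirror runlist fully mapped: %s\n",
			runlist_fully_mapped(mirror_rl, 4) ? "yes" : "no");
	return 0;
}
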