Commit 61d5048f authored by Christoph Hellwig, committed by Linus Torvalds

clean up vmtruncate

vmtruncate is a twisted maze of gotos.  This patch cleans it up to use a
proper if/else for the two major cases of a truncate, extending and
shrinking the file, which makes it a lot more readable while keeping
exactly the same functionality.
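
To make the shape of the change easier to see outside the kernel tree, below is a
reduced standalone sketch of the same restructuring.  resize_with_gotos(),
resize_with_if_else(), the helper stubs and LIMIT are hypothetical stand-ins, not
kernel code, and unlike this sketch the real patch still keeps the shared
out_sig/out_big exit for the size-limit errors.

/*
 * Illustrative sketch only: a resize split into an "expand" and a "shrink"
 * case, first with goto-threaded control flow, then with one if/else.
 */
#include <errno.h>
#include <stdio.h>

static int  busy(void)       { return 0; }                       /* stand-in for IS_SWAPFILE() */
static void grow(long off)   { printf("grow to %ld\n", off); }
static void shrink(long off) { printf("shrink to %ld\n", off); }
static void finish(void)     { printf("notify filesystem\n"); }  /* stand-in for ->truncate() */

#define LIMIT 1000

/* Before: the two cases are threaded together with gotos. */
static int resize_with_gotos(long size, long offset)
{
	if (size < offset)
		goto do_expand;
	if (busy())
		goto out_busy;
	shrink(offset);
	goto out_done;

do_expand:
	if (offset > LIMIT)
		goto out_big;
	grow(offset);

out_done:
	finish();
	return 0;
out_big:
	return -EFBIG;
out_busy:
	return -ETXTBSY;
}

/* After: one branch per case, direct error returns, shared tail at the end. */
static int resize_with_if_else(long size, long offset)
{
	if (size < offset) {
		if (offset > LIMIT)
			return -EFBIG;
		grow(offset);
	} else {
		if (busy())
			return -ETXTBSY;
		shrink(offset);
	}
	finish();
	return 0;
}

int main(void)
{
	printf("goto version:    %d\n", resize_with_gotos(100, 2000));
	printf("if/else version: %d\n", resize_with_if_else(100, 2000));
	return 0;
}
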
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1b1b32f2
@@ -1909,50 +1909,49 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 int vmtruncate(struct inode * inode, loff_t offset)
 {
-	struct address_space *mapping = inode->i_mapping;
-	unsigned long limit;
+	if (inode->i_size < offset) {
+		unsigned long limit;
 
-	if (inode->i_size < offset)
-		goto do_expand;
-	/*
-	 * truncation of in-use swapfiles is disallowed - it would cause
-	 * subsequent swapout to scribble on the now-freed blocks.
-	 */
-	if (IS_SWAPFILE(inode))
-		goto out_busy;
-	i_size_write(inode, offset);
+		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+		if (limit != RLIM_INFINITY && offset > limit)
+			goto out_sig;
+		if (offset > inode->i_sb->s_maxbytes)
+			goto out_big;
+		i_size_write(inode, offset);
+	} else {
+		struct address_space *mapping = inode->i_mapping;
 
-	/*
-	 * unmap_mapping_range is called twice, first simply for efficiency
-	 * so that truncate_inode_pages does fewer single-page unmaps. However
-	 * after this first call, and before truncate_inode_pages finishes,
-	 * it is possible for private pages to be COWed, which remain after
-	 * truncate_inode_pages finishes, hence the second unmap_mapping_range
-	 * call must be made for correctness.
-	 */
-	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-	truncate_inode_pages(mapping, offset);
-	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-	goto out_truncate;
+		/*
+		 * truncation of in-use swapfiles is disallowed - it would
+		 * cause subsequent swapout to scribble on the now-freed
+		 * blocks.
+		 */
+		if (IS_SWAPFILE(inode))
+			return -ETXTBSY;
+		i_size_write(inode, offset);
 
-do_expand:
-	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-	if (limit != RLIM_INFINITY && offset > limit)
-		goto out_sig;
-	if (offset > inode->i_sb->s_maxbytes)
-		goto out_big;
-	i_size_write(inode, offset);
+		/*
+		 * unmap_mapping_range is called twice, first simply for
+		 * efficiency so that truncate_inode_pages does fewer
+		 * single-page unmaps.  However after this first call, and
+		 * before truncate_inode_pages finishes, it is possible for
+		 * private pages to be COWed, which remain after
+		 * truncate_inode_pages finishes, hence the second
+		 * unmap_mapping_range call must be made for correctness.
+		 */
+		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+		truncate_inode_pages(mapping, offset);
+		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+	}
 
-out_truncate:
 	if (inode->i_op && inode->i_op->truncate)
 		inode->i_op->truncate(inode);
 	return 0;
+
 out_sig:
 	send_sig(SIGXFSZ, current, 0);
 out_big:
 	return -EFBIG;
-out_busy:
-	return -ETXTBSY;
 }
 EXPORT_SYMBOL(vmtruncate);
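
For context, a minimal sketch of how a filesystem ->setattr of this era would
typically hand a size change to vmtruncate(); example_setattr() is hypothetical
and not part of this patch, and the errors it passes back (-EFBIG, -ETXTBSY)
come straight from the paths above.

#include <linux/fs.h>
#include <linux/mm.h>	/* declaration of vmtruncate() */

/*
 * Hypothetical caller, shown only to illustrate how the return values of
 * vmtruncate() reach the setattr path; not part of this patch.
 */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode))
		error = vmtruncate(inode, attr->ia_size);

	return error;
}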