Commit 821f7494 authored by Tyler Hicks

eCryptfs: Revert to a writethrough cache model

A change was made about a year ago to get eCryptfs to better utilize its
page cache during writes. The idea was to do the page encryption
operations during page writeback, rather than doing them when initially
writing into the page cache, to reduce the number of page encryption
operations during sequential writes. This meant that the encrypted page
would only be written to the lower filesystem during page writeback,
which was a change from how eCryptfs had previously written to the lower
filesystem in ecryptfs_write_end().
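
To make the trade-off concrete, the toy userspace sketch below (not eCryptfs code; every name in it is invented for illustration) contrasts the two models: the write-back variant defers the encrypt-and-write-to-lower step until a later flush, so several writes to the same page cost a single encryption, while the write-through variant, which this revert restores, encrypts and pushes to the lower file on every write_end and therefore sees lower-filesystem errors immediately.

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
        char data[64];
        bool dirty;
};

/* Stand-in for ecryptfs_encrypt_page() plus the write to the lower file. */
static int encrypt_and_write_lower(struct toy_page *pg)
{
        printf("encrypt + write to lower fs: \"%s\"\n", pg->data);
        return 0;                       /* pretend the lower write succeeded */
}

/* Write-through: every write_end pushes ciphertext to the lower file. */
static int write_end_writethrough(struct toy_page *pg, const char *buf)
{
        snprintf(pg->data, sizeof(pg->data), "%s", buf);
        return encrypt_and_write_lower(pg);    /* lower errors reach the caller */
}

/* Write-back: write_end only dirties the page; the lower write is deferred. */
static int write_end_writeback(struct toy_page *pg, const char *buf)
{
        snprintf(pg->data, sizeof(pg->data), "%s", buf);
        pg->dirty = true;
        return 0;                       /* always "succeeds" here, regardless of the lower fs */
}

/* Deferred flush, standing in for page writeback. */
static void flush_dirty(struct toy_page *pg)
{
        if (pg->dirty && encrypt_and_write_lower(pg) == 0)
                pg->dirty = false;      /* an error here is hard to report to the writer */
}

int main(void)
{
        struct toy_page pg = { .data = "", .dirty = false };

        write_end_writethrough(&pg, "hello");   /* one encryption per write */
        write_end_writeback(&pg, "hello world");/* cheaper for sequential writes... */
        write_end_writeback(&pg, "hello world!");
        flush_dirty(&pg);                       /* ...one encryption at flush time */
        return 0;
}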

The change caused a few eCryptfs-internal bugs that have since been shaken out.
Unfortunately, more grave side effects have been identified that will
force changes outside of eCryptfs. Because the lower filesystem isn't
consulted until page writeback, eCryptfs has no way to pass lower write
errors (ENOSPC, mainly) back to userspace. Additionally, it was reported
that quotas could be bypassed because of the way eCryptfs may sometimes
open the lower filesystem using a privileged kthread.
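
The ENOSPC problem is visible from userspace as follows: under the write-back model, write() only dirties the eCryptfs page cache and so succeeds even when the lower filesystem is already full; the failure only occurs later, during writeback, where it may never reach the application. The sketch below is a hypothetical demonstration of that symptom (the mount path is an assumption), not part of this commit.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        /* Assumed path: an eCryptfs mount whose lower filesystem is full. */
        int fd = open("/mnt/ecryptfs/demo", O_CREAT | O_WRONLY, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(buf, 'x', sizeof(buf));

        /* Succeeds under the write-back model even if the lower fs is full. */
        if (write(fd, buf, sizeof(buf)) < 0)
                perror("write");

        /* The lower ENOSPC, if it is reported at all, only shows up here. */
        if (fsync(fd) < 0)
                perror("fsync");

        close(fd);
        return 0;
}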

It would be nice to resolve these latest issues properly, but in the meantime
it is best to revert the eCryptfs commits and return to the old behavior.

This reverts:
32001d6f "eCryptfs: Flush file in vma close"
5be79de2 "eCryptfs: Flush dirty pages in setattr"
57db4e8d "ecryptfs: modify write path to encrypt page in writepage"
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Tested-by: Colin King <colin.king@canonical.com>
Cc: Colin King <colin.king@canonical.com>
Cc: Thieu Le <thieule@google.com>
parent e3ccaa97
fs/ecryptfs/file.c

@@ -138,27 +138,6 @@ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
         return rc;
 }
 
-static void ecryptfs_vma_close(struct vm_area_struct *vma)
-{
-        filemap_write_and_wait(vma->vm_file->f_mapping);
-}
-
-static const struct vm_operations_struct ecryptfs_file_vm_ops = {
-        .close = ecryptfs_vma_close,
-        .fault = filemap_fault,
-};
-
-static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-        int rc;
-
-        rc = generic_file_mmap(file, vma);
-        if (!rc)
-                vma->vm_ops = &ecryptfs_file_vm_ops;
-
-        return rc;
-}
-
 struct kmem_cache *ecryptfs_file_info_cache;
 
 static int read_or_initialize_metadata(struct dentry *dentry)

@@ -311,15 +290,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
 static int
 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-        int rc = 0;
-
-        rc = generic_file_fsync(file, start, end, datasync);
-        if (rc)
-                goto out;
-        rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
-                             datasync);
-out:
-        return rc;
+        return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
 }
 
 static int ecryptfs_fasync(int fd, struct file *file, int flag)

@@ -388,7 +359,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
         .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-        .mmap = ecryptfs_file_mmap,
+        .mmap = generic_file_mmap,
         .open = ecryptfs_open,
         .flush = ecryptfs_flush,
         .release = ecryptfs_release,
fs/ecryptfs/inode.c

@@ -981,12 +981,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
                         goto out;
         }
 
-        if (S_ISREG(inode->i_mode)) {
-                rc = filemap_write_and_wait(inode->i_mapping);
-                if (rc)
-                        goto out;
-                fsstack_copy_attr_all(inode, lower_inode);
-        }
         memcpy(&lower_ia, ia, sizeof(lower_ia));
         if (ia->ia_valid & ATTR_FILE)
                 lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
fs/ecryptfs/mmap.c

@@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
 {
         int rc;
 
-        /*
-         * Refuse to write the page out if we are called from reclaim context
-         * since our writepage() path may potentially allocate memory when
-         * calling into the lower fs vfs_write() which may in turn invoke
-         * us again.
-         */
-        if (current->flags & PF_MEMALLOC) {
-                redirty_page_for_writepage(wbc, page);
-                rc = 0;
-                goto out;
-        }
-
         rc = ecryptfs_encrypt_page(page);
         if (rc) {
                 ecryptfs_printk(KERN_WARNING, "Error encrypting "

@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
         struct ecryptfs_crypt_stat *crypt_stat =
                 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
         int rc;
-        int need_unlock_page = 1;
 
         ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
                         "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);

@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
                         "zeros in page with index = [0x%.16lx]\n", index);
                 goto out;
         }
-        set_page_dirty(page);
-        unlock_page(page);
-        need_unlock_page = 0;
+        rc = ecryptfs_encrypt_page(page);
+        if (rc) {
+                ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
+                                "index [0x%.16lx])\n", index);
+                goto out;
+        }
         if (pos + copied > i_size_read(ecryptfs_inode)) {
                 i_size_write(ecryptfs_inode, pos + copied);
                 ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
                         "[0x%.16llx]\n",
                         (unsigned long long)i_size_read(ecryptfs_inode));
-                balance_dirty_pages_ratelimited(mapping);
-                rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
-                if (rc) {
-                        printk(KERN_ERR "Error writing inode size to metadata; "
-                               "rc = [%d]\n", rc);
-                        goto out;
-                }
         }
-        rc = copied;
+        rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+        if (rc)
+                printk(KERN_ERR "Error writing inode size to metadata; "
+                       "rc = [%d]\n", rc);
+        else
+                rc = copied;
 out:
-        if (need_unlock_page)
-                unlock_page(page);
+        unlock_page(page);
         page_cache_release(page);
         return rc;
 }