Commit 17214b70 authored by Linus Torvalds

Merge tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux

Pull fsverity fixes from Eric Biggers:
 "Fix two significant performance issues with fsverity"

* tag 'fsverity-for-linus' of git://git.kernel.org/pub/scm/fs/fsverity/linux:
  fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
  fsverity: Remove WQ_UNBOUND from fsverity read workqueue
parents 4f1e308d a075bacd
@@ -8,7 +8,6 @@
 #include "fsverity_private.h"

 #include <linux/mount.h>
-#include <linux/pagemap.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
@@ -367,25 +366,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
 		goto out_drop_write;

 	err = enable_verity(filp, &arg);
-	if (err)
-		goto out_allow_write_access;

 	/*
-	 * Some pages of the file may have been evicted from pagecache after
-	 * being used in the Merkle tree construction, then read into pagecache
-	 * again by another process reading from the file concurrently.  Since
-	 * these pages didn't undergo verification against the file digest which
-	 * fs-verity now claims to be enforcing, we have to wipe the pagecache
-	 * to ensure that all future reads are verified.
+	 * We no longer drop the inode's pagecache after enabling verity.  This
+	 * used to be done to try to avoid a race condition where pages could be
+	 * evicted after being used in the Merkle tree construction, then
+	 * re-instantiated by a concurrent read.  Such pages are unverified, and
+	 * the backing storage could have filled them with different content, so
+	 * they shouldn't be used to fulfill reads once verity is enabled.
+	 *
+	 * But, dropping the pagecache has a big performance impact, and it
+	 * doesn't fully solve the race condition anyway.  So for those reasons,
+	 * and also because this race condition isn't very important relatively
+	 * speaking (especially for small-ish files, where the chance of a page
+	 * being used, evicted, *and* re-instantiated all while enabling verity
+	 * is quite small), we no longer drop the inode's pagecache.
 	 */
-	filemap_write_and_wait(inode->i_mapping);
-	invalidate_inode_pages2(inode->i_mapping);

 	/*
 	 * allow_write_access() is needed to pair with deny_write_access().
 	 * Regardless, the filesystem won't allow writing to verity files.
 	 */
-out_allow_write_access:
 	allow_write_access(filp);
 out_drop_write:
 	mnt_drop_write_file(filp);
@@ -387,15 +387,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
 int __init fsverity_init_workqueue(void)
 {
 	/*
-	 * Use an unbound workqueue to allow bios to be verified in parallel
-	 * even when they happen to complete on the same CPU.  This sacrifices
-	 * locality, but it's worthwhile since hashing is CPU-intensive.
+	 * Use a high-priority workqueue to prioritize verification work, which
+	 * blocks reads from completing, over regular application tasks.
 	 *
-	 * Also use a high-priority workqueue to prioritize verification work,
-	 * which blocks reads from completing, over regular application tasks.
+	 * For performance reasons, don't use an unbound workqueue.  Using an
+	 * unbound workqueue for crypto operations causes excessive scheduler
+	 * latency on ARM64.
 	 */
 	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
-						  WQ_UNBOUND | WQ_HIGHPRI,
+						  WQ_HIGHPRI,
 						  num_online_cpus());
 	if (!fsverity_read_workqueue)
 		return -ENOMEM;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment