Commit 99b7db7b authored by Nick Piggin, committed by Al Viro

fs: brlock vfsmount_lock

Use a brlock for the vfsmount lock. It must be taken for write whenever
modifying the mount hash or associated fields, and may be taken for read when
performing mount hash lookups.
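
As a rough illustration of that rule (a hedged sketch, not the patch's own code: the
_sketch helpers and the hash-walk details are simplified stand-ins for the real code in
the collapsed part of this diff), a lookup nests under the cheap per-cpu read lock while
anything that edits the mount hash pays for the global write lock:

#include <linux/lglock.h>
#include <linux/list.h>
#include <linux/mount.h>
#include <linux/path.h>

DECLARE_BRLOCK(vfsmount_lock);

/* Read side: mount hash lookups only need the per-cpu read lock. */
static struct vfsmount *lookup_mnt_sketch(struct list_head *head,
					  struct path *path)
{
	struct vfsmount *p, *found = NULL;

	br_read_lock(vfsmount_lock);
	list_for_each_entry(p, head, mnt_hash) {
		if (p->mnt_parent == path->mnt &&
		    p->mnt_mountpoint == path->dentry) {
			found = mntget(p);
			break;
		}
	}
	br_read_unlock(vfsmount_lock);
	return found;
}

/* Write side: modifying the hash takes every CPU's lock. */
static void hash_mnt_sketch(struct vfsmount *mnt, struct list_head *head)
{
	br_write_lock(vfsmount_lock);
	list_add_tail(&mnt->mnt_hash, head);
	br_write_unlock(vfsmount_lock);
}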

A new lock is added for the mnt-id allocator, so it doesn't need to take
the heavy vfsmount write-lock.
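
A hedged sketch of that split, assuming an IDA-backed id allocator guarded by its own
spinlock (mnt_id_ida, mnt_id_lock and the _sketch helpers are illustrative names; the
real allocator lives in the collapsed part of the diff):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mount.h>
#include <linux/spinlock.h>

/* A private lock for id allocation, so it never touches vfsmount_lock. */
static DEFINE_IDA(mnt_id_ida);
static DEFINE_SPINLOCK(mnt_id_lock);

static int mnt_alloc_id_sketch(struct vfsmount *mnt)
{
	int retval;

retry:
	/* Preload outside the spinlock; the locked section must not sleep. */
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	retval = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&mnt_id_lock);
	if (retval == -EAGAIN)
		goto retry;
	return retval;
}

static void mnt_free_id_sketch(struct vfsmount *mnt)
{
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&mnt_id_lock);
}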

The number of atomics should remain the same for fastpath rlock cases, though the code
will be slightly slower due to per-cpu access. Scalability is not much improved in common
cases yet, due to other locks (i.e. dcache_lock) getting in the way. However, path lookups
crossing mountpoints should be one case where scalability is improved (they currently
require the global lock).

The slowpath is slower due to the use of the brlock. On a 64 core, 64 socket, 32 node
Altix system (high latency to remote nodes), a simple umount microbenchmark
(mount --bind mnt mnt2; umount mnt2, looped 1000 times) took 6.8s before this patch
and 7.1s afterwards, about 5% slower.

Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 6416ccb7
@@ -1935,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root,
 	bool slash = false;
 	int error = 0;
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
@@ -1964,7 +1964,7 @@ static int prepend_path(const struct path *path, struct path *root,
 	if (!error && !slash)
 		error = prepend(buffer, buflen, "/", 1);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	return error;

global_root:
@@ -2302,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2)
 	struct vfsmount *mnt = path1->mnt;
 	struct dentry *dentry = path1->dentry;
 	int res;
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	if (mnt != path2->mnt) {
 		for (;;) {
 			if (mnt->mnt_parent == mnt) {
-				spin_unlock(&vfsmount_lock);
+				br_read_unlock(vfsmount_lock);
 				return 0;
 			}
 			if (mnt->mnt_parent == path2->mnt)
@@ -2316,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2)
 		dentry = mnt->mnt_mountpoint;
 	}
 	res = is_subdir(dentry, path2->dentry);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
+#include <linux/lglock.h>
 struct super_block;
 struct linux_binprm;
 struct path;
@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
 extern void __init mnt_init(void);
-extern spinlock_t vfsmount_lock;
+DECLARE_BRLOCK(vfsmount_lock);
 /*
  * fs_struct.c
@@ -595,15 +595,16 @@ int follow_up(struct path *path)
 {
 	struct vfsmount *parent;
 	struct dentry *mountpoint;
-	spin_lock(&vfsmount_lock);
+	br_read_lock(vfsmount_lock);
 	parent = path->mnt->mnt_parent;
 	if (parent == path->mnt) {
-		spin_unlock(&vfsmount_lock);
+		br_read_unlock(vfsmount_lock);
 		return 0;
 	}
 	mntget(parent);
 	mountpoint = dget(path->mnt->mnt_mountpoint);
-	spin_unlock(&vfsmount_lock);
+	br_read_unlock(vfsmount_lock);
 	dput(path->dentry);
 	path->dentry = mountpoint;
 	mntput(path->mnt);
This diff is collapsed.
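
The collapsed diff is where the bulk of the conversion lives; for orientation, the
definition side of the brlock declared in the header above would look roughly like this
(a sketch assuming the lglock helpers of this series; the surrounding mnt_init() details
are omitted and the init wrapper name is illustrative):

#include <linux/init.h>
#include <linux/lglock.h>

/* Counterpart of the DECLARE_BRLOCK(vfsmount_lock) shown in the header. */
DEFINE_BRLOCK(vfsmount_lock);

static void __init mnt_init_lock_sketch(void)
{
	/* The per-cpu spinlocks need a one-time init before first use. */
	br_lock_init(vfsmount_lock);
}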
@@ -126,6 +126,9 @@ static int do_make_slave(struct vfsmount *mnt)
 	return 0;
 }
+/*
+ * vfsmount lock must be held for write
+ */
 void change_mnt_propagation(struct vfsmount *mnt, int type)
 {
 	if (type == MS_SHARED) {
@@ -270,12 +273,12 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
 		prev_src_mnt = child;
 	}
out:
-	spin_lock(&vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	while (!list_empty(&tmp_list)) {
 		child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
 		umount_tree(child, 0, &umount_list);
 	}
-	spin_unlock(&vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	release_mounts(&umount_list);
 	return ret;
 }
@@ -296,6 +299,8 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
  * other mounts its parent propagates to.
  * Check if any of these mounts that **do not have submounts**
  * have more references than 'refcnt'. If so return busy.
+ *
+ * vfsmount lock must be held for read or write
  */
 int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
 {
@@ -353,6 +358,8 @@ static void __propagate_umount(struct vfsmount *mnt)
  * collect all mounts that receive propagation from the mount in @list,
  * and return these additional mounts in the same list.
  * @list: the list of mounts to be unmounted.
+ *
+ * vfsmount lock must be held for write
  */
 int propagate_umount(struct list_head *list)
 {
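
The "vfsmount lock must be held for write" comments above describe a contract on the
callers rather than locking done inside these helpers; a minimal sketch of a conforming
caller (the wrapper name is illustrative, and the real callers also hold namespace_sem
and do more work under the lock):

#include <linux/lglock.h>
#include <linux/mount.h>

DECLARE_BRLOCK(vfsmount_lock);
void change_mnt_propagation(struct vfsmount *mnt, int type);

/* Illustrative caller honoring the "held for write" comment above. */
static void set_propagation_sketch(struct vfsmount *mnt, int type)
{
	br_write_lock(vfsmount_lock);
	change_mnt_propagation(mnt, type);
	br_write_unlock(vfsmount_lock);
}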