Commit 9f6c61f9 authored by Miklos Szeredi

proc/mounts: add cursor

If mounts are deleted after a read(2) call on /proc/self/mounts (or its
kin), the subsequent read(2) could miss a mount that comes after the
deleted one in the list.  This is because the file position is interpreted
as the number of mount entries from the start of the list.

E.g. the first read gets entries #0 to #9; the seq file index will be 10.
Then entry #5 is deleted, so #6 becomes #5, ..., #10 becomes #9 and #11
becomes #10.  The next read continues from entry #10 (the old #11), so the
old #10 (now #9) is never returned.
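To make the failure mode concrete, here is a minimal userspace sketch
(illustration only, not kernel code; the array merely stands in for the
mount list) that mimics the old position-as-index behaviour:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
  	int list[12], n = 12, i, pos;

  	for (i = 0; i < n; i++)
  		list[i] = i;			/* mount entries #0 .. #11 */

  	/* first read(2): entries #0 .. #9, file position becomes 10 */
  	for (pos = 0; pos < 10; pos++)
  		printf("read1: #%d\n", list[pos]);

  	/* entry #5 is unmounted: later entries all shift down by one */
  	memmove(&list[5], &list[6], (n - 6) * sizeof(int));
  	n--;

  	/* second read(2) resumes at index 10: the old #10 never shows up */
  	for (; pos < n; pos++)
  		printf("read2: #%d\n", list[pos]);
  	return 0;
  }

The second loop prints only the old #11; the entry that slid down into
slot #9 is silently skipped, which is exactly what a reader of
/proc/self/mounts observes.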

Solve this by adding a cursor entry for each open instance.  Taking the
global namespace_sem for write seems excessive, since we are only dealing
with a per-namespace list.  Instead add a per-namespace spinlock and use
that together with namespace_sem taken for read to protect against
concurrent modification of the mount list.  This may reduce parallelism of
is_local_mountpoint(), but it's hardly a big contention point.  We could
also use RCU freeing of cursors so that traversal needs no additional
locks, if that turns out to be necessary.

The cursor is only moved once per read (it is not added to the list on
open) to minimize cacheline invalidation.  When EOF is reached, the cursor
is taken off the list, to prevent an excessive number of cursors from
accumulating due to inactive open file descriptors.
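For reference, the same cursor idea in a self-contained userspace sketch
(hypothetical types, no locking, not the kernel implementation): each
reader parks a flagged cursor node in the list after the last entry it
returned, iteration skips cursor nodes, and resumption starts from the
cursor rather than from a numeric index:

  #include <stdio.h>

  struct node {
  	struct node *prev, *next;
  	int is_cursor;
  	int id;				/* valid only when !is_cursor */
  };

  static void insert_after(struct node *pos, struct node *n)
  {
  	n->prev = pos;
  	n->next = pos->next;
  	pos->next->prev = n;
  	pos->next = n;
  }

  static void unlink_node(struct node *n)
  {
  	n->prev->next = n->next;
  	n->next->prev = n->prev;
  }

  /* next real entry after @pos, skipping cursors; NULL at end of list */
  static struct node *next_entry(struct node *head, struct node *pos)
  {
  	for (pos = pos->next; pos != head; pos = pos->next)
  		if (!pos->is_cursor)
  			return pos;
  	return NULL;
  }

  int main(void)
  {
  	struct node head = { &head, &head, 0, -1 };
  	struct node ent[4], cursor = { NULL, NULL, 1, -1 };
  	struct node *p;
  	int i;

  	for (i = 0; i < 4; i++) {		/* entries #0 .. #3 */
  		ent[i].is_cursor = 0;
  		ent[i].id = i;
  		insert_after(head.prev, &ent[i]);	/* append at tail */
  	}

  	/* "first read": return #0 and #1, then park the cursor after #1 */
  	p = next_entry(&head, &head);
  	printf("read1: #%d\n", p->id);
  	p = next_entry(&head, p);
  	printf("read1: #%d\n", p->id);
  	insert_after(p, &cursor);

  	unlink_node(&ent[1]);		/* entry #1 is deleted meanwhile */

  	/* "second read": resume from the cursor; nothing is skipped */
  	for (p = next_entry(&head, &cursor); p; p = next_entry(&head, p))
  		printf("read2: #%d\n", p->id);
  	return 0;
  }

Because the reader's position is a node in the list rather than a count of
entries, deleting #1 (or any other entry) cannot shift what the next read
returns; the patch below does the same with struct mount cursors flagged
MNT_CURSOR and protected by namespace_sem plus ns_lock.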
Reported-by: Karel Zak <kzak@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 530f32fc
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -9,7 +9,13 @@ struct mnt_namespace {
 	atomic_t		count;
 	struct ns_common	ns;
 	struct mount *	root;
+	/*
+	 * Traversal and modification of .list is protected by either
+	 *   - taking namespace_sem for write, OR
+	 *   - taking namespace_sem for read AND taking .ns_lock.
+	 */
 	struct list_head	list;
+	spinlock_t		ns_lock;
 	struct user_namespace	*user_ns;
 	struct ucounts		*ucounts;
 	u64			seq;	/* Sequence number to prevent loops */
@@ -133,9 +139,7 @@ struct proc_mounts {
 	struct mnt_namespace *ns;
 	struct path root;
 	int (*show)(struct seq_file *, struct vfsmount *);
-	void *cached_mount;
-	u64 cached_event;
-	loff_t cached_index;
+	struct mount cursor;
 };
 
 extern const struct seq_operations mounts_op;
@@ -153,3 +157,5 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
 {
 	return ns->seq == 0;
 }
+
+extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -648,6 +648,21 @@ struct vfsmount *lookup_mnt(const struct path *path)
 	return m;
 }
 
+static inline void lock_ns_list(struct mnt_namespace *ns)
+{
+	spin_lock(&ns->ns_lock);
+}
+
+static inline void unlock_ns_list(struct mnt_namespace *ns)
+{
+	spin_unlock(&ns->ns_lock);
+}
+
+static inline bool mnt_is_cursor(struct mount *mnt)
+{
+	return mnt->mnt.mnt_flags & MNT_CURSOR;
+}
+
 /*
  * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
  *                         current mount namespace.
@@ -673,11 +688,15 @@ bool __is_local_mountpoint(struct dentry *dentry)
 		goto out;
 
 	down_read(&namespace_sem);
+	lock_ns_list(ns);
 	list_for_each_entry(mnt, &ns->list, mnt_list) {
+		if (mnt_is_cursor(mnt))
+			continue;
 		is_covered = (mnt->mnt_mountpoint == dentry);
 		if (is_covered)
 			break;
 	}
+	unlock_ns_list(ns);
 	up_read(&namespace_sem);
 out:
 	return is_covered;
@@ -1245,46 +1264,71 @@ struct vfsmount *mnt_clone_internal(const struct path *path)
 }
 
 #ifdef CONFIG_PROC_FS
+static struct mount *mnt_list_next(struct mnt_namespace *ns,
+				   struct list_head *p)
+{
+	struct mount *mnt, *ret = NULL;
+
+	lock_ns_list(ns);
+	list_for_each_continue(p, &ns->list) {
+		mnt = list_entry(p, typeof(*mnt), mnt_list);
+		if (!mnt_is_cursor(mnt)) {
+			ret = mnt;
+			break;
+		}
+	}
+	unlock_ns_list(ns);
+
+	return ret;
+}
+
 /* iterator; we want it to have access to namespace_sem, thus here... */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_mounts *p = m->private;
+	struct list_head *prev;
 
 	down_read(&namespace_sem);
-	if (p->cached_event == p->ns->event) {
-		void *v = p->cached_mount;
-		if (*pos == p->cached_index)
-			return v;
-		if (*pos == p->cached_index + 1) {
-			v = seq_list_next(v, &p->ns->list, &p->cached_index);
-			return p->cached_mount = v;
-		}
-	}
+	if (!*pos) {
+		prev = &p->ns->list;
+	} else {
+		prev = &p->cursor.mnt_list;
 
-	p->cached_event = p->ns->event;
-	p->cached_mount = seq_list_start(&p->ns->list, *pos);
-	p->cached_index = *pos;
-	return p->cached_mount;
+		/* Read after we'd reached the end? */
+		if (list_empty(prev))
+			return NULL;
+	}
+
+	return mnt_list_next(p->ns, prev);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct proc_mounts *p = m->private;
+	struct mount *mnt = v;
 
-	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
-	p->cached_index = *pos;
-	return p->cached_mount;
+	++*pos;
+	return mnt_list_next(p->ns, &mnt->mnt_list);
 }
 
 static void m_stop(struct seq_file *m, void *v)
 {
+	struct proc_mounts *p = m->private;
+	struct mount *mnt = v;
+
+	lock_ns_list(p->ns);
+	if (mnt)
+		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
+	else
+		list_del_init(&p->cursor.mnt_list);
+	unlock_ns_list(p->ns);
 	up_read(&namespace_sem);
 }
 
 static int m_show(struct seq_file *m, void *v)
 {
 	struct proc_mounts *p = m->private;
-	struct mount *r = list_entry(v, struct mount, mnt_list);
+	struct mount *r = v;
 	return p->show(m, &r->mnt);
 }
@@ -1294,6 +1338,15 @@ const struct seq_operations mounts_op = {
 	.stop	= m_stop,
 	.show	= m_show,
 };
+
+void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
+{
+	down_read(&namespace_sem);
+	lock_ns_list(ns);
+	list_del(&cursor->mnt_list);
+	unlock_ns_list(ns);
+	up_read(&namespace_sem);
+}
 #endif  /* CONFIG_PROC_FS */
 
 /**
@@ -3202,6 +3255,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
 	atomic_set(&new_ns->count, 1);
 	INIT_LIST_HEAD(&new_ns->list);
 	init_waitqueue_head(&new_ns->poll);
+	spin_lock_init(&new_ns->ns_lock);
 	new_ns->user_ns = get_user_ns(user_ns);
 	new_ns->ucounts = ucounts;
 	return new_ns;
@@ -3842,10 +3896,14 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
 	bool visible = false;
 
 	down_read(&namespace_sem);
+	lock_ns_list(ns);
 	list_for_each_entry(mnt, &ns->list, mnt_list) {
 		struct mount *child;
 		int mnt_flags;
 
+		if (mnt_is_cursor(mnt))
+			continue;
+
 		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
 			continue;
@@ -3893,6 +3951,7 @@ static bool mnt_already_visible(struct mnt_namespace *ns,
 	next:	;
 	}
 found:
+	unlock_ns_list(ns);
 	up_read(&namespace_sem);
 	return visible;
 }
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -279,7 +279,8 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 	p->ns = ns;
 	p->root = root;
 	p->show = show;
-	p->cached_event = ~0ULL;
+	INIT_LIST_HEAD(&p->cursor.mnt_list);
+	p->cursor.mnt.mnt_flags = MNT_CURSOR;
 
 	return 0;
@@ -296,6 +297,7 @@ static int mounts_release(struct inode *inode, struct file *file)
 	struct seq_file *m = file->private_data;
 	struct proc_mounts *p = m->private;
 	path_put(&p->root);
+	mnt_cursor_del(p->ns, &p->cursor);
 	put_mnt_ns(p->ns);
 	return seq_release_private(inode, file);
 }
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,7 +50,8 @@ struct fs_context;
 #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
 
 #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
-			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
+			    MNT_CURSOR)
 
 #define MNT_INTERNAL	0x4000
@@ -64,6 +65,7 @@ struct fs_context;
 #define MNT_SYNC_UMOUNT		0x2000000
 #define MNT_MARKED		0x4000000
 #define MNT_UMOUNT		0x8000000
+#define MNT_CURSOR		0x10000000
 
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */