Commit 73d080d3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs fixes from Al Viro:
 "The alloc_super() one is a regression in this merge window, lazytime
  thing is older..."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  VFS: Handle lazytime in do_mount()
  alloc_super(): do ->s_umount initialization earlier
parents 1c6b942d d7ee9469
fs/namespace.c
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
 			    SB_DIRSYNC |
 			    SB_SILENT |
 			    SB_POSIXACL |
+			    SB_LAZYTIME |
 			    SB_I_VERSION);
 	if (flags & MS_REMOUNT)
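The hunk above adds SB_LAZYTIME to the mask of superblock flags that do_mount() lets through, so that "-o lazytime" is no longer silently dropped when the mount flags are translated into sb_flags. Below is a rough, userspace-only sketch of that masking step; the flag values are hard-coded here to mirror the kernel's SB_* bits, and nothing in this snippet is do_mount()'s actual code.

/*
 * Illustrative only: flag values copied to match the kernel's SB_* bits,
 * but this is ordinary userspace C, not do_mount().
 */
#include <stdio.h>

#define SB_POSIXACL	(1 << 16)
#define SB_I_VERSION	(1 << 23)
#define SB_LAZYTIME	(1 << 25)

int main(void)
{
	unsigned long flags = SB_POSIXACL | SB_LAZYTIME;	/* what the caller asked for */

	/* Mask before the fix: SB_LAZYTIME absent, so the bit is lost. */
	unsigned long old_mask = SB_POSIXACL | SB_I_VERSION;
	/* Mask after the fix: SB_LAZYTIME survives into sb_flags. */
	unsigned long new_mask = SB_POSIXACL | SB_LAZYTIME | SB_I_VERSION;

	printf("before: %#lx\n", flags & old_mask);	/* 0x10000   - lazytime gone */
	printf("after:  %#lx\n", flags & new_mask);	/* 0x2010000 - lazytime kept */
	return 0;
}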
fs/super.c
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	INIT_LIST_HEAD(&s->s_mounts);
 	s->s_user_ns = get_user_ns(user_ns);
+	init_rwsem(&s->s_umount);
+	lockdep_set_class(&s->s_umount, &type->s_umount_key);
+	/*
+	 * sget() can have s_umount recursion.
+	 *
+	 * When it cannot find a suitable sb, it allocates a new
+	 * one (this one), and tries again to find a suitable old
+	 * one.
+	 *
+	 * In case that succeeds, it will acquire the s_umount
+	 * lock of the old one. Since these are clearly distinct
+	 * locks, and this object isn't exposed yet, there's no
+	 * risk of deadlocks.
+	 *
+	 * Annotate this by putting this lock in a different
+	 * subclass.
+	 */
+	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 	if (security_sb_alloc(s))
 		goto fail;
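The moved block carries its own justification in the comment: alloc_super() write-locks the brand-new sb's ->s_umount, and sget() may then take the ->s_umount of an already-registered sb in the same lock class, so the new lock is acquired with down_write_nested(..., SINGLE_DEPTH_NESTING) to keep lockdep from flagging a false self-deadlock. The following is a hypothetical, minimal kernel-module sketch of that annotation pattern, not code from this commit; all demo_* names are invented, while init_rwsem(), lockdep_set_class(), down_write_nested() and SINGLE_DEPTH_NESTING are the real kernel interfaces.

/*
 * Two distinct rw_semaphores share one lock class and are write-locked at
 * the same time; the lock belonging to the not-yet-visible object is taken
 * with a different lockdep subclass so the nesting is not reported as a
 * deadlock. Invented demo_* names throughout.
 */
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/lockdep.h>

static struct rw_semaphore demo_old, demo_new;
static struct lock_class_key demo_class;

static int __init demo_init(void)
{
	init_rwsem(&demo_old);
	init_rwsem(&demo_new);
	/* Same class for both, like type->s_umount_key in alloc_super(). */
	lockdep_set_class(&demo_old, &demo_class);
	lockdep_set_class(&demo_new, &demo_class);

	/* "New" object, not reachable by anyone else yet: lock it first... */
	down_write_nested(&demo_new, SINGLE_DEPTH_NESTING);
	/* ...then take the "old" one, as sget() does when it finds a match. */
	down_write(&demo_old);

	up_write(&demo_old);
	up_write(&demo_new);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");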
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 		goto fail;
 	if (list_lru_init_memcg(&s->s_inode_lru))
 		goto fail;
-	init_rwsem(&s->s_umount);
-	lockdep_set_class(&s->s_umount, &type->s_umount_key);
-	/*
-	 * sget() can have s_umount recursion.
-	 *
-	 * When it cannot find a suitable sb, it allocates a new
-	 * one (this one), and tries again to find a suitable old
-	 * one.
-	 *
-	 * In case that succeeds, it will acquire the s_umount
-	 * lock of the old one. Since these are clearly distinct
-	 * locks, and this object isn't exposed yet, there's no
-	 * risk of deadlocks.
-	 *
-	 * Annotate this by putting this lock in a different
-	 * subclass.
-	 */
-	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 	s->s_count = 1;
 	atomic_set(&s->s_active, 1);
 	mutex_init(&s->s_vfs_rename_mutex);
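Taken together, the two alloc_super() hunks simply move the ->s_umount setup ahead of the earliest failure exits, so the lock is already initialized (and held) by the time any "goto fail" path can run. As a generic userspace illustration of that ordering concern only, not the kernel's actual error path, here is a sketch in ordinary C with pthreads; every identifier in it (struct obj, obj_alloc, buf) is made up.

/*
 * Anything a shared "fail:" path touches must already be set up before the
 * first goto that reaches it. Invented names throughout.
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_rwlock_t lock;
	void *buf;
};

static struct obj *obj_alloc(size_t n)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	/* Initialize (and take) the lock before any failure exit... */
	pthread_rwlock_init(&o->lock, NULL);
	pthread_rwlock_wrlock(&o->lock);

	o->buf = malloc(n);
	if (!o->buf)
		goto fail;

	return o;	/* handed back locked, like alloc_super() */

fail:
	/* ...because the common error path unconditionally unlocks it. */
	pthread_rwlock_unlock(&o->lock);
	pthread_rwlock_destroy(&o->lock);
	free(o);
	return NULL;
}

int main(void)
{
	struct obj *o = obj_alloc(64);

	if (o) {
		pthread_rwlock_unlock(&o->lock);
		pthread_rwlock_destroy(&o->lock);
		free(o->buf);
		free(o);
	}
	return 0;
}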