Commit 75d4276e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs fixes from Al Viro:

 - untangle sys_close() abuses in xt_bpf

 - deal with register_shrinker() failures in sget()

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  fix "netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'"
  sget(): handle failures of register_shrinker()
  mm,vmscan: Make unregister_shrinker() no-op if register_shrinker() failed.
parents 5b6c02f3 040ee692
fs/super.c
@@ -517,7 +517,11 @@ struct super_block *sget_userns(struct file_system_type *type,
 	hlist_add_head(&s->s_instances, &type->fs_supers);
 	spin_unlock(&sb_lock);
 	get_filesystem(type);
-	register_shrinker(&s->s_shrink);
+	err = register_shrinker(&s->s_shrink);
+	if (err) {
+		deactivate_locked_super(s);
+		s = ERR_PTR(err);
+	}
 	return s;
 }
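With this change sget_userns() (and thus sget()) can fail with the error returned by register_shrinker(): it tears down the half-constructed superblock via deactivate_locked_super() and hands back an ERR_PTR. Callers already had to treat the return value as ERR_PTR-or-pointer; the following is only a minimal caller-side sketch, with a hypothetical filesystem (examplefs_mount is illustrative, not from this commit):

#include <linux/fs.h>
#include <linux/err.h>

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	struct super_block *sb;

	/* sget() may now also return an ERR_PTR (e.g. -ENOMEM) propagated
	 * from register_shrinker(), so the usual IS_ERR() check covers it. */
	sb = sget(fs_type, NULL, set_anon_super, flags, NULL);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	/* ... allocate the root inode/dentry, set sb->s_root, mark the
	 * superblock active, etc. (elided in this sketch) ... */
	return dget(sb->s_root);
}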
include/linux/bpf.h
@@ -419,6 +419,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
 		attr->numa_node : NUMA_NO_NODE;
 }
 
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -506,6 +508,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
 {
 	return 0;
 }
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+				enum bpf_prog_type type)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -514,6 +522,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
 	return bpf_prog_get_type_dev(ufd, type, false);
 }
 
+bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
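The header keeps the usual CONFIG_BPF_SYSCALL split: the real prototype lives in the syscall-enabled branch, while the #else branch supplies an inline stub that fails with ERR_PTR(-EOPNOTSUPP), so callers such as xt_bpf build without any #ifdef of their own. The generic shape of that pattern, using hypothetical names (CONFIG_EXAMPLE_FEATURE, example_get) purely for illustration:

#include <linux/err.h>

struct example_obj;

#ifdef CONFIG_EXAMPLE_FEATURE
struct example_obj *example_get(const char *name);
#else
/* Stub keeps callers compiling when the feature is configured out;
 * they simply see an ERR_PTR and bail out at runtime. */
static inline struct example_obj *example_get(const char *name)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif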
kernel/bpf/inode.c
@@ -368,7 +368,45 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
 	putname(pname);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(bpf_obj_get_user);
+
+static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
+{
+	struct bpf_prog *prog;
+	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (inode->i_op == &bpf_map_iops)
+		return ERR_PTR(-EINVAL);
+	if (inode->i_op != &bpf_prog_iops)
+		return ERR_PTR(-EACCES);
+
+	prog = inode->i_private;
+
+	ret = security_bpf_prog(prog);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	if (!bpf_prog_get_ok(prog, &type, false))
+		return ERR_PTR(-EINVAL);
+
+	return bpf_prog_inc(prog);
+}
+
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
+{
+	struct bpf_prog *prog;
+	struct path path;
+	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
+	if (ret)
+		return ERR_PTR(ret);
+	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
+	if (!IS_ERR(prog))
+		touch_atime(&path);
+	path_put(&path);
+	return prog;
+}
+EXPORT_SYMBOL(bpf_prog_get_type_path);
 
 static void bpf_evict_inode(struct inode *inode)
 {
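bpf_prog_get_type_path() resolves the pinned path entirely in kernel space with kern_path(), checks that the inode really is a pinned program of the requested type, and returns it with its reference count raised by bpf_prog_inc(); every successful call therefore has to be balanced with bpf_prog_put(). A minimal in-kernel usage sketch (hypothetical caller, not part of this commit):

#include <linux/bpf.h>
#include <linux/err.h>

static int example_use_pinned_prog(const char *path)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* e.g. -ENOENT, -EACCES, -EINVAL */

	/* ... run or attach the program ... */

	bpf_prog_put(prog);	/* drop the reference taken above */
	return 0;
}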
kernel/bpf/syscall.c
@@ -1057,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
 
-static bool bpf_prog_get_ok(struct bpf_prog *prog,
+bool bpf_prog_get_ok(struct bpf_prog *prog,
 			    enum bpf_prog_type *attach_type, bool attach_drv)
 {
 	/* not an attachment, just a refcount inc, always allow */
mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+	if (!shrinker->nr_deferred)
+		return;
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
 	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
 }
 EXPORT_SYMBOL(unregister_shrinker);
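Bailing out when shrinker->nr_deferred is NULL (and resetting it after the kfree()) makes unregister_shrinker() safe to call for a shrinker that was never successfully registered, or that was already unregistered, which is what sget()'s new error path relies on when deactivate_locked_super() runs. A sketch of the teardown pattern this enables, with hypothetical names (example_*):

#include <linux/shrinker.h>

/* Hypothetical callbacks, only here to make the sketch self-contained. */
static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* nothing to reclaim in this sketch */
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {	/* static => starts zeroed */
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

static void example_teardown(void)
{
	/* Safe even if register_shrinker() failed or never ran: nr_deferred
	 * is still NULL, so this is a no-op.  Calling it twice is also fine
	 * now that nr_deferred is reset to NULL after the kfree(). */
	unregister_shrinker(&example_shrinker);
}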
net/netfilter/xt_bpf.c
@@ -55,21 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
 
 static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
 {
-	mm_segment_t oldfs = get_fs();
-	int retval, fd;
-
 	if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
 		return -EINVAL;
 
-	set_fs(KERNEL_DS);
-	fd = bpf_obj_get_user(path, 0);
-	set_fs(oldfs);
-	if (fd < 0)
-		return fd;
-
-	retval = __bpf_mt_check_fd(fd, ret);
-	sys_close(fd);
-	return retval;
+	*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
+	return PTR_ERR_OR_ZERO(*ret);
 }
 
 static int bpf_mt_check(const struct xt_mtchk_param *par)
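On the userspace side nothing changes: an xt_bpf match in XT_BPF_MODE_PATH_PINNED mode still names a program pinned under bpffs; only the kernel now resolves that path directly instead of round-tripping through bpf_obj_get_user()/sys_close() under set_fs(KERNEL_DS). For context, a sketch of how such a program ends up pinned in the first place, using the bpf(2) BPF_OBJ_PIN command (assumes bpffs is mounted at /sys/fs/bpf and prog_fd refers to a loaded BPF_PROG_TYPE_SOCKET_FILTER program; pin_prog is an illustrative helper, not from this commit):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Pin an already-loaded program so an iptables rule such as
 *   iptables -A INPUT -m bpf --object-pinned /sys/fs/bpf/example_filter -j DROP
 * can reference it by path. */
static int pin_prog(int prog_fd, const char *path)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.bpf_fd   = prog_fd;

	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}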