Commit 6fd2fe49 authored by Al Viro, committed by Christian Brauner

copy_process(): don't use ksys_close() on cleanups

anon_inode_getfd() should be used *ONLY* in situations when we are
guaranteed to be past the last failure point (including copying the
descriptor number to userland, at that).  And ksys_close() should
not be used for cleanups at all.

anon_inode_getfile() is there for all nontrivial cases like that.
Just use that...

Fixes: b3e58382 ("clone: add CLONE_PIDFD")
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Jann Horn <jannh@google.com>
Signed-off-by: Christian Brauner <christian@brauner.io>
parent bee19cd8
...@@ -1712,31 +1712,6 @@ const struct file_operations pidfd_fops = { ...@@ -1712,31 +1712,6 @@ const struct file_operations pidfd_fops = {
#endif #endif
}; };
/**
* pidfd_create() - Create a new pid file descriptor.
*
* @pid: struct pid that the pidfd will reference
*
* This creates a new pid file descriptor with the O_CLOEXEC flag set.
*
* Note, that this function can only be called after the fd table has
* been unshared to avoid leaking the pidfd to the new process.
*
* Return: On success, a cloexec pidfd is returned.
* On error, a negative errno number will be returned.
*/
static int pidfd_create(struct pid *pid)
{
int fd;
/*
 * get_pid() takes a reference on @pid; on success that reference is
 * owned by the new anon-inode file (released by pidfd_fops on close).
 * NOTE(review): this is the function the commit removes — installing
 * the fd here, before copy_process() is past its last failure point,
 * is exactly the bug being fixed.
 */
fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
O_RDWR | O_CLOEXEC);
if (fd < 0)
/* File creation failed: drop the reference taken above. */
put_pid(pid);
return fd;
}
static void __delayed_free_task(struct rcu_head *rhp) static void __delayed_free_task(struct rcu_head *rhp)
{ {
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
...@@ -1774,6 +1749,7 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -1774,6 +1749,7 @@ static __latent_entropy struct task_struct *copy_process(
int pidfd = -1, retval; int pidfd = -1, retval;
struct task_struct *p; struct task_struct *p;
struct multiprocess_signals delayed; struct multiprocess_signals delayed;
struct file *pidfile = NULL;
/* /*
* Don't allow sharing the root directory with processes in a different * Don't allow sharing the root directory with processes in a different
...@@ -2046,11 +2022,20 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -2046,11 +2022,20 @@ static __latent_entropy struct task_struct *copy_process(
* if the fd table isn't shared). * if the fd table isn't shared).
*/ */
if (clone_flags & CLONE_PIDFD) { if (clone_flags & CLONE_PIDFD) {
retval = pidfd_create(pid); retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
if (retval < 0) if (retval < 0)
goto bad_fork_free_pid; goto bad_fork_free_pid;
pidfd = retval; pidfd = retval;
pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
O_RDWR | O_CLOEXEC);
if (IS_ERR(pidfile)) {
put_unused_fd(pidfd);
goto bad_fork_free_pid;
}
get_pid(pid); /* held by pidfile now */
retval = put_user(pidfd, parent_tidptr); retval = put_user(pidfd, parent_tidptr);
if (retval) if (retval)
goto bad_fork_put_pidfd; goto bad_fork_put_pidfd;
...@@ -2168,6 +2153,9 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -2168,6 +2153,9 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cancel_cgroup; goto bad_fork_cancel_cgroup;
} }
/* past the last point of failure */
if (pidfile)
fd_install(pidfd, pidfile);
init_task_pid_links(p); init_task_pid_links(p);
if (likely(p->pid)) { if (likely(p->pid)) {
...@@ -2234,8 +2222,10 @@ static __latent_entropy struct task_struct *copy_process( ...@@ -2234,8 +2222,10 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_cgroup_threadgroup_change_end: bad_fork_cgroup_threadgroup_change_end:
cgroup_threadgroup_change_end(current); cgroup_threadgroup_change_end(current);
bad_fork_put_pidfd: bad_fork_put_pidfd:
if (clone_flags & CLONE_PIDFD) if (clone_flags & CLONE_PIDFD) {
ksys_close(pidfd); fput(pidfile);
put_unused_fd(pidfd);
}
bad_fork_free_pid: bad_fork_free_pid:
if (pid != &init_struct_pid) if (pid != &init_struct_pid)
free_pid(pid); free_pid(pid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment