Commit 1da5c46f authored by Oleg Nesterov, committed by Thomas Gleixner

kthread: Make struct kthread kmalloc'ed

commit 23196f2e "kthread: Pin the stack via try_get_task_stack() /
put_task_stack() in to_live_kthread() function" is a workaround for the
fragile design of struct kthread being allocated on the task stack.

struct kthread in its current form should be removed, but this needs
cleanups outside of kthread.c.

As a first step, move struct kthread away from the task stack by making it
kmalloc'ed. This allows kthread.exited to be accessed without the magic of
pinning the task stack and without the try logic in to_live_kthread().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chunming Zhou <David1.Zhou@amd.com>
Cc: Roman Pen <roman.penyaev@profitbricks.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20161129175057.GA5330@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 7c478895
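
For context, a minimal sketch (not part of the commit; the function names kthread_fn_old/kthread_fn_new are illustrative, condensed from the kthread() hunks below) contrasting where the per-kthread bookkeeping lives before and after this change:

/*
 * Before: struct kthread sits in kthread()'s stack frame, and
 * current->vfork_done points into that frame, so any outside user
 * (e.g. to_live_kthread()) must pin the task stack first.
 */
static int kthread_fn_old(void *_create)
{
        struct kthread self;                    /* lives on the task stack */

        self.flags = 0;
        init_completion(&self.exited);
        current->vfork_done = &self.exited;     /* points into the stack */
        /* ... run threadfn(), then do_exit() without returning ... */
}

/*
 * After: the structure is kmalloc'ed, recorded via set_kthread_struct(),
 * and stays valid until free_task() calls free_kthread_struct().
 */
static int kthread_fn_new(void *_create)
{
        struct kthread *self = kmalloc(sizeof(*self), GFP_KERNEL);

        set_kthread_struct(self);               /* stashed in ->set_child_tid */
        self->flags = 0;
        init_completion(&self->exited);
        current->vfork_done = &self->exited;    /* no longer a stack address */
        /* ... */
}
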
@@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
         __k;                                            \
 })
 
+void free_kthread_struct(struct task_struct *k);
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
@@ -354,6 +354,8 @@ void free_task(struct task_struct *tsk)
         ftrace_graph_exit_task(tsk);
         put_seccomp_filter(tsk);
         arch_release_task_struct(tsk);
+        if (tsk->flags & PF_KTHREAD)
+                free_kthread_struct(tsk);
         free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -53,14 +53,38 @@ enum KTHREAD_BITS {
         KTHREAD_IS_PARKED,
 };
 
-#define __to_kthread(vfork)     \
-        container_of(vfork, struct kthread, exited)
+static inline void set_kthread_struct(void *kthread)
+{
+        /*
+         * We abuse ->set_child_tid to avoid the new member and because it
+         * can't be wrongly copied by copy_process(). We also rely on fact
+         * that the caller can't exec, so PF_KTHREAD can't be cleared.
+         */
+        current->set_child_tid = (__force void __user *)kthread;
+}
 
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
-        return __to_kthread(k->vfork_done);
+        WARN_ON(!(k->flags & PF_KTHREAD));
+        return (__force void *)k->set_child_tid;
 }
 
+void free_kthread_struct(struct task_struct *k)
+{
+        /*
+         * Can be NULL if this kthread was created by kernel_thread()
+         * or if kmalloc() in kthread() failed.
+         */
+        kfree(to_kthread(k));
+}
+
+#define __to_kthread(vfork)     \
+        container_of(vfork, struct kthread, exited)
+
+/*
+ * TODO: kill it and use to_kthread(). But we still need the users
+ * like kthread_stop() which has to sync with the exiting kthread.
+ */
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
         struct completion *vfork = ACCESS_ONCE(k->vfork_done);
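
Helpers elsewhere in kthread.c that already go through to_kthread() are unaffected by the new representation; for instance kthread_data(), which is not part of this diff and is reproduced here from memory as an illustration, still reads roughly:

/*
 * Returns the data pointer passed at kthread creation. With this patch
 * the lookup dereferences the kmalloc'ed structure found via
 * ->set_child_tid instead of an object embedded in the task stack.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}
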
@@ -181,14 +205,11 @@ static int kthread(void *_create)
         int (*threadfn)(void *data) = create->threadfn;
         void *data = create->data;
         struct completion *done;
-        struct kthread self;
+        struct kthread *self;
         int ret;
 
-        self.flags = 0;
-        self.data = data;
-        init_completion(&self.exited);
-        init_completion(&self.parked);
-        current->vfork_done = &self.exited;
+        self = kmalloc(sizeof(*self), GFP_KERNEL);
+        set_kthread_struct(self);
 
         /* If user was SIGKILLed, I release the structure. */
         done = xchg(&create->done, NULL);
@@ -196,6 +217,19 @@ static int kthread(void *_create)
                 kfree(create);
                 do_exit(-EINTR);
         }
+
+        if (!self) {
+                create->result = ERR_PTR(-ENOMEM);
+                complete(done);
+                do_exit(-ENOMEM);
+        }
+
+        self->flags = 0;
+        self->data = data;
+        init_completion(&self->exited);
+        init_completion(&self->parked);
+        current->vfork_done = &self->exited;
+
         /* OK, tell user we're spawned, wait for stop or wakeup */
         __set_current_state(TASK_UNINTERRUPTIBLE);
         create->result = current;
@@ -203,12 +237,10 @@ static int kthread(void *_create)
         schedule();
 
         ret = -EINTR;
-
-        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
-                __kthread_parkme(&self);
+        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+                __kthread_parkme(self);
                 ret = threadfn(data);
         }
-        /* we can't just return, we must preserve "self" on stack */
         do_exit(ret);
 }
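
From a caller's point of view nothing changes. The sketch below (hypothetical demo_* names, assuming the usual kthread_run()/kthread_stop() API; not part of the commit) shows the unchanged lifecycle, with the new detail that the kmalloc'ed struct kthread is released only when the dead task itself is freed, via free_task() -> free_kthread_struct():

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *demo_task;

/* Loops until someone calls kthread_stop() on this task. */
static int demo_threadfn(void *data)
{
        while (!kthread_should_stop())
                msleep(100);
        return 0;
}

static int demo_start(void)
{
        demo_task = kthread_run(demo_threadfn, NULL, "demo-kthread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        return 0;
}

static void demo_stop(void)
{
        /* Still synchronizes with the exiting thread via kthread->exited;
         * the struct kthread itself is freed later, from free_task(). */
        kthread_stop(demo_task);
}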