Commit 4e21fc13 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal

Pull third pile of kernel_execve() patches from Al Viro:
 "The last bits of infrastructure for kernel_thread() et.al., with
  alpha/arm/x86 use of those.  Plus sanitizing the asm glue and
  do_notify_resume() on alpha, fixing the "disabled irq while running
  task_work stuff" breakage there.

  At that point the rest of kernel_thread/kernel_execve/sys_execve work
  can be done independently for different architectures.  The only
  pending bits that do depend on having all architectures converted are
   restricted to fs/* and kernel/* - that'll obviously have to wait for
  the next cycle.

  I thought we'd have to wait for all of them done before we start
  eliminating the longjump-style insanity in kernel_execve(), but it
  turned out there's a very simple way to do that without flagday-style
  changes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal:
  alpha: switch to saner kernel_execve() semantics
  arm: switch to saner kernel_execve() semantics
  x86, um: convert to saner kernel_execve() semantics
  infrastructure for saner ret_from_kernel_thread semantics
  make sure that kernel_thread() callbacks call do_exit() themselves
  make sure that we always have a return path from kernel_execve()
  ppc: eeh_event should just use kthread_run()
  don't bother with kernel_thread/kernel_execve for launching linuxrc
  alpha: get rid of switch_stack argument of do_work_pending()
  alpha: don't bother passing switch_stack separately from regs
  alpha: take SIGPENDING/NOTIFY_RESUME loop into signal.c
  alpha: simplify TIF_NEED_RESCHED handling
parents 8418263e 5522be6a
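
The heart of the series, condensed: a kernel thread's callback is now an ordinary function call made from ret_from_kernel_thread, and kernel_execve() gains a normal return path. A minimal sketch of the resulting contract, assuming GENERIC_KERNEL_EXECVE is selected; helper_thread() and "/sbin/helper" are hypothetical, the shape mirrors the kernel/kmod.c hunk further down:

#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/binfmts.h>
#include <linux/sched.h>

static int helper_thread(void *unused)
{
    static const char *const argv[] = { "helper", NULL };
    static const char *const envp[] = { "HOME=/", NULL };
    int ret = kernel_execve("/sbin/helper", argv, envp);

    if (!ret)
        return 0;   /* exec worked: returning from the callback runs
                       ret_from_kernel_thread, which now falls through
                       to the syscall-exit path and surfaces in the new
                       program's userland context */
    do_exit(ret);   /* exec failed: a finished kernel thread must call
                       do_exit() itself now, not rely on the asm glue */
}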
@@ -274,6 +274,9 @@ config ARCH_WANT_OLD_COMPAT_IPC
 config GENERIC_KERNEL_THREAD
     bool
 
+config GENERIC_KERNEL_EXECVE
+    bool
+
 config HAVE_ARCH_SECCOMP_FILTER
     bool
     help
...
@@ -21,6 +21,7 @@ config ALPHA
     select GENERIC_STRNCPY_FROM_USER
     select GENERIC_STRNLEN_USER
     select GENERIC_KERNEL_THREAD
+    select GENERIC_KERNEL_EXECVE
     help
       The Alpha is a 64-bit general-purpose processor designed and
       marketed by the Digital Equipment Corporation of blessed memory,
...
@@ -482,7 +482,6 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_KERNEL_EXECVE
 
 /* "Conditional" syscalls.  What we want is
...
@@ -311,7 +311,7 @@ entSys:
     .align  4
 ret_from_sys_call:
-    cmovne  $26, 0, $19     /* $19 = 0 => non-restartable */
+    cmovne  $26, 0, $18     /* $18 = 0 => non-restartable */
     ldq     $0, SP_OFF($sp)
     and     $0, 8, $0
     beq     $0, ret_to_kernel
@@ -320,8 +320,8 @@ ret_to_user:
        sampling and the rti.  */
     lda     $16, 7
     call_pal PAL_swpipl
-    ldl     $5, TI_FLAGS($8)
-    and     $5, _TIF_WORK_MASK, $2
+    ldl     $17, TI_FLAGS($8)
+    and     $17, _TIF_WORK_MASK, $2
     bne     $2, work_pending
 restore_all:
     RESTORE_ALL
@@ -341,10 +341,10 @@ $syscall_error:
      * frame to indicate that a negative return value wasn't an
      * error number..
      */
-    ldq     $19, 0($sp)     /* old syscall nr (zero if success) */
-    beq     $19, $ret_success
-    ldq     $20, 72($sp)    /* .. and this a3 */
+    ldq     $18, 0($sp)     /* old syscall nr (zero if success) */
+    beq     $18, $ret_success
+    ldq     $19, 72($sp)    /* .. and this a3 */
     subq    $31, $0, $0     /* with error in v0 */
     addq    $31, 1, $1      /* set a3 for errno return */
     stq     $0, 0($sp)
@@ -362,51 +362,35 @@ $ret_success:
  * Do all cleanup when returning from all interrupts and system calls.
  *
  * Arguments:
- *       $5: TI_FLAGS.
  *       $8: current.
- *      $19: The old syscall number, or zero if this is not a return
+ *      $17: TI_FLAGS.
+ *      $18: The old syscall number, or zero if this is not a return
  *           from a syscall that errored and is possibly restartable.
- *      $20: The old a3 value
+ *      $19: The old a3 value
  */
 
     .align  4
     .ent    work_pending
 work_pending:
-    and     $5, _TIF_NEED_RESCHED, $2
-    beq     $2, $work_notifysig
+    and     $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
+    bne     $2, $work_notifysig
 
 $work_resched:
-    subq    $sp, 16, $sp
-    stq     $19, 0($sp)     /* save syscall nr */
-    stq     $20, 8($sp)     /* and error indication (a3) */
+    /*
+     * We can get here only if we returned from syscall without SIGPENDING
+     * or got through work_notifysig already.  Either case means no syscall
+     * restarts for us, so let $18 and $19 burn.
+     */
     jsr     $26, schedule
-    ldq     $19, 0($sp)
-    ldq     $20, 8($sp)
-    addq    $sp, 16, $sp
-    /* Make sure need_resched and sigpending don't change between
-        sampling and the rti.  */
-    lda     $16, 7
-    call_pal PAL_swpipl
-    ldl     $5, TI_FLAGS($8)
-    and     $5, _TIF_WORK_MASK, $2
-    beq     $2, restore_all
-    and     $5, _TIF_NEED_RESCHED, $2
-    bne     $2, $work_resched
+    mov     0, $18
+    br      ret_to_user
 
 $work_notifysig:
     mov     $sp, $16
     bsr     $1, do_switch_stack
-    mov     $sp, $17
-    mov     $5, $18
-    mov     $19, $9         /* save old syscall number */
-    mov     $20, $10        /* save old a3 */
-    and     $5, _TIF_SIGPENDING, $2
-    cmovne  $2, 0, $9       /* we don't want double syscall restarts */
-    jsr     $26, do_notify_resume
-    mov     $9, $19
-    mov     $10, $20
+    jsr     $26, do_work_pending
     bsr     $1, undo_switch_stack
-    br      ret_to_user
+    br      restore_all
 .end work_pending
 
 /*
@@ -454,9 +438,9 @@ $strace_success:
     .align  3
 $strace_error:
-    ldq     $19, 0($sp)     /* old syscall nr (zero if success) */
-    beq     $19, $strace_success
-    ldq     $20, 72($sp)    /* .. and this a3 */
+    ldq     $18, 0($sp)     /* old syscall nr (zero if success) */
+    beq     $18, $strace_success
+    ldq     $19, 72($sp)    /* .. and this a3 */
     subq    $31, $0, $0     /* with error in v0 */
     addq    $31, 1, $1      /* set a3 for errno return */
@@ -464,11 +448,11 @@ $strace_error:
     stq     $1, 72($sp)     /* a3 for return */
 
     bsr     $1, do_switch_stack
-    mov     $19, $9         /* save old syscall number */
-    mov     $20, $10        /* save old a3 */
+    mov     $18, $9         /* save old syscall number */
+    mov     $19, $10        /* save old a3 */
     jsr     $26, syscall_trace_leave
-    mov     $9, $19
-    mov     $10, $20
+    mov     $9, $18
+    mov     $10, $19
     bsr     $1, undo_switch_stack
 
     mov     $31, $26        /* tell "ret_from_sys_call" we can restart */
@@ -619,24 +603,9 @@ ret_from_kernel_thread:
     mov     $9, $27
     mov     $10, $16
     jsr     $26, ($9)
-    ldgp    $gp, 0($26)
-    mov     $0, $16
-    mov     $31, $26
-    jmp     $31, sys_exit
-.end ret_from_kernel_thread
-
-    .globl  ret_from_kernel_execve
-    .align  4
-    .ent    ret_from_kernel_execve
-ret_from_kernel_execve:
-    mov     $16, $sp
-    /* Avoid the HAE being gratuitously wrong, to avoid restoring it.  */
-    ldq     $2, alpha_mv+HAE_CACHE
-    stq     $2, 152($sp)    /* HAE */
     mov     $31, $19        /* to disable syscall restarts */
     br      $31, ret_to_user
-
-.end ret_from_kernel_execve
+.end ret_from_kernel_thread
 
 /*
...
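
A note on the register renumbering running through the entry.S hunks above: Alpha passes C arguments in $16-$21 (a0-a5), so keeping TI_FLAGS in $17 and the saved syscall number and a3 value in $18/$19 leaves everything already sitting in the argument slots of the C helper that work_pending now calls. How the registers line up against the do_work_pending() prototype introduced in the signal.c diff below (the annotations are editorial):

void do_work_pending(struct pt_regs *regs,       /* $16 = a0: mov $sp, $16 */
                     unsigned long thread_flags, /* $17 = a1: ldl $17, TI_FLAGS($8) */
                     unsigned long r0,           /* $18 = a2: old syscall nr */
                     unsigned long r19);         /* $19 = a3: old a3 value */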
@@ -298,8 +298,9 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 static long
 setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-                 struct switch_stack *sw, unsigned long mask, unsigned long sp)
+                 unsigned long mask, unsigned long sp)
 {
+    struct switch_stack *sw = (struct switch_stack *)regs - 1;
     long i, err = 0;
 
     err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
@@ -354,7 +355,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 
 static int
 setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
-            struct pt_regs *regs, struct switch_stack * sw)
+            struct pt_regs *regs)
 {
     unsigned long oldsp, r26, err = 0;
     struct sigframe __user *frame;
@@ -364,7 +365,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
     if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
         return -EFAULT;
 
-    err |= setup_sigcontext(&frame->sc, regs, sw, set->sig[0], oldsp);
+    err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp);
     if (err)
         return -EFAULT;
@@ -401,7 +402,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
 
 static int
 setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-               sigset_t *set, struct pt_regs *regs, struct switch_stack * sw)
+               sigset_t *set, struct pt_regs *regs)
 {
     unsigned long oldsp, r26, err = 0;
     struct rt_sigframe __user *frame;
@@ -420,7 +421,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
     err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
     err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags);
     err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-    err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, sw,
+    err |= setup_sigcontext(&frame->uc.uc_mcontext, regs,
                             set->sig[0], oldsp);
     err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
     if (err)
@@ -464,15 +465,15 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
  */
 static inline void
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-              struct pt_regs * regs, struct switch_stack *sw)
+              struct pt_regs * regs)
 {
     sigset_t *oldset = sigmask_to_save();
     int ret;
 
     if (ka->sa.sa_flags & SA_SIGINFO)
-        ret = setup_rt_frame(sig, ka, info, oldset, regs, sw);
+        ret = setup_rt_frame(sig, ka, info, oldset, regs);
     else
-        ret = setup_frame(sig, ka, oldset, regs, sw);
+        ret = setup_frame(sig, ka, oldset, regs);
 
     if (ret) {
         force_sigsegv(sig, current);
@@ -519,8 +520,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
  * all (if we get here from anything but a syscall return, it will be 0)
  */
 static void
-do_signal(struct pt_regs * regs, struct switch_stack * sw,
-          unsigned long r0, unsigned long r19)
+do_signal(struct pt_regs *regs, unsigned long r0, unsigned long r19)
 {
     siginfo_t info;
     int signr;
@@ -537,7 +537,7 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw,
         /* Whee!  Actually deliver the signal.  */
         if (r0)
             syscall_restart(r0, r19, regs, &ka);
-        handle_signal(signr, &ka, &info, regs, sw);
+        handle_signal(signr, &ka, &info, regs);
         if (single_stepping)
             ptrace_set_bpt(current);    /* re-set bpt */
         return;
@@ -568,15 +568,23 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw,
 }
 
 void
-do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
-                 unsigned long thread_info_flags,
-                 unsigned long r0, unsigned long r19)
+do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
+                unsigned long r0, unsigned long r19)
 {
-    if (thread_info_flags & _TIF_SIGPENDING)
-        do_signal(regs, sw, r0, r19);
-
-    if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-        clear_thread_flag(TIF_NOTIFY_RESUME);
-        tracehook_notify_resume(regs);
-    }
+    do {
+        if (thread_flags & _TIF_NEED_RESCHED) {
+            schedule();
+        } else {
+            local_irq_enable();
+            if (thread_flags & _TIF_SIGPENDING) {
+                do_signal(regs, r0, r19);
+                r0 = 0;
+            } else {
+                clear_thread_flag(TIF_NOTIFY_RESUME);
+                tracehook_notify_resume(regs);
+            }
+        }
+        local_irq_disable();
+        thread_flags = current_thread_info()->flags;
+    } while (thread_flags & _TIF_WORK_MASK);
 }
@@ -53,6 +53,7 @@ config ARM
     select GENERIC_STRNLEN_USER
     select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
     select GENERIC_KERNEL_THREAD
+    select GENERIC_KERNEL_EXECVE
     help
       The ARM series is a line of low-power-consumption RISC chip designs
       licensed by ARM Ltd and targeted at embedded applications and
...
@@ -479,7 +479,6 @@
 #define __ARCH_WANT_SYS_SOCKETCALL
 #endif
 #define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls
...
@@ -86,35 +86,14 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
     bl      schedule_tail
+    cmp     r5, #0
+    movne   r0, r4
+    movne   lr, pc
+    movne   pc, r5
     get_thread_info tsk
-    mov     why, #1
     b       ret_slow_syscall
 ENDPROC(ret_from_fork)
 
-ENTRY(ret_from_kernel_thread)
-    UNWIND(.fnstart)
-    UNWIND(.cantunwind)
-    bl      schedule_tail
-    mov     r0, r4
-    adr     lr, BSYM(1f)    @ kernel threads should not exit
-    mov     pc, r5
-1:  bl      do_exit
-    nop
-    UNWIND(.fnend)
-ENDPROC(ret_from_kernel_thread)
-
-/*
- * turn a kernel thread into userland process
- * use: ret_from_kernel_execve(struct pt_regs *normal)
- */
-ENTRY(ret_from_kernel_execve)
-    mov     why, #0                 @ not a syscall
-    str     why, [r0, #S_R0]        @ ... and we want 0 in ->ARM_r0 as well
-    get_thread_info tsk             @ thread structure
-    mov     sp, r0                  @ stack pointer just under pt_regs
-    b       ret_slow_syscall
-ENDPROC(ret_from_kernel_execve)
-
     .equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
...
@@ -373,7 +373,6 @@ void release_thread(struct task_struct *dead_task)
 }
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -388,13 +387,13 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
         *childregs = *regs;
         childregs->ARM_r0 = 0;
         childregs->ARM_sp = stack_start;
-        thread->cpu_context.pc = (unsigned long)ret_from_fork;
     } else {
+        memset(childregs, 0, sizeof(struct pt_regs));
         thread->cpu_context.r4 = stk_sz;
         thread->cpu_context.r5 = stack_start;
-        thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
         childregs->ARM_cpsr = SVC_MODE;
     }
+    thread->cpu_context.pc = (unsigned long)ret_from_fork;
     thread->cpu_context.sp = (unsigned long)childregs;
 
     clear_ptrace_hw_breakpoint(p);
...
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <asm/eeh_event.h>
 #include <asm/ppc-pci.h>
 
@@ -59,8 +60,6 @@ static int eeh_event_handler(void * dummy)
     struct eeh_event    *event;
     struct eeh_pe       *pe;
 
-    set_task_comm(current, "eehd");
-
     spin_lock_irqsave(&eeh_eventlist_lock, flags);
     event = NULL;
 
@@ -108,7 +107,7 @@ static int eeh_event_handler(void * dummy)
  */
 static void eeh_thread_launcher(struct work_struct *dummy)
 {
-    if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
+    if (IS_ERR(kthread_run(eeh_event_handler, NULL, "eehd")))
         printk(KERN_ERR "Failed to start EEH daemon\n");
 }
...
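
For context on the pile's one powerpc patch: kthread_run() is kthread_create() plus an immediate wake_up_process(), it names the thread itself (which is why the handler's set_task_comm() call goes away), and it reports failure as an ERR_PTR-encoded task_struct pointer rather than a negative pid. A sketch of the idiom, assuming only what the hunk above shows (launch_eehd() is a hypothetical wrapper):

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static int eeh_event_handler(void *dummy);  /* as in the diff above */

static void launch_eehd(void)
{
    struct task_struct *t = kthread_run(eeh_event_handler, NULL, "eehd");

    if (IS_ERR(t))      /* no pid to compare against; decode the pointer */
        pr_err("Failed to start EEH daemon\n");
}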
@@ -26,7 +26,6 @@ struct thread_struct {
     jmp_buf *fault_catcher;
     struct task_struct *prev_sched;
     unsigned long temp_stack;
-    jmp_buf *exec_buf;
     struct arch_thread arch;
     jmp_buf switch_buf;
     int mm_count;
@@ -54,7 +53,6 @@ struct thread_struct {
     .fault_addr         = NULL, \
     .prev_sched         = NULL, \
     .temp_stack         = 0, \
-    .exec_buf           = NULL, \
     .arch               = INIT_ARCH_THREAD, \
     .request            = { 0 } \
 }
...
@@ -191,7 +191,6 @@ extern int os_getpid(void);
 extern int os_getpgrp(void);
 
 extern void init_new_thread_signals(void);
-extern int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr);
 
 extern int os_map_memory(void *virt, int fd, unsigned long long off,
         unsigned long len, int r, int w, int x);
...
@@ -47,8 +47,3 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 #endif
 }
 EXPORT_SYMBOL(start_thread);
-
-void __noreturn ret_from_kernel_execve(struct pt_regs *unused)
-{
-    UML_LONGJMP(current->thread.exec_buf, 1);
-}
@@ -135,14 +135,10 @@ void new_thread_handler(void)
     arg = current->thread.request.u.thread.arg;
 
     /*
-     * The return value is 1 if the kernel thread execs a process,
-     * 0 if it just exits
+     * callback returns only if the kernel thread execs a process
      */
-    n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-    if (n == 1)
-        userspace(&current->thread.regs.regs);
-    else
-        do_exit(0);
+    n = fn(arg);
+    userspace(&current->thread.regs.regs);
 }
 
 /* Called magically, see new_thread_handler above */
...
@@ -244,16 +244,3 @@ void init_new_thread_signals(void)
     signal(SIGWINCH, SIG_IGN);
     signal(SIGTERM, SIG_DFL);
 }
-
-int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
-{
-    jmp_buf buf;
-    int n;
-
-    *jmp_ptr = &buf;
-    n = UML_SETJMP(&buf);
-    if (n != 0)
-        return n;
-    (*fn)(arg);
-    return 0;
-}
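
The deleted run_kernel_thread() is the longjmp-style insanity the merge message refers to, in its purest form: UML's kernel_execve() would longjmp back through exec_buf to signal "this thread became a process". A standalone userspace illustration of that control flow, for readers who have not met the trick (plain setjmp/longjmp standing in for UML_SETJMP/UML_LONGJMP; everything here is illustrative, not kernel code):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf *exec_buf;           /* stand-in for thread.exec_buf */

static void fake_kernel_execve(void)
{
    longjmp(*exec_buf, 1);          /* "success" never returns to the caller */
}

static int thread_fn(void *arg)
{
    (void)arg;
    fake_kernel_execve();
    return 0;                       /* unreachable on the exec path */
}

int main(void)
{
    jmp_buf buf;

    exec_buf = &buf;
    if (setjmp(buf) != 0) {
        puts("longjmp path: thread became a process");   /* the n == 1 case */
        return 0;
    }
    thread_fn(NULL);
    puts("normal return: thread just exits");            /* the n == 0 case */
    return 0;
}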
@@ -109,6 +109,7 @@ config X86
     select HAVE_RCU_USER_QS if X86_64
     select HAVE_IRQ_TIME_ACCOUNTING
     select GENERIC_KERNEL_THREAD
+    select GENERIC_KERNEL_EXECVE
 
 config INSTRUCTION_DECODER
     def_bool y
...
@@ -51,7 +51,6 @@
 # define __ARCH_WANT_SYS_UTIME
 # define __ARCH_WANT_SYS_WAITPID
 # define __ARCH_WANT_SYS_EXECVE
-# define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls
...
@@ -299,12 +299,20 @@ ENTRY(ret_from_fork)
     CFI_ENDPROC
 END(ret_from_fork)
 
-ENTRY(ret_from_kernel_execve)
-    movl %eax, %esp
-    movl $0,PT_EAX(%esp)
+ENTRY(ret_from_kernel_thread)
+    CFI_STARTPROC
+    pushl_cfi %eax
+    call schedule_tail
     GET_THREAD_INFO(%ebp)
+    popl_cfi %eax
+    pushl_cfi $0x0202       # Reset kernel eflags
+    popfl_cfi
+    movl PT_EBP(%esp),%eax
+    call *PT_EBX(%esp)
+    movl $0,PT_EAX(%esp)
     jmp syscall_exit
-END(ret_from_kernel_execve)
+    CFI_ENDPROC
+ENDPROC(ret_from_kernel_thread)
 
 /*
  * Interrupt exit functions should be protected against kprobes
@@ -1015,21 +1023,6 @@ END(spurious_interrupt_bug)
  */
     .popsection
 
-ENTRY(ret_from_kernel_thread)
-    CFI_STARTPROC
-    pushl_cfi %eax
-    call schedule_tail
-    GET_THREAD_INFO(%ebp)
-    popl_cfi %eax
-    pushl_cfi $0x0202       # Reset kernel eflags
-    popfl_cfi
-    movl PT_EBP(%esp),%eax
-    call *PT_EBX(%esp)
-    call do_exit
-    ud2                     # padding for call trace
-    CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
...
@@ -563,15 +563,13 @@ ENTRY(ret_from_fork)
     jmp ret_from_sys_call           # go to the SYSRET fastpath
 
 1:
-    subq $REST_SKIP, %rsp           # move the stack pointer back
+    subq $REST_SKIP, %rsp           # leave space for volatiles
     CFI_ADJUST_CFA_OFFSET   REST_SKIP
     movq %rbp, %rdi
     call *%rbx
-    # exit
-    mov %eax, %edi
-    call do_exit
-    ud2                             # padding for call trace
+    movl $0, RAX(%rsp)
+    RESTORE_REST
+    jmp int_ret_from_sys_call
     CFI_ENDPROC
 END(ret_from_fork)
@@ -1326,20 +1324,6 @@ bad_gs:
     jmp  2b
     .previous
 
-ENTRY(ret_from_kernel_execve)
-    movq %rdi, %rsp
-    movl $0, RAX(%rsp)
-    // RESTORE_REST
-    movq 0*8(%rsp), %r15
-    movq 1*8(%rsp), %r14
-    movq 2*8(%rsp), %r13
-    movq 3*8(%rsp), %r12
-    movq 4*8(%rsp), %rbp
-    movq 5*8(%rsp), %rbx
-    addq $(6*8), %rsp
-    jmp int_ret_from_sys_call
-END(ret_from_kernel_execve)
-
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
     CFI_STARTPROC
...
@@ -14,6 +14,7 @@ config UML_X86
     def_bool y
     select GENERIC_FIND_FIRST_BIT
     select GENERIC_KERNEL_THREAD
+    select GENERIC_KERNEL_EXECVE
 
 config 64BIT
     bool "64-bit kernel" if SUBARCH = "x86"
...
@@ -827,7 +827,15 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
                                   const char __user *pathname);
 asmlinkage long sys_syncfs(int fd);
 
+#ifndef CONFIG_GENERIC_KERNEL_EXECVE
 int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]);
+#else
+#define kernel_execve(filename, argv, envp) \
+    do_execve(filename, \
+        (const char __user *const __user *)argv, \
+        (const char __user *const __user *)envp, \
+        current_pt_regs())
+#endif
 
 asmlinkage long sys_perf_event_open(
...
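
With CONFIG_GENERIC_KERNEL_EXECVE, kernel_execve() is no longer an arch-supplied function with magic control flow; it is do_execve() run against current_pt_regs(), the pt_regs frame at the top of the current kernel stack (declared in <linux/ptrace.h>, which is why kmod.c, kthread.c and init/main.c gain that include below). The observable contract, as a hypothetical wrapper:

#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/binfmts.h>

/* Hypothetical; the guarantees are the ones the following hunks rely on. */
static int try_exec(const char *path, const char *const argv[],
                    const char *const envp[])
{
    int ret = kernel_execve(path, argv, envp);

    /*
     * 0: the pt_regs on top of this thread's kernel stack now describe
     *    the new program, and a plain return takes us there.
     * <0: nothing happened; the caller may try the next candidate,
     *    exactly as run_init_process()'s callers do in init/main.c.
     */
    return ret;
}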
@@ -16,13 +16,13 @@
 #include <linux/initrd.h>
 #include <linux/sched.h>
 #include <linux/freezer.h>
+#include <linux/kmod.h>
 
 #include "do_mounts.h"
 
 unsigned long initrd_start, initrd_end;
 int initrd_below_start_ok;
 unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */
-static int __initdata old_fd, root_fd;
 static int __initdata mount_initrd = 1;
 
 static int __init no_initrd(char *str)
@@ -33,33 +33,29 @@ static int __init no_initrd(char *str)
 
 __setup("noinitrd", no_initrd);
 
-static int __init do_linuxrc(void *_shell)
+static int init_linuxrc(struct subprocess_info *info, struct cred *new)
 {
-    static const char *argv[] = { "linuxrc", NULL, };
-    extern const char *envp_init[];
-    const char *shell = _shell;
-
-    sys_close(old_fd);sys_close(root_fd);
+    sys_unshare(CLONE_FS | CLONE_FILES);
+    /* move initrd over / and chdir/chroot in initrd root */
+    sys_chdir("/root");
+    sys_mount(".", "/", NULL, MS_MOVE, NULL);
+    sys_chroot(".");
     sys_setsid();
-    return kernel_execve(shell, argv, envp_init);
+    return 0;
 }
 
 static void __init handle_initrd(void)
 {
+    static char *argv[] = { "linuxrc", NULL, };
+    extern char *envp_init[];
     int error;
-    int pid;
 
     real_root_dev = new_encode_dev(ROOT_DEV);
     create_dev("/dev/root.old", Root_RAM0);
     /* mount initrd on rootfs' /root */
     mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
     sys_mkdir("/old", 0700);
-    root_fd = sys_open("/", 0, 0);
-    old_fd = sys_open("/old", 0, 0);
-
-    /* move initrd over / and chdir/chroot in initrd root */
-    sys_chdir("/root");
-    sys_mount(".", "/", NULL, MS_MOVE, NULL);
-    sys_chroot(".");
+    sys_chdir("/old");
 
     /*
@@ -67,27 +63,22 @@ static void __init handle_initrd(void)
      */
     current->flags |= PF_FREEZER_SKIP;
 
-    pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
-    if (pid > 0)
-        while (pid != sys_wait4(-1, NULL, 0, NULL))
-            yield();
+    call_usermodehelper_fns("/linuxrc", argv, envp_init, UMH_WAIT_PROC,
+                            init_linuxrc, NULL, NULL);
 
     current->flags &= ~PF_FREEZER_SKIP;
 
     /* move initrd to rootfs' /old */
-    sys_fchdir(old_fd);
-    sys_mount("/", ".", NULL, MS_MOVE, NULL);
+    sys_mount("..", ".", NULL, MS_MOVE, NULL);
     /* switch root and cwd back to / of rootfs */
-    sys_fchdir(root_fd);
-    sys_chroot(".");
-    sys_close(old_fd);
-    sys_close(root_fd);
+    sys_chroot("..");
 
     if (new_decode_dev(real_root_dev) == Root_RAM0) {
         sys_chdir("/old");
         return;
     }
 
+    sys_chdir("/");
     ROOT_DEV = new_decode_dev(real_root_dev);
     mount_root();
...
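
call_usermodehelper_fns() packages the fork-and-wait dance handle_initrd() used to hand-roll: UMH_WAIT_PROC blocks until the helper exits (replacing the sys_wait4() loop), and the init callback, init_linuxrc() here, runs in the child between fork and exec, which is why the chroot-into-initrd shuffle moves into it and the saved old_fd/root_fd descriptors disappear. A sketch of the call shape, assuming the 3.7-era signature (launch_linuxrc() is a hypothetical standalone version of the call in the hunk above):

#include <linux/kmod.h>

static int init_linuxrc(struct subprocess_info *info, struct cred *new);

static int launch_linuxrc(char **argv, char **envp)
{
    /* synchronous: returns only after /linuxrc has exited */
    return call_usermodehelper_fns("/linuxrc", argv, envp, UMH_WAIT_PROC,
                                   init_linuxrc, NULL, NULL);
}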
@@ -69,6 +69,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/file.h>
+#include <linux/ptrace.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -791,17 +792,17 @@ static void __init do_pre_smp_initcalls(void)
         do_one_initcall(*fn);
 }
 
-static void run_init_process(const char *init_filename)
+static int run_init_process(const char *init_filename)
 {
     argv_init[0] = init_filename;
-    kernel_execve(init_filename, argv_init, envp_init);
+    return kernel_execve(init_filename, argv_init, envp_init);
 }
 
-/* This is a non __init function. Force it to be noinline otherwise gcc
- * makes it inline to init() and it becomes part of init.text section
- */
-static noinline int init_post(void)
+static void __init kernel_init_freeable(void);
+
+static int __ref kernel_init(void *unused)
 {
+    kernel_init_freeable();
     /* need to finish all async __init code before freeing the memory */
     async_synchronize_full();
     free_initmem();
@@ -813,7 +814,8 @@ static noinline int init_post(void)
     flush_delayed_fput();
 
     if (ramdisk_execute_command) {
-        run_init_process(ramdisk_execute_command);
+        if (!run_init_process(ramdisk_execute_command))
+            return 0;
         printk(KERN_WARNING "Failed to execute %s\n",
                 ramdisk_execute_command);
     }
@@ -825,20 +827,22 @@ static noinline int init_post(void)
      * trying to recover a really broken machine.
     */
     if (execute_command) {
-        run_init_process(execute_command);
+        if (!run_init_process(execute_command))
+            return 0;
         printk(KERN_WARNING "Failed to execute %s.  Attempting "
                     "defaults...\n", execute_command);
     }
-    run_init_process("/sbin/init");
-    run_init_process("/etc/init");
-    run_init_process("/bin/init");
-    run_init_process("/bin/sh");
+    if (!run_init_process("/sbin/init") ||
+        !run_init_process("/etc/init") ||
+        !run_init_process("/bin/init") ||
+        !run_init_process("/bin/sh"))
+        return 0;
 
     panic("No init found.  Try passing init= option to kernel. "
           "See Linux Documentation/init.txt for guidance.");
 }
 
-static int __init kernel_init(void * unused)
+static void __init kernel_init_freeable(void)
 {
     /*
      * Wait until kthreadd is all set-up.
@@ -893,7 +897,4 @@ static int __init kernel_init(void * unused)
      * we're essentially up and running. Get rid of the
      * initmem segments and start the user-mode stuff..
      */
-
-    init_post();
-    return 0;
 }
@@ -37,6 +37,7 @@
 #include <linux/notifier.h>
 #include <linux/suspend.h>
 #include <linux/rwsem.h>
+#include <linux/ptrace.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -221,11 +222,13 @@ static int ____call_usermodehelper(void *data)
     retval = kernel_execve(sub_info->path,
                            (const char *const *)sub_info->argv,
                            (const char *const *)sub_info->envp);
+    if (!retval)
+        return 0;
 
     /* Exec failed? */
 fail:
     sub_info->retval = retval;
-    return 0;
+    do_exit(0);
 }
 
 static int call_helper(void *data)
@@ -292,7 +295,7 @@ static int wait_for_helper(void *data)
     }
 
     umh_complete(sub_info);
-    return 0;
+    do_exit(0);
 }
 
 /* This is run by khelper thread */
...
@@ -16,6 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/freezer.h>
+#include <linux/ptrace.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
...