Commit 8eae2998 authored by Linus Torvalds's avatar Linus Torvalds

Split up "struct signal_struct" into "signal" and "sighand" parts.

This is required to make the old LinuxThread semantics work
together with the fixed-for-POSIX full signal sharing. A traditional
CLONE_SIGHAND thread (LinuxThread) will not see any other shared
signal state, while a new-style CLONE_THREAD thread will share all
of it.

This way the two methods don't confuse each other.
parent fef31b03
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
static struct fs_struct init_fs = INIT_FS; static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES; static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm); struct mm_struct init_mm = INIT_MM(init_mm);
/* /*
......
...@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask) ...@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
sigset_t saveset; sigset_t saveset;
mask &= _BLOCKABLE; mask &= _BLOCKABLE;
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked; saveset = current->blocked;
siginitset(&current->blocked, mask); siginitset(&current->blocked, mask);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR; regs->eax = -EINTR;
while (1) { while (1) {
...@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize) ...@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
return -EFAULT; return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked; saveset = current->blocked;
current->blocked = newset; current->blocked = newset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR; regs->eax = -EINTR;
while (1) { while (1) {
...@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused) ...@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
goto badframe; goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
current->blocked = set; current->blocked = set;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc, &eax)) if (restore_sigcontext(regs, &frame->sc, &eax))
goto badframe; goto badframe;
...@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) ...@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
goto badframe; goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
current->blocked = set; current->blocked = set;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax)) if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
goto badframe; goto badframe;
...@@ -513,7 +513,7 @@ static void ...@@ -513,7 +513,7 @@ static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
struct pt_regs * regs) struct pt_regs * regs)
{ {
struct k_sigaction *ka = &current->sig->action[sig-1]; struct k_sigaction *ka = &current->sighand->action[sig-1];
/* Are we from a system call? */ /* Are we from a system call? */
if (regs->orig_eax >= 0) { if (regs->orig_eax >= 0) {
...@@ -547,11 +547,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, ...@@ -547,11 +547,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
ka->sa.sa_handler = SIG_DFL; ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) { if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig); sigaddset(&current->blocked,sig);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
} }
} }
......
...@@ -512,10 +512,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno ...@@ -512,10 +512,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 1; /* we let this handle by the calling routine */ return 1; /* we let this handle by the calling routine */
if (current->ptrace & PT_PTRACED) { if (current->ptrace & PT_PTRACED) {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->blocked, SIGTRAP); sigdelset(&current->blocked, SIGTRAP);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
} }
send_sig(SIGTRAP, current, 1); send_sig(SIGTRAP, current, 1);
current->thread.trap_no = trapno; current->thread.trap_no = trapno;
......
...@@ -584,10 +584,10 @@ static int loop_thread(void *data) ...@@ -584,10 +584,10 @@ static int loop_thread(void *data)
hence, it mustn't be stopped at all because it could hence, it mustn't be stopped at all because it could
be indirectly used during suspension */ be indirectly used during suspension */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked); sigfillset(&current->blocked);
flush_signals(current); flush_signals(current);
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
set_user_nice(current, -20); set_user_nice(current, -20);
......
...@@ -787,7 +787,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, ...@@ -787,7 +787,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
int is_ignored(int sig) int is_ignored(int sig)
{ {
return (sigismember(&current->blocked, sig) || return (sigismember(&current->blocked, sig) ||
current->sig->action[sig-1].sa.sa_handler == SIG_IGN); current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
} }
static void n_tty_set_termios(struct tty_struct *tty, struct termios * old) static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
......
...@@ -1575,10 +1575,10 @@ void scsi_error_handler(void *data) ...@@ -1575,10 +1575,10 @@ void scsi_error_handler(void *data)
int rtn; int rtn;
DECLARE_MUTEX_LOCKED(sem); DECLARE_MUTEX_LOCKED(sem);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked); sigfillset(&current->blocked);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
lock_kernel(); lock_kernel();
......
...@@ -301,12 +301,12 @@ static int usb_stor_control_thread(void * __us) ...@@ -301,12 +301,12 @@ static int usb_stor_control_thread(void * __us)
daemonize(); daemonize();
/* avoid getting signals */ /* avoid getting signals */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
flush_signals(current); flush_signals(current);
current->flags |= PF_IOTHREAD; current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked); sigfillset(&current->blocked);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
/* set our name for identification purposes */ /* set our name for identification purposes */
sprintf(current->comm, "usb-storage"); sprintf(current->comm, "usb-storage");
......
...@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes) ...@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a /* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */ SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) { if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE); sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
} }
return (bytes > 0); return (bytes > 0);
...@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name) ...@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
sigset_t oldset; sigset_t oldset;
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked; oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]); siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue); interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset; current->blocked = oldset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else { } else {
DPRINTK(("autofs_wait: skipped sleeping\n")); DPRINTK(("autofs_wait: skipped sleeping\n"));
} }
......
...@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes) ...@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a /* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */ SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) { if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE); sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
} }
return (bytes > 0); return (bytes > 0);
...@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name, ...@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name,
sigset_t oldset; sigset_t oldset;
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked; oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]); siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue); interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset; current->blocked = oldset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else { } else {
DPRINTK(("autofs_wait: skipped sleeping\n")); DPRINTK(("autofs_wait: skipped sleeping\n"));
} }
......
...@@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry) ...@@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry)
* disturbing other processes. (Other processes might share the signal * disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().) * table via the CLONE_SIGHAND option to clone().)
*/ */
static inline int de_thread(struct signal_struct *oldsig) static inline int de_thread(struct task_struct *tsk)
{ {
struct signal_struct *newsig; struct signal_struct *newsig, *oldsig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
int count; int count;
if (atomic_read(&current->sig->count) <= 1) /*
* If we don't share sighandlers, then we aren't sharing anything
* and we can just re-use it all.
*/
if (atomic_read(&oldsighand->count) <= 1)
return 0; return 0;
newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL); newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsig) if (!newsighand)
return -ENOMEM; return -ENOMEM;
spin_lock_init(&newsighand->siglock);
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));
/*
* See if we need to allocate a new signal structure
*/
newsig = NULL;
if (atomic_read(&oldsig->count) > 1) {
newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
if (!newsig) {
kmem_cache_free(sighand_cachep, newsighand);
return -ENOMEM;
}
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
init_sigpending(&newsig->shared_pending);
}
if (thread_group_empty(current)) if (thread_group_empty(current))
goto out; goto no_thread_group;
/* /*
* Kill all other threads in the thread group: * Kill all other threads in the thread group:
*/ */
spin_lock_irq(&oldsig->siglock); spin_lock_irq(lock);
if (oldsig->group_exit) { if (oldsig->group_exit) {
/* /*
* Another group action in progress, just * Another group action in progress, just
* return so that the signal is processed. * return so that the signal is processed.
*/ */
spin_unlock_irq(&oldsig->siglock); spin_unlock_irq(lock);
kmem_cache_free(sigact_cachep, newsig); kmem_cache_free(sighand_cachep, newsighand);
if (newsig)
kmem_cache_free(signal_cachep, newsig);
return -EAGAIN; return -EAGAIN;
} }
oldsig->group_exit = 1; oldsig->group_exit = 1;
...@@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig) ...@@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig)
while (atomic_read(&oldsig->count) > count) { while (atomic_read(&oldsig->count) > count) {
oldsig->group_exit_task = current; oldsig->group_exit_task = current;
current->state = TASK_UNINTERRUPTIBLE; current->state = TASK_UNINTERRUPTIBLE;
spin_unlock_irq(&oldsig->siglock); spin_unlock_irq(lock);
schedule(); schedule();
spin_lock_irq(&oldsig->siglock); spin_lock_irq(lock);
if (oldsig->group_exit_task) if (oldsig->group_exit_task)
BUG(); BUG();
} }
spin_unlock_irq(&oldsig->siglock); spin_unlock_irq(lock);
/* /*
* At this point all other threads have exited, all we have to * At this point all other threads have exited, all we have to
...@@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig) ...@@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig)
release_task(leader); release_task(leader);
} }
out: no_thread_group:
spin_lock_init(&newsig->siglock);
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
write_lock_irq(&tasklist_lock); write_lock_irq(&tasklist_lock);
spin_lock(&oldsig->siglock); spin_lock(&oldsighand->siglock);
spin_lock(&newsig->siglock); spin_lock(&newsighand->siglock);
if (current == oldsig->curr_target) if (current == oldsig->curr_target)
oldsig->curr_target = next_thread(current); oldsig->curr_target = next_thread(current);
current->sig = newsig; if (newsig)
current->signal = newsig;
current->sighand = newsighand;
init_sigpending(&current->pending); init_sigpending(&current->pending);
recalc_sigpending(); recalc_sigpending();
spin_unlock(&newsig->siglock); spin_unlock(&newsighand->siglock);
spin_unlock(&oldsig->siglock); spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsig->count)) if (newsig && atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig); kmem_cache_free(signal_cachep, oldsig);
if (atomic_dec_and_test(&oldsighand->count))
kmem_cache_free(sighand_cachep, oldsighand);
if (!thread_group_empty(current)) if (!thread_group_empty(current))
BUG(); BUG();
...@@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm) ...@@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm)
{ {
char * name; char * name;
int i, ch, retval; int i, ch, retval;
struct signal_struct * oldsig = current->sig;
/* /*
* Release all of the old mmap stuff * Release all of the old mmap stuff
*/ */
retval = exec_mmap(bprm->mm); retval = exec_mmap(bprm->mm);
if (retval) if (retval)
goto mmap_failed; goto out;
/* /*
* Make sure we have a private signal table and that * Make sure we have a private signal table and that
* we are unassociated from the previous thread group. * we are unassociated from the previous thread group.
*/ */
retval = de_thread(oldsig); retval = de_thread(current);
if (retval) if (retval)
goto flush_failed; goto out;
/* This is the point of no return */ /* This is the point of no return */
...@@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm) ...@@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0; return 0;
mmap_failed: out:
flush_failed:
spin_lock_irq(&current->sig->siglock);
if (current->sig != oldsig) {
kmem_cache_free(sigact_cachep, current->sig);
current->sig = oldsig;
}
spin_unlock_irq(&current->sig->siglock);
return retval; return retval;
} }
...@@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm) ...@@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm)
if (must_not_trace_exec(current) if (must_not_trace_exec(current)
|| atomic_read(&current->fs->count) > 1 || atomic_read(&current->fs->count) > 1
|| atomic_read(&current->files->count) > 1 || atomic_read(&current->files->count) > 1
|| atomic_read(&current->sig->count) > 1) { || atomic_read(&current->sighand->count) > 1) {
if(!capable(CAP_SETUID)) { if(!capable(CAP_SETUID)) {
bprm->e_uid = current->uid; bprm->e_uid = current->uid;
bprm->e_gid = current->gid; bprm->e_gid = current->gid;
...@@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) ...@@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
} }
mm->dumpable = 0; mm->dumpable = 0;
init_completion(&mm->core_done); init_completion(&mm->core_done);
current->sig->group_exit = 1; current->signal->group_exit = 1;
current->sig->group_exit_code = exit_code; current->signal->group_exit_code = exit_code;
coredump_wait(mm); coredump_wait(mm);
if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
...@@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) ...@@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
retval = binfmt->core_dump(signr, regs, file); retval = binfmt->core_dump(signr, regs, file);
current->sig->group_exit_code |= 0x80; current->signal->group_exit_code |= 0x80;
close_fail: close_fail:
filp_close(file, NULL); filp_close(file, NULL);
fail_unlock: fail_unlock:
......
...@@ -205,10 +205,10 @@ int kjournald(void *arg) ...@@ -205,10 +205,10 @@ int kjournald(void *arg)
lock_kernel(); lock_kernel();
daemonize(); daemonize();
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked); sigfillset(&current->blocked);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
sprintf(current->comm, "kjournald"); sprintf(current->comm, "kjournald");
......
...@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) ...@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
} }
/* Keep the old signal mask */ /* Keep the old signal mask */
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked; oldset = current->blocked;
/* If we're cleaning up locks because the process is exiting, /* If we're cleaning up locks because the process is exiting,
...@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) ...@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) { && (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */ sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
call = nlmclnt_alloc_call(); call = nlmclnt_alloc_call();
if (!call) { if (!call) {
...@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) ...@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
} }
call->a_flags = RPC_TASK_ASYNC; call->a_flags = RPC_TASK_ASYNC;
} else { } else {
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
memset(call, 0, sizeof(*call)); memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl); locks_init_lock(&call->a_res.lock.fl);
...@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl) ...@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
kfree(call); kfree(call);
out_restore: out_restore:
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset; current->blocked = oldset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
done: done:
dprintk("lockd: clnt proc returns %d\n", status); dprintk("lockd: clnt proc returns %d\n", status);
...@@ -588,11 +588,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl) ...@@ -588,11 +588,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
int status; int status;
/* Block all signals while setting up call */ /* Block all signals while setting up call */
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked; oldset = current->blocked;
sigfillset(&current->blocked); sigfillset(&current->blocked);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
req = nlmclnt_alloc_call(); req = nlmclnt_alloc_call();
if (!req) if (!req)
...@@ -607,10 +607,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl) ...@@ -607,10 +607,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
if (status < 0) if (status < 0)
kfree(req); kfree(req);
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset; current->blocked = oldset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
return status; return status;
} }
......
...@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp) ...@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp)
sprintf(current->comm, "lockd"); sprintf(current->comm, "lockd");
/* Process request with signals blocked. */ /* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL)); siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
/* kick rpciod */ /* kick rpciod */
rpciod_up(); rpciod_up();
...@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp) ...@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp)
{ {
long timeout = MAX_SCHEDULE_TIMEOUT; long timeout = MAX_SCHEDULE_TIMEOUT;
if (signalled()) { if (signalled()) {
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
flush_signals(current); flush_signals(current);
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
if (nlmsvc_ops) { if (nlmsvc_ops) {
nlmsvc_invalidate_all(); nlmsvc_invalidate_all();
grace_period_expire = set_grace_period(); grace_period_expire = set_grace_period();
...@@ -297,9 +297,9 @@ lockd_down(void) ...@@ -297,9 +297,9 @@ lockd_down(void)
"lockd_down: lockd failed to exit, clearing pid\n"); "lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0; nlmsvc_pid = 0;
} }
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
out: out:
up(&nlmsvc_sema); up(&nlmsvc_sema);
} }
......
...@@ -189,10 +189,10 @@ nfsd(struct svc_rqst *rqstp) ...@@ -189,10 +189,10 @@ nfsd(struct svc_rqst *rqstp)
*/ */
for (;;) { for (;;) {
/* Block all but the shutdown signals */ /* Block all but the shutdown signals */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, SHUTDOWN_SIGS); siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
/* /*
* Find a socket with data available and call its * Find a socket with data available and call its
...@@ -210,10 +210,10 @@ nfsd(struct svc_rqst *rqstp) ...@@ -210,10 +210,10 @@ nfsd(struct svc_rqst *rqstp)
exp_readlock(); exp_readlock();
/* Process request with signals blocked. */ /* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, ALLOWED_SIGS); siginitsetinv(&current->blocked, ALLOWED_SIGS);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
svc_process(serv, rqstp); svc_process(serv, rqstp);
......
...@@ -190,16 +190,16 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, ...@@ -190,16 +190,16 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigemptyset(catch); sigemptyset(catch);
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
if (p->sig) { if (p->sighand) {
spin_lock_irq(&p->sig->siglock); spin_lock_irq(&p->sighand->siglock);
k = p->sig->action; k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) { for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN) if (k->sa.sa_handler == SIG_IGN)
sigaddset(ign, i); sigaddset(ign, i);
else if (k->sa.sa_handler != SIG_DFL) else if (k->sa.sa_handler != SIG_DFL)
sigaddset(catch, i); sigaddset(catch, i);
} }
spin_unlock_irq(&p->sig->siglock); spin_unlock_irq(&p->sighand->siglock);
} }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
......
...@@ -44,10 +44,14 @@ ...@@ -44,10 +44,14 @@
} }
#define INIT_SIGNALS(sig) { \ #define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \ .count = ATOMIC_INIT(1), \
.action = { {{0,}}, }, \ .action = { {{0,}}, }, \
.siglock = SPIN_LOCK_UNLOCKED, \ .siglock = SPIN_LOCK_UNLOCKED, \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
} }
/* /*
...@@ -90,7 +94,8 @@ ...@@ -90,7 +94,8 @@
.thread = INIT_THREAD, \ .thread = INIT_THREAD, \
.fs = &init_fs, \ .fs = &init_fs, \
.files = &init_files, \ .files = &init_files, \
.sig = &init_signals, \ .signal = &init_signals, \
.sighand = &init_sighand, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \ .pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \ .blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \ .alloc_lock = SPIN_LOCK_UNLOCKED, \
......
...@@ -220,10 +220,21 @@ struct mm_struct { ...@@ -220,10 +220,21 @@ struct mm_struct {
extern int mmlist_nr; extern int mmlist_nr;
struct signal_struct { struct sighand_struct {
atomic_t count; atomic_t count;
struct k_sigaction action[_NSIG]; struct k_sigaction action[_NSIG];
spinlock_t siglock; spinlock_t siglock;
};
/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
struct signal_struct {
atomic_t count;
/* current thread group signal load-balancing target: */ /* current thread group signal load-balancing target: */
task_t *curr_target; task_t *curr_target;
...@@ -378,7 +389,8 @@ struct task_struct { ...@@ -378,7 +389,8 @@ struct task_struct {
/* namespace */ /* namespace */
struct namespace *namespace; struct namespace *namespace;
/* signal handlers */ /* signal handlers */
struct signal_struct *sig; struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked; sigset_t blocked, real_blocked;
struct sigpending pending; struct sigpending pending;
...@@ -589,6 +601,8 @@ extern void exit_thread(void); ...@@ -589,6 +601,8 @@ extern void exit_thread(void);
extern void exit_mm(struct task_struct *); extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *); extern void exit_files(struct task_struct *);
extern void exit_signal(struct task_struct *);
extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *); extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *); extern void __exit_sighand(struct task_struct *);
......
...@@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep; ...@@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep; extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *dquot_cachep; extern kmem_cache_t *dquot_cachep;
extern kmem_cache_t *fs_cachep; extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *sigact_cachep; extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep; extern kmem_cache_t *bio_cachep;
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -76,6 +76,7 @@ void release_task(struct task_struct * p) ...@@ -76,6 +76,7 @@ void release_task(struct task_struct * p)
if (unlikely(p->ptrace)) if (unlikely(p->ptrace))
__ptrace_unlink(p); __ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
__exit_sighand(p); __exit_sighand(p);
proc_dentry = __unhash_process(p); proc_dentry = __unhash_process(p);
...@@ -546,7 +547,7 @@ static void exit_notify(struct task_struct *tsk) ...@@ -546,7 +547,7 @@ static void exit_notify(struct task_struct *tsk)
{ {
struct task_struct *t; struct task_struct *t;
if (signal_pending(tsk) && !tsk->sig->group_exit if (signal_pending(tsk) && !tsk->signal->group_exit
&& !thread_group_empty(tsk)) { && !thread_group_empty(tsk)) {
/* /*
* This occurs when there was a race between our exit * This occurs when there was a race between our exit
...@@ -558,14 +559,14 @@ static void exit_notify(struct task_struct *tsk) ...@@ -558,14 +559,14 @@ static void exit_notify(struct task_struct *tsk)
* sure someone gets all the pending signals. * sure someone gets all the pending signals.
*/ */
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sig->siglock); spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t)) for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING)) { if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
recalc_sigpending_tsk(t); recalc_sigpending_tsk(t);
if (signal_pending(t)) if (signal_pending(t))
signal_wake_up(t, 0); signal_wake_up(t, 0);
} }
spin_unlock_irq(&tsk->sig->siglock); spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
...@@ -708,9 +709,9 @@ task_t *next_thread(task_t *p) ...@@ -708,9 +709,9 @@ task_t *next_thread(task_t *p)
struct list_head *tmp, *head = &link->pidptr->task_list; struct list_head *tmp, *head = &link->pidptr->task_list;
#if CONFIG_SMP #if CONFIG_SMP
if (!p->sig) if (!p->sighand)
BUG(); BUG();
if (!spin_is_locked(&p->sig->siglock) && if (!spin_is_locked(&p->sighand->siglock) &&
!rwlock_is_locked(&tasklist_lock)) !rwlock_is_locked(&tasklist_lock))
BUG(); BUG();
#endif #endif
...@@ -730,21 +731,22 @@ do_group_exit(int exit_code) ...@@ -730,21 +731,22 @@ do_group_exit(int exit_code)
{ {
BUG_ON(exit_code & 0x80); /* core dumps don't get here */ BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (current->sig->group_exit) if (current->signal->group_exit)
exit_code = current->sig->group_exit_code; exit_code = current->signal->group_exit_code;
else if (!thread_group_empty(current)) { else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->sig; struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
spin_lock_irq(&sig->siglock); spin_lock_irq(&sighand->siglock);
if (sig->group_exit) if (sig->group_exit)
/* Another thread got here before we took the lock. */ /* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code; exit_code = sig->group_exit_code;
else { else {
sig->group_exit = 1; sig->group_exit = 1;
sig->group_exit_code = exit_code; sig->group_exit_code = exit_code;
zap_other_threads(current); zap_other_threads(current);
} }
spin_unlock_irq(&sig->siglock); spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
...@@ -838,8 +840,8 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r ...@@ -838,8 +840,8 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr) { if (!retval && stat_addr) {
if (p->sig->group_exit) if (p->signal->group_exit)
retval = put_user(p->sig->group_exit_code, stat_addr); retval = put_user(p->signal->group_exit_code, stat_addr);
else else
retval = put_user(p->exit_code, stat_addr); retval = put_user(p->exit_code, stat_addr);
} }
...@@ -879,7 +881,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, ...@@ -879,7 +881,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader,
if (!p->exit_code) if (!p->exit_code)
return 0; return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) && if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
p->sig && p->sig->group_stop_count > 0) p->signal && p->signal->group_stop_count > 0)
/* /*
* A group stop is in progress and this is the group leader. * A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped. * We won't report until all threads have stopped.
...@@ -1004,7 +1006,7 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc ...@@ -1004,7 +1006,7 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
if (options & __WNOTHREAD) if (options & __WNOTHREAD)
break; break;
tsk = next_thread(tsk); tsk = next_thread(tsk);
if (tsk->sig != current->sig) if (tsk->signal != current->signal)
BUG(); BUG();
} while (tsk != current); } while (tsk != current);
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
......
...@@ -665,23 +665,39 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk) ...@@ -665,23 +665,39 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk) static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{ {
struct signal_struct *sig; struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) { if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
atomic_inc(&current->sig->count); atomic_inc(&current->sighand->count);
return 0; return 0;
} }
sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL); sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
tsk->sig = sig; tsk->sighand = sig;
if (!sig) if (!sig)
return -1; return -1;
spin_lock_init(&sig->siglock); spin_lock_init(&sig->siglock);
atomic_set(&sig->count, 1); atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -1;
atomic_set(&sig->count, 1);
sig->group_exit = 0; sig->group_exit = 0;
sig->group_exit_code = 0; sig->group_exit_code = 0;
sig->group_exit_task = NULL; sig->group_exit_task = NULL;
sig->group_stop_count = 0; sig->group_stop_count = 0;
memcpy(sig->action, current->sig->action, sizeof(sig->action));
sig->curr_target = NULL; sig->curr_target = NULL;
init_sigpending(&sig->shared_pending); init_sigpending(&sig->shared_pending);
...@@ -831,8 +847,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -831,8 +847,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_files; goto bad_fork_cleanup_files;
if (copy_sighand(clone_flags, p)) if (copy_sighand(clone_flags, p))
goto bad_fork_cleanup_fs; goto bad_fork_cleanup_fs;
if (copy_mm(clone_flags, p)) if (copy_signal(clone_flags, p))
goto bad_fork_cleanup_sighand; goto bad_fork_cleanup_sighand;
if (copy_mm(clone_flags, p))
goto bad_fork_cleanup_signal;
if (copy_namespace(clone_flags, p)) if (copy_namespace(clone_flags, p))
goto bad_fork_cleanup_mm; goto bad_fork_cleanup_mm;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
...@@ -923,31 +941,31 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -923,31 +941,31 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->parent = p->real_parent; p->parent = p->real_parent;
if (clone_flags & CLONE_THREAD) { if (clone_flags & CLONE_THREAD) {
spin_lock(&current->sig->siglock); spin_lock(&current->sighand->siglock);
/* /*
* Important: if an exit-all has been started then * Important: if an exit-all has been started then
* do not create this new thread - the whole thread * do not create this new thread - the whole thread
* group is supposed to exit anyway. * group is supposed to exit anyway.
*/ */
if (current->sig->group_exit) { if (current->signal->group_exit) {
spin_unlock(&current->sig->siglock); spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
goto bad_fork_cleanup_namespace; goto bad_fork_cleanup_namespace;
} }
p->tgid = current->tgid; p->tgid = current->tgid;
p->group_leader = current->group_leader; p->group_leader = current->group_leader;
if (current->sig->group_stop_count > 0) { if (current->signal->group_stop_count > 0) {
/* /*
* There is an all-stop in progress for the group. * There is an all-stop in progress for the group.
* We ourselves will stop as soon as we check signals. * We ourselves will stop as soon as we check signals.
* Make the new thread part of that group stop too. * Make the new thread part of that group stop too.
*/ */
current->sig->group_stop_count++; current->signal->group_stop_count++;
set_tsk_thread_flag(p, TIF_SIGPENDING); set_tsk_thread_flag(p, TIF_SIGPENDING);
} }
spin_unlock(&current->sig->siglock); spin_unlock(&current->sighand->siglock);
} }
SET_LINKS(p); SET_LINKS(p);
...@@ -977,6 +995,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -977,6 +995,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
exit_namespace(p); exit_namespace(p);
bad_fork_cleanup_mm: bad_fork_cleanup_mm:
exit_mm(p); exit_mm(p);
bad_fork_cleanup_signal:
exit_signal(p);
bad_fork_cleanup_sighand: bad_fork_cleanup_sighand:
exit_sighand(p); exit_sighand(p);
bad_fork_cleanup_fs: bad_fork_cleanup_fs:
...@@ -1077,8 +1097,11 @@ struct task_struct *do_fork(unsigned long clone_flags, ...@@ -1077,8 +1097,11 @@ struct task_struct *do_fork(unsigned long clone_flags,
return p; return p;
} }
/* SLAB cache for signal_struct structures (tsk->sig) */ /* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *sigact_cachep; kmem_cache_t *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */ /* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep; kmem_cache_t *files_cachep;
...@@ -1094,11 +1117,17 @@ kmem_cache_t *mm_cachep; ...@@ -1094,11 +1117,17 @@ kmem_cache_t *mm_cachep;
void __init proc_caches_init(void) void __init proc_caches_init(void)
{ {
sigact_cachep = kmem_cache_create("signal_act", sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sighand_cachep)
panic("Cannot create sighand SLAB cache");
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0, sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL); SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sigact_cachep) if (!signal_cachep)
panic("Cannot create signal action SLAB cache"); panic("Cannot create signal SLAB cache");
files_cachep = kmem_cache_create("files_cache", files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0, sizeof(struct files_struct), 0,
......
...@@ -111,12 +111,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[]) ...@@ -111,12 +111,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
as the super user right after the execve fails if you time as the super user right after the execve fails if you time
the signal just right. the signal just right.
*/ */
spin_lock_irq(&curtask->sig->siglock); spin_lock_irq(&curtask->sighand->siglock);
sigemptyset(&curtask->blocked); sigemptyset(&curtask->blocked);
flush_signals(curtask); flush_signals(curtask);
flush_signal_handlers(curtask); flush_signal_handlers(curtask);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&curtask->sig->siglock); spin_unlock_irq(&curtask->sighand->siglock);
for (i = 0; i < curtask->files->max_fds; i++ ) { for (i = 0; i < curtask->files->max_fds; i++ ) {
if (curtask->files->fd[i]) close(i); if (curtask->files->fd[i]) close(i);
...@@ -239,20 +239,20 @@ int request_module(const char * module_name) ...@@ -239,20 +239,20 @@ int request_module(const char * module_name)
} }
/* Block everything but SIGKILL/SIGSTOP */ /* Block everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
tmpsig = current->blocked; tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP)); siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
waitpid_result = waitpid(pid, NULL, __WCLONE); waitpid_result = waitpid(pid, NULL, __WCLONE);
atomic_dec(&kmod_concurrent); atomic_dec(&kmod_concurrent);
/* Allow signals again.. */ /* Allow signals again.. */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
current->blocked = tmpsig; current->blocked = tmpsig;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
if (waitpid_result != pid) { if (waitpid_result != pid) {
printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n", printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
......
This diff is collapsed.
...@@ -180,10 +180,10 @@ static int worker_thread(void *__startup) ...@@ -180,10 +180,10 @@ static int worker_thread(void *__startup)
set_user_nice(current, -10); set_user_nice(current, -10);
set_cpus_allowed(current, 1UL << cpu); set_cpus_allowed(current, 1UL << cpu);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGCHLD)); siginitsetinv(&current->blocked, sigmask(SIGCHLD));
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
complete(&startup->done); complete(&startup->done);
...@@ -213,10 +213,10 @@ static int worker_thread(void *__startup) ...@@ -213,10 +213,10 @@ static int worker_thread(void *__startup)
/* SIGCHLD - auto-reaping */ ; /* SIGCHLD - auto-reaping */ ;
/* zap all other signals */ /* zap all other signals */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
flush_signals(current); flush_signals(current);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
} }
} }
remove_wait_queue(&cwq->more_work, &wait); remove_wait_queue(&cwq->more_work, &wait);
......
...@@ -90,10 +90,10 @@ static int __pdflush(struct pdflush_work *my_work) ...@@ -90,10 +90,10 @@ static int __pdflush(struct pdflush_work *my_work)
strcpy(current->comm, "pdflush"); strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */ /* interruptible sleep, so block all signals */
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, 0); siginitsetinv(&current->blocked, 0);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
current->flags |= PF_FLUSHER; current->flags |= PF_FLUSHER;
my_work->fn = NULL; my_work->fn = NULL;
......
...@@ -233,27 +233,27 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) ...@@ -233,27 +233,27 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
/* Turn off various signals */ /* Turn off various signals */
if (clnt->cl_intr) { if (clnt->cl_intr) {
struct k_sigaction *action = current->sig->action; struct k_sigaction *action = current->sighand->action;
if (action[SIGINT-1].sa.sa_handler == SIG_DFL) if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGINT); sigallow |= sigmask(SIGINT);
if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL) if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGQUIT); sigallow |= sigmask(SIGQUIT);
} }
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
*oldset = current->blocked; *oldset = current->blocked;
siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]); siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} }
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{ {
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags); spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = *oldset; current->blocked = *oldset;
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags); spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} }
/* /*
......
...@@ -964,10 +964,10 @@ rpciod(void *ptr) ...@@ -964,10 +964,10 @@ rpciod(void *ptr)
daemonize(); daemonize();
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL)); siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sighand->siglock);
strcpy(current->comm, "rpciod"); strcpy(current->comm, "rpciod");
...@@ -1022,9 +1022,9 @@ rpciod_killall(void) ...@@ -1022,9 +1022,9 @@ rpciod_killall(void)
} }
} }
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
} }
/* /*
...@@ -1100,9 +1100,9 @@ rpciod_down(void) ...@@ -1100,9 +1100,9 @@ rpciod_down(void)
} }
interruptible_sleep_on(&rpciod_killer); interruptible_sleep_on(&rpciod_killer);
} }
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
out: out:
up(&rpciod_sema); up(&rpciod_sema);
MOD_DEC_USE_COUNT; MOD_DEC_USE_COUNT;
......
...@@ -235,9 +235,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port) ...@@ -235,9 +235,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
} }
if (!port) { if (!port) {
spin_lock_irqsave(&current->sig->siglock, flags); spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending(); recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags); spin_unlock_irqrestore(&current->sighand->siglock, flags);
} }
return error; return error;
......
...@@ -133,7 +133,7 @@ void cap_bprm_compute_creds (struct linux_binprm *bprm) ...@@ -133,7 +133,7 @@ void cap_bprm_compute_creds (struct linux_binprm *bprm)
if (must_not_trace_exec (current) if (must_not_trace_exec (current)
|| atomic_read (&current->fs->count) > 1 || atomic_read (&current->fs->count) > 1
|| atomic_read (&current->files->count) > 1 || atomic_read (&current->files->count) > 1
|| atomic_read (&current->sig->count) > 1) { || atomic_read (&current->sighand->count) > 1) {
if (!capable (CAP_SETPCAP)) { if (!capable (CAP_SETPCAP)) {
new_permitted = cap_intersect (new_permitted, new_permitted = cap_intersect (new_permitted,
current-> current->
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment