Commit 8eae2998 authored by Linus Torvalds

Split up "struct signal_struct" into "signal" and "sighand" parts.

This is required to make the old LinuxThread semantics work
together with the fixed-for-POSIX full signal sharing. A traditional
CLONE_SIGHAND thread (LinuxThread) will not see any other shared
signal state, while a new-style CLONE_THREAD thread will share all
of it.

This way the two methods don't confuse each other.
parent fef31b03
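
Concretely, each task now carries two pointers, and fork() decides per flag what to share with the parent. A rough sketch of the new rule (mirroring copy_sighand()/copy_signal() from kernel/fork.c in the diff below; allocation, error handling and locking omitted, and copy_of()/alloc_private_signal() are illustrative helpers, not real kernel functions):

	/* CLONE_SIGHAND or CLONE_THREAD: share the handler table. */
	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD))
		atomic_inc(&current->sighand->count);
	else
		p->sighand = copy_of(current->sighand);		/* private handlers */

	/* Only CLONE_THREAD shares the process-wide signal state
	 * (shared_pending, group_exit, curr_target, ...). */
	if (clone_flags & CLONE_THREAD)
		atomic_inc(&current->signal->count);
	else
		p->signal = alloc_private_signal();		/* fresh, unshared */

An old-style CLONE_SIGHAND clone therefore still shares the sigaction table, but gets a private signal_struct and never observes the new thread-group state.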
......@@ -11,6 +11,7 @@
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
/*
......
......@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc, &eax))
goto badframe;
......@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
......@@ -513,7 +513,7 @@ static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
struct pt_regs * regs)
{
struct k_sigaction *ka = &current->sig->action[sig-1];
struct k_sigaction *ka = &current->sighand->action[sig-1];
/* Are we from a system call? */
if (regs->orig_eax >= 0) {
......@@ -547,11 +547,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
}
}
......
......@@ -512,10 +512,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 1; /* we let this be handled by the calling routine */
if (current->ptrace & PT_PTRACED) {
unsigned long flags;
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->blocked, SIGTRAP);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
send_sig(SIGTRAP, current, 1);
current->thread.trap_no = trapno;
......
......@@ -584,10 +584,10 @@ static int loop_thread(void *data)
hence, it mustn't be stopped at all because it could
be indirectly used during suspension */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
set_user_nice(current, -20);
......
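The same mechanical conversion repeats in every kernel daemon below: the idiom for a worker thread that must never take a signal is unchanged except that the lock moves from tsk->sig to tsk->sighand. In outline (as in loop_thread above):

	spin_lock_irq(&current->sighand->siglock);	/* was: current->sig->siglock */
	sigfillset(&current->blocked);			/* block every signal */
	flush_signals(current);				/* drop anything already queued */
	spin_unlock_irq(&current->sighand->siglock);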
......@@ -787,7 +787,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
int is_ignored(int sig)
{
return (sigismember(&current->blocked, sig) ||
current->sig->action[sig-1].sa.sa_handler == SIG_IGN);
current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
}
static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
......
......@@ -1575,10 +1575,10 @@ void scsi_error_handler(void *data)
int rtn;
DECLARE_MUTEX_LOCKED(sem);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
lock_kernel();
......
......@@ -301,12 +301,12 @@ static int usb_stor_control_thread(void * __us)
daemonize();
/* avoid getting signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/* set our name for identification purposes */
sprintf(current->comm, "usb-storage");
......
......@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return (bytes > 0);
......@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
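The autofs wait path above shows the other recurring idiom, again touched only for the lock rename: save the caller's mask, allow just the shutdown signals across the sleep, then restore. In outline:

	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	oldset = current->blocked;			/* remember the caller's mask */
	siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);

	interruptible_sleep_on(&wq->queue);		/* only SHUTDOWN_SIGS can interrupt */

	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	current->blocked = oldset;			/* put the original mask back */
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);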
......@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return (bytes > 0);
......@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name,
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
......@@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry)
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
static inline int de_thread(struct signal_struct *oldsig)
static inline int de_thread(struct task_struct *tsk)
{
struct signal_struct *newsig;
struct signal_struct *newsig, *oldsig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
int count;
if (atomic_read(&current->sig->count) <= 1)
/*
* If we don't share sighandlers, then we aren't sharing anything
* and we can just re-use it all.
*/
if (atomic_read(&oldsighand->count) <= 1)
return 0;
newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
if (!newsig)
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsighand)
return -ENOMEM;
spin_lock_init(&newsighand->siglock);
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));
/*
* See if we need to allocate a new signal structure
*/
newsig = NULL;
if (atomic_read(&oldsig->count) > 1) {
newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
if (!newsig) {
kmem_cache_free(sighand_cachep, newsighand);
return -ENOMEM;
}
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
init_sigpending(&newsig->shared_pending);
}
if (thread_group_empty(current))
goto out;
goto no_thread_group;
/*
* Kill all other threads in the thread group:
*/
spin_lock_irq(&oldsig->siglock);
spin_lock_irq(lock);
if (oldsig->group_exit) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(&oldsig->siglock);
kmem_cache_free(sigact_cachep, newsig);
spin_unlock_irq(lock);
kmem_cache_free(sighand_cachep, newsighand);
if (newsig)
kmem_cache_free(signal_cachep, newsig);
return -EAGAIN;
}
oldsig->group_exit = 1;
......@@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig)
while (atomic_read(&oldsig->count) > count) {
oldsig->group_exit_task = current;
current->state = TASK_UNINTERRUPTIBLE;
spin_unlock_irq(&oldsig->siglock);
spin_unlock_irq(lock);
schedule();
spin_lock_irq(&oldsig->siglock);
spin_lock_irq(lock);
if (oldsig->group_exit_task)
BUG();
}
spin_unlock_irq(&oldsig->siglock);
spin_unlock_irq(lock);
/*
* At this point all other threads have exited, all we have to
......@@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig)
release_task(leader);
}
out:
spin_lock_init(&newsig->siglock);
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
no_thread_group:
write_lock_irq(&tasklist_lock);
spin_lock(&oldsig->siglock);
spin_lock(&newsig->siglock);
spin_lock(&oldsighand->siglock);
spin_lock(&newsighand->siglock);
if (current == oldsig->curr_target)
oldsig->curr_target = next_thread(current);
current->sig = newsig;
if (newsig)
current->signal = newsig;
current->sighand = newsighand;
init_sigpending(&current->pending);
recalc_sigpending();
spin_unlock(&newsig->siglock);
spin_unlock(&oldsig->siglock);
spin_unlock(&newsighand->siglock);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig);
if (newsig && atomic_dec_and_test(&oldsig->count))
kmem_cache_free(signal_cachep, oldsig);
if (atomic_dec_and_test(&oldsighand->count))
kmem_cache_free(sighand_cachep, oldsighand);
if (!thread_group_empty(current))
BUG();
......@@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm)
{
char * name;
int i, ch, retval;
struct signal_struct * oldsig = current->sig;
/*
* Release all of the old mmap stuff
*/
retval = exec_mmap(bprm->mm);
if (retval)
goto mmap_failed;
goto out;
/*
* Make sure we have a private signal table and that
* we are unassociated from the previous thread group.
*/
retval = de_thread(oldsig);
retval = de_thread(current);
if (retval)
goto flush_failed;
goto out;
/* This is the point of no return */
......@@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0;
mmap_failed:
flush_failed:
spin_lock_irq(&current->sig->siglock);
if (current->sig != oldsig) {
kmem_cache_free(sigact_cachep, current->sig);
current->sig = oldsig;
}
spin_unlock_irq(&current->sig->siglock);
out:
return retval;
}
......@@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm)
if (must_not_trace_exec(current)
|| atomic_read(&current->fs->count) > 1
|| atomic_read(&current->files->count) > 1
|| atomic_read(&current->sig->count) > 1) {
|| atomic_read(&current->sighand->count) > 1) {
if(!capable(CAP_SETUID)) {
bprm->e_uid = current->uid;
bprm->e_gid = current->gid;
......@@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
}
mm->dumpable = 0;
init_completion(&mm->core_done);
current->sig->group_exit = 1;
current->sig->group_exit_code = exit_code;
current->signal->group_exit = 1;
current->signal->group_exit_code = exit_code;
coredump_wait(mm);
if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
......@@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
retval = binfmt->core_dump(signr, regs, file);
current->sig->group_exit_code |= 0x80;
current->signal->group_exit_code |= 0x80;
close_fail:
filp_close(file, NULL);
fail_unlock:
......
......@@ -205,10 +205,10 @@ int kjournald(void *arg)
lock_kernel();
daemonize();
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
sprintf(current->comm, "kjournald");
......
......@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
/* Keep the old signal mask */
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked;
/* If we're cleaning up locks because the process is exiting,
......@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
call = nlmclnt_alloc_call();
if (!call) {
......@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
call->a_flags = RPC_TASK_ASYNC;
} else {
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
......@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
kfree(call);
out_restore:
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
done:
dprintk("lockd: clnt proc returns %d\n", status);
......@@ -588,11 +588,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
int status;
/* Block all signals while setting up call */
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
req = nlmclnt_alloc_call();
if (!req)
......@@ -607,10 +607,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
if (status < 0)
kfree(req);
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
return status;
}
......
......@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp)
sprintf(current->comm, "lockd");
/* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/* kick rpciod */
rpciod_up();
......@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp)
{
long timeout = MAX_SCHEDULE_TIMEOUT;
if (signalled()) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (nlmsvc_ops) {
nlmsvc_invalidate_all();
grace_period_expire = set_grace_period();
......@@ -297,9 +297,9 @@ lockd_down(void)
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
out:
up(&nlmsvc_sema);
}
......
......@@ -189,10 +189,10 @@ nfsd(struct svc_rqst *rqstp)
*/
for (;;) {
/* Block all but the shutdown signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/*
* Find a socket with data available and call its
......@@ -210,10 +210,10 @@ nfsd(struct svc_rqst *rqstp)
exp_readlock();
/* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, ALLOWED_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
svc_process(serv, rqstp);
......
......@@ -190,16 +190,16 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigemptyset(catch);
read_lock(&tasklist_lock);
if (p->sig) {
spin_lock_irq(&p->sig->siglock);
k = p->sig->action;
if (p->sighand) {
spin_lock_irq(&p->sighand->siglock);
k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN)
sigaddset(ign, i);
else if (k->sa.sa_handler != SIG_DFL)
sigaddset(catch, i);
}
spin_unlock_irq(&p->sig->siglock);
spin_unlock_irq(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
}
......
......@@ -44,10 +44,14 @@
}
#define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { {{0,}}, }, \
.siglock = SPIN_LOCK_UNLOCKED, \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
/*
......@@ -90,7 +94,8 @@
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.sig = &init_signals, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
......
......@@ -220,10 +220,21 @@ struct mm_struct {
extern int mmlist_nr;
struct signal_struct {
struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
};
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
struct signal_struct {
atomic_t count;
/* current thread group signal load-balancing target: */
task_t *curr_target;
......@@ -378,7 +389,8 @@ struct task_struct {
/* namespace */
struct namespace *namespace;
/* signal handlers */
struct signal_struct *sig;
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
struct sigpending pending;
......@@ -589,6 +601,8 @@ extern void exit_thread(void);
extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_signal(struct task_struct *);
extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
......
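The NOTE above is the invariant that makes the split safe: signal_struct carries no lock of its own, so any writer of the shared signal state must hold the paired sighand_struct's siglock. A minimal sketch of the rule (mark_group_exit() is illustrative only, not a function from this commit; compare do_group_exit() later in the diff):

	static void mark_group_exit(struct task_struct *tsk, int code)
	{
		/* A shared signal_struct always implies a shared
		 * sighand_struct, so this one lock covers both. */
		spin_lock_irq(&tsk->sighand->siglock);
		tsk->signal->group_exit = 1;
		tsk->signal->group_exit_code = code;
		spin_unlock_irq(&tsk->sighand->siglock);
	}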
......@@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *dquot_cachep;
extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *sigact_cachep;
extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
#endif /* __KERNEL__ */
......
......@@ -76,6 +76,7 @@ void release_task(struct task_struct * p)
if (unlikely(p->ptrace))
__ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
__exit_sighand(p);
proc_dentry = __unhash_process(p);
......@@ -546,7 +547,7 @@ static void exit_notify(struct task_struct *tsk)
{
struct task_struct *t;
if (signal_pending(tsk) && !tsk->sig->group_exit
if (signal_pending(tsk) && !tsk->signal->group_exit
&& !thread_group_empty(tsk)) {
/*
* This occurs when there was a race between our exit
......@@ -558,14 +559,14 @@ static void exit_notify(struct task_struct *tsk)
* sure someone gets all the pending signals.
*/
read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sig->siglock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
recalc_sigpending_tsk(t);
if (signal_pending(t))
signal_wake_up(t, 0);
}
spin_unlock_irq(&tsk->sig->siglock);
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
}
......@@ -708,9 +709,9 @@ task_t *next_thread(task_t *p)
struct list_head *tmp, *head = &link->pidptr->task_list;
#if CONFIG_SMP
if (!p->sig)
if (!p->sighand)
BUG();
if (!spin_is_locked(&p->sig->siglock) &&
if (!spin_is_locked(&p->sighand->siglock) &&
!rwlock_is_locked(&tasklist_lock))
BUG();
#endif
......@@ -730,21 +731,22 @@ do_group_exit(int exit_code)
{
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (current->sig->group_exit)
exit_code = current->sig->group_exit_code;
if (current->signal->group_exit)
exit_code = current->signal->group_exit_code;
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->sig;
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
read_lock(&tasklist_lock);
spin_lock_irq(&sig->siglock);
spin_lock_irq(&sighand->siglock);
if (sig->group_exit)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit = 1;
sig->group_exit_code = exit_code;
sig->group_exit = 1;
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock);
}
......@@ -838,8 +840,8 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr) {
if (p->sig->group_exit)
retval = put_user(p->sig->group_exit_code, stat_addr);
if (p->signal->group_exit)
retval = put_user(p->signal->group_exit_code, stat_addr);
else
retval = put_user(p->exit_code, stat_addr);
}
......@@ -879,7 +881,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader,
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
p->sig && p->sig->group_stop_count > 0)
p->signal && p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
......@@ -1004,7 +1006,7 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
if (tsk->sig != current->sig)
if (tsk->signal != current->signal)
BUG();
} while (tsk != current);
read_unlock(&tasklist_lock);
......
......@@ -665,23 +665,39 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sig->count);
if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
atomic_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
tsk->sig = sig;
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
tsk->sighand = sig;
if (!sig)
return -1;
spin_lock_init(&sig->siglock);
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -1;
atomic_set(&sig->count, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
memcpy(sig->action, current->sig->action, sizeof(sig->action));
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
......@@ -831,8 +847,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_files;
if (copy_sighand(clone_flags, p))
goto bad_fork_cleanup_fs;
if (copy_mm(clone_flags, p))
if (copy_signal(clone_flags, p))
goto bad_fork_cleanup_sighand;
if (copy_mm(clone_flags, p))
goto bad_fork_cleanup_signal;
if (copy_namespace(clone_flags, p))
goto bad_fork_cleanup_mm;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
......@@ -923,31 +941,31 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->parent = p->real_parent;
if (clone_flags & CLONE_THREAD) {
spin_lock(&current->sig->siglock);
spin_lock(&current->sighand->siglock);
/*
* Important: if an exit-all has been started then
* do not create this new thread - the whole thread
* group is supposed to exit anyway.
*/
if (current->sig->group_exit) {
spin_unlock(&current->sig->siglock);
if (current->signal->group_exit) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
goto bad_fork_cleanup_namespace;
}
p->tgid = current->tgid;
p->group_leader = current->group_leader;
if (current->sig->group_stop_count > 0) {
if (current->signal->group_stop_count > 0) {
/*
* There is an all-stop in progress for the group.
* We ourselves will stop as soon as we check signals.
* Make the new thread part of that group stop too.
*/
current->sig->group_stop_count++;
current->signal->group_stop_count++;
set_tsk_thread_flag(p, TIF_SIGPENDING);
}
spin_unlock(&current->sig->siglock);
spin_unlock(&current->sighand->siglock);
}
SET_LINKS(p);
......@@ -977,6 +995,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
exit_namespace(p);
bad_fork_cleanup_mm:
exit_mm(p);
bad_fork_cleanup_signal:
exit_signal(p);
bad_fork_cleanup_sighand:
exit_sighand(p);
bad_fork_cleanup_fs:
......@@ -1077,8 +1097,11 @@ struct task_struct *do_fork(unsigned long clone_flags,
return p;
}
/* SLAB cache for signal_struct structures (tsk->sig) */
kmem_cache_t *sigact_cachep;
/* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;
......@@ -1094,11 +1117,17 @@ kmem_cache_t *mm_cachep;
void __init proc_caches_init(void)
{
sigact_cachep = kmem_cache_create("signal_act",
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sighand_cachep)
panic("Cannot create sighand SLAB cache");
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sigact_cachep)
panic("Cannot create signal action SLAB cache");
if (!signal_cachep)
panic("Cannot create signal SLAB cache");
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
......
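Seen from userspace, the two models are picked purely by clone(2) flags; note that copy_sighand() above treats CLONE_THREAD as implying handler sharing as well. A hypothetical caller (fn, stack and arg supplied by the caller):

	/* Old LinuxThreads flavour: shared handlers, private signal state. */
	clone(fn, stack, CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, arg);

	/* POSIX flavour: CLONE_THREAD additionally shares shared_pending,
	 * group_exit and the rest of signal_struct across the group. */
	clone(fn, stack, CLONE_VM | CLONE_FS | CLONE_FILES
			| CLONE_SIGHAND | CLONE_THREAD, arg);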
......@@ -111,12 +111,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
as the super user right after the execve fails if you time
the signal just right.
*/
spin_lock_irq(&curtask->sig->siglock);
spin_lock_irq(&curtask->sighand->siglock);
sigemptyset(&curtask->blocked);
flush_signals(curtask);
flush_signal_handlers(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sig->siglock);
spin_unlock_irq(&curtask->sighand->siglock);
for (i = 0; i < curtask->files->max_fds; i++ ) {
if (curtask->files->fd[i]) close(i);
......@@ -239,20 +239,20 @@ int request_module(const char * module_name)
}
/* Block everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
waitpid_result = waitpid(pid, NULL, __WCLONE);
atomic_dec(&kmod_concurrent);
/* Allow signals again.. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (waitpid_result != pid) {
printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
......
......@@ -138,16 +138,16 @@ int max_queued_signals = 1024;
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
#define sig_user_defined(t, signr) \
(((t)->sig->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
((t)->sig->action[(signr)-1].sa.sa_handler != SIG_IGN))
(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
#define sig_ignored(t, signr) \
(!((t)->ptrace & PT_PTRACED) && \
(t)->sig->action[(signr)-1].sa.sa_handler == SIG_IGN)
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_IGN)
#define sig_fatal(t, signr) \
(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
(t)->sig->action[(signr)-1].sa.sa_handler == SIG_DFL)
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
/*
* Re-calculate pending state from the set of locally pending
......@@ -183,9 +183,9 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
inline void recalc_sigpending_tsk(struct task_struct *t)
{
if (t->sig->group_stop_count > 0 ||
if (t->signal->group_stop_count > 0 ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->sig->shared_pending, &t->blocked))
PENDING(&t->signal->shared_pending, &t->blocked))
set_tsk_thread_flag(t, TIF_SIGPENDING);
else
clear_tsk_thread_flag(t, TIF_SIGPENDING);
......@@ -265,20 +265,41 @@ flush_signals(struct task_struct *t)
*/
void __exit_sighand(struct task_struct *tsk)
{
struct signal_struct * sig = tsk->sig;
struct sighand_struct * sighand = tsk->sighand;
/* Ok, we're done with the signal handlers */
tsk->sighand = NULL;
if (atomic_dec_and_test(&sighand->count))
kmem_cache_free(sighand_cachep, sighand);
}
void exit_sighand(struct task_struct *tsk)
{
write_lock_irq(&tasklist_lock);
__exit_sighand(tsk);
write_unlock_irq(&tasklist_lock);
}
/*
* This function expects the tasklist_lock write-locked.
*/
void __exit_signal(struct task_struct *tsk)
{
struct signal_struct * sig = tsk->signal;
struct sighand_struct * sighand = tsk->sighand;
if (!sig)
BUG();
if (!atomic_read(&sig->count))
BUG();
spin_lock(&sig->siglock);
spin_lock(&sighand->siglock);
if (atomic_dec_and_test(&sig->count)) {
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
tsk->sig = NULL;
spin_unlock(&sig->siglock);
tsk->signal = NULL;
spin_unlock(&sighand->siglock);
flush_sigqueue(&sig->shared_pending);
kmem_cache_free(sigact_cachep, sig);
kmem_cache_free(signal_cachep, sig);
} else {
/*
* If there is any task waiting for the group exit
......@@ -290,17 +311,17 @@ void __exit_sighand(struct task_struct *tsk)
}
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
tsk->sig = NULL;
spin_unlock(&sig->siglock);
tsk->signal = NULL;
spin_unlock(&sighand->siglock);
}
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
}
void exit_sighand(struct task_struct *tsk)
void exit_signal(struct task_struct *tsk)
{
write_lock_irq(&tasklist_lock);
__exit_sighand(tsk);
__exit_signal(tsk);
write_unlock_irq(&tasklist_lock);
}
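Note the teardown order this creates in release_task() earlier in the diff: __exit_signal() must run while tsk->sighand is still valid, because dropping the last signal_struct reference is done under the sighand lock. Roughly:

	__exit_signal(p);	/* takes p->sighand->siglock, flushes shared_pending */
	__exit_sighand(p);	/* only then drop the handler-table reference */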
......@@ -312,7 +333,7 @@ void
flush_signal_handlers(struct task_struct *t)
{
int i;
struct k_sigaction *ka = &t->sig->action[0];
struct k_sigaction *ka = &t->sighand->action[0];
for (i = _NSIG ; i != 0 ; i--) {
if (ka->sa.sa_handler != SIG_IGN)
ka->sa.sa_handler = SIG_DFL;
......@@ -336,11 +357,11 @@ block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
unsigned long flags;
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->notifier_mask = mask;
current->notifier_data = priv;
current->notifier = notifier;
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */
......@@ -350,11 +371,11 @@ unblock_all_signals(void)
{
unsigned long flags;
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->notifier = NULL;
current->notifier_data = NULL;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
......@@ -443,7 +464,7 @@ int dequeue_signal(sigset_t *mask, siginfo_t *info)
{
int signr = __dequeue_signal(&current->pending, mask, info);
if (!signr)
signr = __dequeue_signal(&current->sig->shared_pending,
signr = __dequeue_signal(&current->signal->shared_pending,
mask, info);
return signr;
}
......@@ -559,7 +580,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
/*
* This is a stop signal. Remove SIGCONT from all queues.
*/
rm_from_queue(sigmask(SIGCONT), &p->sig->shared_pending);
rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
t = p;
do {
rm_from_queue(sigmask(SIGCONT), &t->pending);
......@@ -570,8 +591,8 @@ static void handle_stop_signal(int sig, struct task_struct *p)
/*
* Remove all stop signals from all queues,
* and wake all threads.
*/
if (unlikely(p->sig->group_stop_count > 0)) {
if (unlikely(p->signal->group_stop_count > 0)) {
/*
* There was a group stop in progress. We'll
* pretend it finished before we got here. We are
......@@ -584,7 +605,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
* now, and it's as if the stop had finished and
* the SIGCHLD was pending on entry to this kill.
*/
p->sig->group_stop_count = 0;
p->signal->group_stop_count = 0;
if (p->ptrace & PT_PTRACED)
do_notify_parent_cldstop(p, p->parent);
else
......@@ -592,7 +613,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
p->group_leader,
p->group_leader->real_parent);
}
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending);
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
t = p;
do {
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
......@@ -608,7 +629,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
* set, the thread will pause and acquire the
* siglock that we hold now and until we've
* queued the pending signal.
*/
if (sig_user_defined(p, SIGCONT))
set_tsk_thread_flag(t, TIF_SIGPENDING);
wake_up_process(t);
......@@ -646,23 +667,23 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
*signals->tail = q;
signals->tail = &q->next;
switch ((unsigned long) info) {
case 0:
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
q->info.si_pid = current->pid;
q->info.si_uid = current->uid;
break;
case 1:
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_KERNEL;
q->info.si_pid = 0;
q->info.si_uid = 0;
break;
default:
copy_siginfo(&q->info, info);
break;
}
} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
&& info->si_code != SI_USER)
......@@ -689,7 +710,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
if (!irqs_disabled())
BUG();
#if CONFIG_SMP
if (!spin_is_locked(&t->sig->siglock))
if (!spin_is_locked(&t->sighand->siglock))
BUG();
#endif
......@@ -697,10 +718,10 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
if (sig_ignored(t, sig))
return 0;
/* Support queueing exactly one non-rt signal, so that we
can get more detailed information about the cause of
the signal. */
if (LEGACY_QUEUE(&t->pending, sig))
return 0;
ret = send_signal(sig, info, &t->pending);
......@@ -721,13 +742,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
unsigned long int flags;
int ret;
spin_lock_irqsave(&t->sig->siglock, flags);
if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
spin_lock_irqsave(&t->sighand->siglock, flags);
if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
sigdelset(&t->blocked, sig);
recalc_sigpending_tsk(t);
ret = specific_send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sig->siglock, flags);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
return ret;
}
......@@ -737,13 +758,13 @@ force_sig_specific(int sig, struct task_struct *t)
{
unsigned long int flags;
spin_lock_irqsave(&t->sig->siglock, flags);
if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
spin_lock_irqsave(&t->sighand->siglock, flags);
if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
sigdelset(&t->blocked, sig);
recalc_sigpending_tsk(t);
specific_send_sig_info(sig, (void *)2, t);
spin_unlock_irqrestore(&t->sig->siglock, flags);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
......@@ -766,7 +787,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
int ret;
#if CONFIG_SMP
if (!spin_is_locked(&p->sig->siglock))
if (!spin_is_locked(&p->sighand->siglock))
BUG();
#endif
handle_stop_signal(sig, p);
......@@ -775,7 +796,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
if (sig_ignored(p, sig))
return 0;
if (LEGACY_QUEUE(&p->sig->shared_pending, sig))
if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
/* This is a non-RT signal and we already have one queued. */
return 0;
......@@ -784,7 +805,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
* We always use the shared queue for process-wide signals,
* to avoid several races.
*/
ret = send_signal(sig, info, &p->sig->shared_pending);
ret = send_signal(sig, info, &p->signal->shared_pending);
if (unlikely(ret))
return ret;
......@@ -804,32 +825,32 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
return 0;
else {
/*
* Otherwise try to find a suitable thread.
*/
t = p->sig->curr_target;
t = p->signal->curr_target;
if (t == NULL)
/* restart balancing at this thread */
t = p->sig->curr_target = p;
t = p->signal->curr_target = p;
BUG_ON(t->tgid != p->tgid);
while (!wants_signal(sig, t)) {
t = next_thread(t);
if (t == p->sig->curr_target)
if (t == p->signal->curr_target)
/*
* No thread needs to be woken.
* Any eligible threads will see
* the signal in the queue soon.
*/
return 0;
}
p->sig->curr_target = t;
p->signal->curr_target = t;
}
/*
* Found a killable thread. If the signal will be fatal,
* then start taking the whole group down immediately.
*/
if (sig_fatal(p, sig) && !p->sig->group_exit &&
if (sig_fatal(p, sig) && !p->signal->group_exit &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
/*
......@@ -842,9 +863,9 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
* running and doing things after a slower
* thread has the fatal signal pending.
*/
p->sig->group_exit = 1;
p->sig->group_exit_code = sig;
p->sig->group_stop_count = 0;
p->signal->group_exit = 1;
p->signal->group_exit_code = sig;
p->signal->group_stop_count = 0;
t = p;
do {
sigaddset(&t->pending.signal, SIGKILL);
......@@ -865,16 +886,16 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
* the core-dump signal unblocked.
*/
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->sig->shared_pending);
p->sig->group_stop_count = 0;
p->sig->group_exit_task = t;
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
p->signal->group_stop_count = 0;
p->signal->group_exit_task = t;
t = p;
do {
p->sig->group_stop_count++;
p->signal->group_stop_count++;
signal_wake_up(t, 0);
t = next_thread(t);
} while (t != p);
wake_up_process(p->sig->group_exit_task);
wake_up_process(p->signal->group_exit_task);
return 0;
}
......@@ -893,7 +914,7 @@ void zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
p->sig->group_stop_count = 0;
p->signal->group_stop_count = 0;
if (thread_group_empty(p))
return;
......@@ -912,10 +933,10 @@ group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
int ret;
ret = check_kill_permission(sig, info, p);
if (!ret && sig && p->sig) {
spin_lock_irqsave(&p->sig->siglock, flags);
if (!ret && sig && p->sighand) {
spin_lock_irqsave(&p->sighand->siglock, flags);
ret = __group_send_sig_info(sig, info, p);
spin_unlock_irqrestore(&p->sig->siglock, flags);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
return ret;
......@@ -1050,9 +1071,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
return group_send_sig_info(sig, info, p);
else {
int error;
spin_lock_irq(&p->sig->siglock);
spin_lock_irq(&p->sighand->siglock);
error = specific_send_sig_info(sig, info, p);
spin_unlock_irq(&p->sig->siglock);
spin_unlock_irq(&p->sighand->siglock);
return error;
}
}
......@@ -1107,7 +1128,7 @@ static inline void __wake_up_parent(struct task_struct *p,
do {
wake_up_interruptible(&tsk->wait_chldexit);
tsk = next_thread(tsk);
if (tsk->sig != parent->sig)
if (tsk->signal != parent->signal)
BUG();
} while (tsk != parent);
}
......@@ -1121,7 +1142,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
struct siginfo info;
unsigned long flags;
int why, status;
struct signal_struct *psig;
struct sighand_struct *psig;
if (sig == -1)
BUG();
......@@ -1160,7 +1181,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
info.si_code = why;
info.si_status = status;
psig = tsk->parent->sig;
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
......@@ -1213,6 +1234,7 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
struct siginfo info;
unsigned long flags;
struct sighand_struct *sighand;
info.si_signo = SIGCHLD;
info.si_errno = 0;
......@@ -1226,15 +1248,16 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
info.si_status = tsk->exit_code & 0x7f;
info.si_code = CLD_STOPPED;
spin_lock_irqsave(&parent->sig->siglock, flags);
if (parent->sig->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
!(parent->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
sighand = parent->sighand;
spin_lock_irqsave(&sighand->siglock, flags);
if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
!(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
__group_send_sig_info(SIGCHLD, &info, parent);
/*
* Even if SIGCHLD is not generated, we must wake up wait4 calls.
*/
__wake_up_parent(tsk, parent);
spin_unlock_irqrestore(&parent->sig->siglock, flags);
spin_unlock_irqrestore(&sighand->siglock, flags);
}
static void
......@@ -1271,7 +1294,8 @@ finish_stop(int stop_count)
static void
do_signal_stop(int signr)
{
struct signal_struct *sig = current->sig;
struct signal_struct *sig = current->signal;
struct sighand_struct *sighand = current->sighand;
int stop_count = -1;
if (sig->group_stop_count > 0) {
......@@ -1279,17 +1303,17 @@ do_signal_stop(int signr)
* There is a group stop in progress. We don't need to
* start another one.
*/
spin_lock_irq(&sig->siglock);
spin_lock_irq(&sighand->siglock);
if (unlikely(sig->group_stop_count == 0)) {
BUG_ON(!sig->group_exit);
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
return;
}
signr = sig->group_exit_code;
stop_count = --sig->group_stop_count;
current->exit_code = signr;
set_current_state(TASK_STOPPED);
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
}
else if (thread_group_empty(current)) {
/*
......@@ -1305,7 +1329,7 @@ do_signal_stop(int signr)
*/
struct task_struct *t;
read_lock(&tasklist_lock);
spin_lock_irq(&sig->siglock);
spin_lock_irq(&sighand->siglock);
if (unlikely(sig->group_exit)) {
/*
......@@ -1313,7 +1337,7 @@ do_signal_stop(int signr)
* We'll just ignore the stop and process the
* associated fatal signal.
*/
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock);
return;
}
......@@ -1343,7 +1367,7 @@ do_signal_stop(int signr)
current->exit_code = signr;
set_current_state(TASK_STOPPED);
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock);
}
......@@ -1361,31 +1385,31 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
unsigned long signr = 0;
struct k_sigaction *ka;
spin_lock_irq(&current->sig->siglock);
if (unlikely(current->sig->group_stop_count > 0)) {
spin_lock_irq(&current->sighand->siglock);
if (unlikely(current->signal->group_stop_count > 0)) {
int stop_count;
if (current->sig->group_exit_task == current) {
if (current->signal->group_exit_task == current) {
/*
* Group stop is so we can do a core dump.
*/
current->sig->group_exit_task = NULL;
current->signal->group_exit_task = NULL;
goto dequeue;
}
/*
* There is a group stop in progress. We stop
* without any associated signal being in our queue.
*/
stop_count = --current->sig->group_stop_count;
signr = current->sig->group_exit_code;
stop_count = --current->signal->group_stop_count;
signr = current->signal->group_exit_code;
current->exit_code = signr;
set_current_state(TASK_STOPPED);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
finish_stop(stop_count);
continue;
}
dequeue:
signr = dequeue_signal(mask, info);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (!signr)
break;
......@@ -1395,10 +1419,10 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
* If there is a group stop in progress,
* we must participate in the bookkeeping.
*/
if (current->sig->group_stop_count > 0) {
spin_lock_irq(&current->sig->siglock);
--current->sig->group_stop_count;
spin_unlock_irq(&current->sig->siglock);
if (current->signal->group_stop_count > 0) {
spin_lock_irq(&current->sighand->siglock);
--current->signal->group_stop_count;
spin_unlock_irq(&current->sighand->siglock);
}
/* Let the debugger run. */
......@@ -1424,14 +1448,14 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr)) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
specific_send_sig_info(signr, info, current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
continue;
}
}
ka = &current->sig->action[signr-1];
ka = &current->sighand->action[signr-1];
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
continue;
if (ka->sa.sa_handler != SIG_DFL) /* Run the handler. */
......@@ -1443,9 +1467,9 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
if (sig_kernel_ignore(signr)) /* Default is nothing. */
continue;
/* Init gets no signals it doesn't want. */
if (current->pid == 1)
continue;
if (sig_kernel_stop(signr)) {
/*
......@@ -1457,8 +1481,8 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
if (signr == SIGSTOP ||
!is_orphaned_pgrp(current->pgrp))
do_signal_stop(signr);
continue;
}
/*
* Anything else is fatal, maybe with a core dump.
......@@ -1476,8 +1500,8 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
* and we just let them go to finish dying.
*/
const int code = signr | 0x80;
BUG_ON(!current->sig->group_exit);
BUG_ON(current->sig->group_exit_code != code);
BUG_ON(!current->signal->group_exit);
BUG_ON(current->signal->group_exit_code != code);
do_exit(code);
/* NOTREACHED */
}
......@@ -1549,7 +1573,7 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
goto out;
sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
old_set = current->blocked;
error = 0;
......@@ -1569,15 +1593,15 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
current->blocked = new_set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (error)
goto out;
if (oset)
goto set_old;
} else if (oset) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
old_set = current->blocked;
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
set_old:
error = -EFAULT;
......@@ -1597,10 +1621,10 @@ long do_sigpending(void *set, unsigned long sigsetsize)
if (sigsetsize > sizeof(sigset_t))
goto out;
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&pending, &current->pending.signal,
&current->sig->shared_pending.signal);
spin_unlock_irq(&current->sig->siglock);
&current->signal->shared_pending.signal);
spin_unlock_irq(&current->sighand->siglock);
/* Outside the lock because only this thread touches it. */
sigandsets(&pending, &current->blocked, &pending);
......@@ -1714,7 +1738,7 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
return -EINVAL;
}
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sig = dequeue_signal(&these, &info);
if (!sig) {
timeout = MAX_SCHEDULE_TIMEOUT;
......@@ -1729,19 +1753,19 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
current->real_blocked = current->blocked;
sigandsets(&current->blocked, &current->blocked, &these);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
timeout = schedule_timeout(timeout);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sig = dequeue_signal(&these, &info);
current->blocked = current->real_blocked;
siginitset(&current->real_blocked, 0);
recalc_sigpending();
}
}
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (sig) {
ret = sig;
......@@ -1801,11 +1825,11 @@ sys_tkill(int pid, int sig)
* The null signal is a permissions and process existence
* probe. No signal is actually delivered.
*/
if (!error && sig && p->sig) {
spin_lock_irq(&p->sig->siglock);
if (!error && sig && p->sighand) {
spin_lock_irq(&p->sighand->siglock);
handle_stop_signal(sig, p);
error = specific_send_sig_info(sig, &info, p);
spin_unlock_irq(&p->sig->siglock);
spin_unlock_irq(&p->sighand->siglock);
}
}
read_unlock(&tasklist_lock);
......@@ -1838,15 +1862,15 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
return -EINVAL;
k = &current->sig->action[sig-1];
k = &current->sighand->action[sig-1];
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
if (signal_pending(current)) {
/*
* If there might be a fatal signal pending on multiple
* threads, make sure we take it before changing the action.
*/
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
return -ERESTARTNOINTR;
}
......@@ -1875,20 +1899,20 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
* dance to maintain the lock hierarchy.
*/
struct task_struct *t = current;
spin_unlock_irq(&t->sig->siglock);
spin_unlock_irq(&t->sighand->siglock);
read_lock(&tasklist_lock);
spin_lock_irq(&t->sig->siglock);
spin_lock_irq(&t->sighand->siglock);
*k = *act;
sigdelsetmask(&k->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
rm_from_queue(sigmask(sig), &t->sig->shared_pending);
rm_from_queue(sigmask(sig), &t->signal->shared_pending);
do {
rm_from_queue(sigmask(sig), &t->pending);
recalc_sigpending_tsk(t);
t = next_thread(t);
} while (t != current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
read_unlock(&tasklist_lock);
return 0;
}
......@@ -1897,7 +1921,7 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
sigmask(SIGKILL) | sigmask(SIGSTOP));
}
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
......@@ -1926,7 +1950,7 @@ do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
goto out;
error = -EPERM;
if (on_sig_stack (sp))
if (on_sig_stack(sp))
goto out;
error = -EINVAL;
......@@ -1984,9 +2008,9 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
error = -EFAULT;
if (copy_from_user(&new_set, set, sizeof(*set)))
goto out;
new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
old_set = current->blocked.sig[0];
error = 0;
......@@ -2006,7 +2030,7 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
}
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (error)
goto out;
if (oset)
......@@ -2068,13 +2092,13 @@ sys_ssetmask(int newmask)
{
int old;
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
old = current->blocked.sig[0];
siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
sigmask(SIGSTOP)));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
return old;
}
......
......@@ -180,10 +180,10 @@ static int worker_thread(void *__startup)
set_user_nice(current, -10);
set_cpus_allowed(current, 1UL << cpu);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGCHLD));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
complete(&startup->done);
......@@ -213,10 +213,10 @@ static int worker_thread(void *__startup)
/* SIGCHLD - auto-reaping */ ;
/* zap all other signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
}
}
remove_wait_queue(&cwq->more_work, &wait);
......
......@@ -90,10 +90,10 @@ static int __pdflush(struct pdflush_work *my_work)
strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, 0);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
current->flags |= PF_FLUSHER;
my_work->fn = NULL;
......
......@@ -233,27 +233,27 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
/* Turn off various signals */
if (clnt->cl_intr) {
struct k_sigaction *action = current->sig->action;
struct k_sigaction *action = current->sighand->action;
if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGINT);
if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGQUIT);
}
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
*oldset = current->blocked;
siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = *oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}
/*
......
......@@ -964,10 +964,10 @@ rpciod(void *ptr)
daemonize();
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
strcpy(current->comm, "rpciod");
......@@ -1022,9 +1022,9 @@ rpciod_killall(void)
}
}
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
......@@ -1100,9 +1100,9 @@ rpciod_down(void)
}
interruptible_sleep_on(&rpciod_killer);
}
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
out:
up(&rpciod_sema);
MOD_DEC_USE_COUNT;
......
......@@ -235,9 +235,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
}
if (!port) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return error;
......
......@@ -133,7 +133,7 @@ void cap_bprm_compute_creds (struct linux_binprm *bprm)
if (must_not_trace_exec (current)
|| atomic_read (&current->fs->count) > 1
|| atomic_read (&current->files->count) > 1
|| atomic_read (&current->sig->count) > 1) {
|| atomic_read (&current->sighand->count) > 1) {
if (!capable (CAP_SETPCAP)) {
new_permitted = cap_intersect (new_permitted,
current->
......