Commit 8eae2998 authored by Linus Torvalds

Split up "struct signal_struct" into "signal" and "sighand" parts.

This is required to make the old LinuxThread semantics work
together with the fixed-for-POSIX full signal sharing. A traditional
CLONE_SIGHAND thread (LinuxThread) will not see any other shared
signal state, while a new-style CLONE_THREAD thread will share all
of it.

This way the two methods don't confuse each other.
parent fef31b03
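
To make the sharing rules concrete: below is a minimal userspace model (a hypothetical illustration, not the kernel code) of how the two clone flavors reference-count the two structures. It mirrors the branch structure of the copy_sighand()/copy_signal() hunks further down; the CLONE_* values are the real kernel flag values, everything else is simplified.

#include <stdio.h>
#include <stdlib.h>

#define CLONE_SIGHAND 0x00000800	/* real kernel flag: share handler table */
#define CLONE_THREAD  0x00010000	/* real kernel flag: same thread group */

/* Illustration only: stand-ins for the kernel structures. sighand_struct
 * carries the handler table (action[], siglock); signal_struct carries the
 * shared group state (shared_pending, group_exit, ...). Only the refcount
 * is modeled here. */
struct sighand_struct { int count; };
struct signal_struct  { int count; };

struct task {
	struct signal_struct  *signal;
	struct sighand_struct *sighand;
};

static void model_clone(struct task *parent, struct task *child,
			unsigned long flags)
{
	/* CLONE_SIGHAND (or CLONE_THREAD) shares the handler table... */
	if (flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		parent->sighand->count++;
		child->sighand = parent->sighand;
	} else {
		child->sighand = calloc(1, sizeof(*child->sighand));
		child->sighand->count = 1;	/* error handling elided */
	}
	/* ...but only CLONE_THREAD shares the group signal state. */
	if (flags & CLONE_THREAD) {
		parent->signal->count++;
		child->signal = parent->signal;
	} else {
		child->signal = calloc(1, sizeof(*child->signal));
		child->signal->count = 1;
	}
}

int main(void)
{
	struct signal_struct  sig  = { 1 };
	struct sighand_struct hand = { 1 };
	struct task parent = { &sig, &hand }, lt, posix;

	model_clone(&parent, &lt, CLONE_SIGHAND);			/* LinuxThread */
	model_clone(&parent, &posix, CLONE_SIGHAND | CLONE_THREAD);	/* POSIX */

	printf("LinuxThread:  shares sighand=%d signal=%d\n",
	       lt.sighand == parent.sighand, lt.signal == parent.signal);
	printf("CLONE_THREAD: shares sighand=%d signal=%d\n",
	       posix.sighand == parent.sighand, posix.signal == parent.signal);
	return 0;
}

A LinuxThread-style child prints 1/0 (shared handlers, private group state); a CLONE_THREAD child prints 1/1.
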
@@ -11,6 +11,7 @@
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
/*
......
@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR;
while (1) {
@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
regs->eax = -EINTR;
while (1) {
@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc, &eax))
goto badframe;
@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
@@ -513,7 +513,7 @@ static void
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
struct pt_regs * regs)
{
struct k_sigaction *ka = &current->sig->action[sig-1];
struct k_sigaction *ka = &current->sighand->action[sig-1];
/* Are we from a system call? */
if (regs->orig_eax >= 0) {
@@ -547,11 +547,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
}
}
......
@@ -512,10 +512,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 1; /* we let this handle by the calling routine */
if (current->ptrace & PT_PTRACED) {
unsigned long flags;
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->blocked, SIGTRAP);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
send_sig(SIGTRAP, current, 1);
current->thread.trap_no = trapno;
......
@@ -584,10 +584,10 @@ static int loop_thread(void *data)
hence, it mustn't be stopped at all because it could
be indirectly used during suspension */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
set_user_nice(current, -20);
......
@@ -787,7 +787,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
int is_ignored(int sig)
{
return (sigismember(&current->blocked, sig) ||
current->sig->action[sig-1].sa.sa_handler == SIG_IGN);
current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
}
static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
......
@@ -1575,10 +1575,10 @@ void scsi_error_handler(void *data)
int rtn;
DECLARE_MUTEX_LOCKED(sem);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
lock_kernel();
......
@@ -301,12 +301,12 @@ static int usb_stor_control_thread(void * __us)
daemonize();
/* avoid getting signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/* set our name for identification purposes */
sprintf(current->comm, "usb-storage");
......
@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return (bytes > 0);
@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return (bytes > 0);
@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name,
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
@@ -559,31 +559,61 @@ static inline void put_proc_dentry(struct dentry *dentry)
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
static inline int de_thread(struct signal_struct *oldsig)
static inline int de_thread(struct task_struct *tsk)
{
struct signal_struct *newsig;
struct signal_struct *newsig, *oldsig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
int count;
if (atomic_read(&current->sig->count) <= 1)
/*
* If we don't share sighandlers, then we aren't sharing anything
* and we can just re-use it all.
*/
if (atomic_read(&oldsighand->count) <= 1)
return 0;
newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
if (!newsig)
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsighand)
return -ENOMEM;
spin_lock_init(&newsighand->siglock);
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));
/*
* See if we need to allocate a new signal structure
*/
newsig = NULL;
if (atomic_read(&oldsig->count) > 1) {
newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
if (!newsig) {
kmem_cache_free(sighand_cachep, newsighand);
return -ENOMEM;
}
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
init_sigpending(&newsig->shared_pending);
}
if (thread_group_empty(current))
goto out;
goto no_thread_group;
/*
* Kill all other threads in the thread group:
*/
spin_lock_irq(&oldsig->siglock);
spin_lock_irq(lock);
if (oldsig->group_exit) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(&oldsig->siglock);
kmem_cache_free(sigact_cachep, newsig);
spin_unlock_irq(lock);
kmem_cache_free(sighand_cachep, newsighand);
if (newsig)
kmem_cache_free(signal_cachep, newsig);
return -EAGAIN;
}
oldsig->group_exit = 1;
@@ -598,13 +628,13 @@ static inline int de_thread(struct signal_struct *oldsig)
while (atomic_read(&oldsig->count) > count) {
oldsig->group_exit_task = current;
current->state = TASK_UNINTERRUPTIBLE;
spin_unlock_irq(&oldsig->siglock);
spin_unlock_irq(lock);
schedule();
spin_lock_irq(&oldsig->siglock);
spin_lock_irq(lock);
if (oldsig->group_exit_task)
BUG();
}
spin_unlock_irq(&oldsig->siglock);
spin_unlock_irq(lock);
/*
* At this point all other threads have exited, all we have to
@@ -675,32 +705,29 @@ static inline int de_thread(struct signal_struct *oldsig)
release_task(leader);
}
out:
spin_lock_init(&newsig->siglock);
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
newsig->group_stop_count = 0;
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
no_thread_group:
write_lock_irq(&tasklist_lock);
spin_lock(&oldsig->siglock);
spin_lock(&newsig->siglock);
spin_lock(&oldsighand->siglock);
spin_lock(&newsighand->siglock);
if (current == oldsig->curr_target)
oldsig->curr_target = next_thread(current);
current->sig = newsig;
if (newsig)
current->signal = newsig;
current->sighand = newsighand;
init_sigpending(&current->pending);
recalc_sigpending();
spin_unlock(&newsig->siglock);
spin_unlock(&oldsig->siglock);
spin_unlock(&newsighand->siglock);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig);
if (newsig && atomic_dec_and_test(&oldsig->count))
kmem_cache_free(signal_cachep, oldsig);
if (atomic_dec_and_test(&oldsighand->count))
kmem_cache_free(sighand_cachep, oldsighand);
if (!thread_group_empty(current))
BUG();
@@ -746,21 +773,20 @@ int flush_old_exec(struct linux_binprm * bprm)
{
char * name;
int i, ch, retval;
struct signal_struct * oldsig = current->sig;
/*
* Release all of the old mmap stuff
*/
retval = exec_mmap(bprm->mm);
if (retval)
goto mmap_failed;
goto out;
/*
* Make sure we have a private signal table and that
* we are unassociated from the previous thread group.
*/
retval = de_thread(oldsig);
retval = de_thread(current);
if (retval)
goto flush_failed;
goto out;
/* This is the point of no return */
@@ -794,14 +820,7 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0;
mmap_failed:
flush_failed:
spin_lock_irq(&current->sig->siglock);
if (current->sig != oldsig) {
kmem_cache_free(sigact_cachep, current->sig);
current->sig = oldsig;
}
spin_unlock_irq(&current->sig->siglock);
out:
return retval;
}
@@ -885,7 +904,7 @@ void compute_creds(struct linux_binprm *bprm)
if (must_not_trace_exec(current)
|| atomic_read(&current->fs->count) > 1
|| atomic_read(&current->files->count) > 1
|| atomic_read(&current->sig->count) > 1) {
|| atomic_read(&current->sighand->count) > 1) {
if(!capable(CAP_SETUID)) {
bprm->e_uid = current->uid;
bprm->e_gid = current->gid;
@@ -1302,8 +1321,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
}
mm->dumpable = 0;
init_completion(&mm->core_done);
current->sig->group_exit = 1;
current->sig->group_exit_code = exit_code;
current->signal->group_exit = 1;
current->signal->group_exit_code = exit_code;
coredump_wait(mm);
if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
@@ -1330,7 +1349,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
retval = binfmt->core_dump(signr, regs, file);
current->sig->group_exit_code |= 0x80;
current->signal->group_exit_code |= 0x80;
close_fail:
filp_close(file, NULL);
fail_unlock:
......
@@ -205,10 +205,10 @@ int kjournald(void *arg)
lock_kernel();
daemonize();
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
sprintf(current->comm, "kjournald");
......
@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
/* Keep the old signal mask */
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked;
/* If we're cleaning up locks because the process is exiting,
@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
call = nlmclnt_alloc_call();
if (!call) {
@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
call->a_flags = RPC_TASK_ASYNC;
} else {
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
kfree(call);
out_restore:
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
done:
dprintk("lockd: clnt proc returns %d\n", status);
@@ -588,11 +588,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
int status;
/* Block all signals while setting up call */
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
req = nlmclnt_alloc_call();
if (!req)
@@ -607,10 +607,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
if (status < 0)
kfree(req);
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
return status;
}
......
@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp)
sprintf(current->comm, "lockd");
/* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/* kick rpciod */
rpciod_up();
@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp)
{
long timeout = MAX_SCHEDULE_TIMEOUT;
if (signalled()) {
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (nlmsvc_ops) {
nlmsvc_invalidate_all();
grace_period_expire = set_grace_period();
@@ -297,9 +297,9 @@ lockd_down(void)
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
out:
up(&nlmsvc_sema);
}
......
@@ -189,10 +189,10 @@ nfsd(struct svc_rqst *rqstp)
*/
for (;;) {
/* Block all but the shutdown signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
/*
* Find a socket with data available and call its
@@ -210,10 +210,10 @@ nfsd(struct svc_rqst *rqstp)
exp_readlock();
/* Process request with signals blocked. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, ALLOWED_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
svc_process(serv, rqstp);
......
@@ -190,16 +190,16 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigemptyset(catch);
read_lock(&tasklist_lock);
if (p->sig) {
spin_lock_irq(&p->sig->siglock);
k = p->sig->action;
if (p->sighand) {
spin_lock_irq(&p->sighand->siglock);
k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN)
sigaddset(ign, i);
else if (k->sa.sa_handler != SIG_DFL)
sigaddset(catch, i);
}
spin_unlock_irq(&p->sig->siglock);
spin_unlock_irq(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
}
......
@@ -44,10 +44,14 @@
}
#define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { {{0,}}, }, \
.siglock = SPIN_LOCK_UNLOCKED, \
.shared_pending = { NULL, &sig.shared_pending.head, {{0}}}, \
}
/*
@@ -90,7 +94,8 @@
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.sig = &init_signals, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
......
@@ -220,10 +220,21 @@ struct mm_struct {
extern int mmlist_nr;
struct signal_struct {
struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
};
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
struct signal_struct {
atomic_t count;
/* current thread group signal load-balancing target: */
task_t *curr_target;
@@ -378,7 +389,8 @@ struct task_struct {
/* namespace */
struct namespace *namespace;
/* signal handlers */
struct signal_struct *sig;
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
struct sigpending pending;
@@ -589,6 +601,8 @@ extern void exit_thread(void);
extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_signal(struct task_struct *);
extern void __exit_signal(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
......
@@ -71,7 +71,8 @@ extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *dquot_cachep;
extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *sigact_cachep;
extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
#endif /* __KERNEL__ */
......
@@ -76,6 +76,7 @@ void release_task(struct task_struct * p)
if (unlikely(p->ptrace))
__ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
__exit_sighand(p);
proc_dentry = __unhash_process(p);
@@ -546,7 +547,7 @@ static void exit_notify(struct task_struct *tsk)
{
struct task_struct *t;
if (signal_pending(tsk) && !tsk->sig->group_exit
if (signal_pending(tsk) && !tsk->signal->group_exit
&& !thread_group_empty(tsk)) {
/*
* This occurs when there was a race between our exit
@@ -558,14 +559,14 @@ static void exit_notify(struct task_struct *tsk)
* sure someone gets all the pending signals.
*/
read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sig->siglock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
recalc_sigpending_tsk(t);
if (signal_pending(t))
signal_wake_up(t, 0);
}
spin_unlock_irq(&tsk->sig->siglock);
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
}
@@ -708,9 +709,9 @@ task_t *next_thread(task_t *p)
struct list_head *tmp, *head = &link->pidptr->task_list;
#if CONFIG_SMP
if (!p->sig)
if (!p->sighand)
BUG();
if (!spin_is_locked(&p->sig->siglock) &&
if (!spin_is_locked(&p->sighand->siglock) &&
!rwlock_is_locked(&tasklist_lock))
BUG();
#endif
@@ -730,12 +731,13 @@ do_group_exit(int exit_code)
{
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (current->sig->group_exit)
exit_code = current->sig->group_exit_code;
if (current->signal->group_exit)
exit_code = current->signal->group_exit_code;
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->sig;
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
read_lock(&tasklist_lock);
spin_lock_irq(&sig->siglock);
spin_lock_irq(&sighand->siglock);
if (sig->group_exit)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
@@ -744,7 +746,7 @@ do_group_exit(int exit_code)
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
spin_unlock_irq(&sig->siglock);
spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock);
}
@@ -838,8 +840,8 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr) {
if (p->sig->group_exit)
retval = put_user(p->sig->group_exit_code, stat_addr);
if (p->signal->group_exit)
retval = put_user(p->signal->group_exit_code, stat_addr);
else
retval = put_user(p->exit_code, stat_addr);
}
@@ -879,7 +881,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader,
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
p->sig && p->sig->group_stop_count > 0)
p->signal && p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
@@ -1004,7 +1006,7 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
if (tsk->sig != current->sig)
if (tsk->signal != current->signal)
BUG();
} while (tsk != current);
read_unlock(&tasklist_lock);
......
@@ -665,23 +665,39 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sig->count);
if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
atomic_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
tsk->sig = sig;
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
tsk->sighand = sig;
if (!sig)
return -1;
spin_lock_init(&sig->siglock);
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -1;
atomic_set(&sig->count, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
memcpy(sig->action, current->sig->action, sizeof(sig->action));
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
@@ -831,8 +847,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_files;
if (copy_sighand(clone_flags, p))
goto bad_fork_cleanup_fs;
if (copy_mm(clone_flags, p))
if (copy_signal(clone_flags, p))
goto bad_fork_cleanup_sighand;
if (copy_mm(clone_flags, p))
goto bad_fork_cleanup_signal;
if (copy_namespace(clone_flags, p))
goto bad_fork_cleanup_mm;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -923,31 +941,31 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->parent = p->real_parent;
if (clone_flags & CLONE_THREAD) {
spin_lock(&current->sig->siglock);
spin_lock(&current->sighand->siglock);
/*
* Important: if an exit-all has been started then
* do not create this new thread - the whole thread
* group is supposed to exit anyway.
*/
if (current->sig->group_exit) {
spin_unlock(&current->sig->siglock);
if (current->signal->group_exit) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
goto bad_fork_cleanup_namespace;
}
p->tgid = current->tgid;
p->group_leader = current->group_leader;
if (current->sig->group_stop_count > 0) {
if (current->signal->group_stop_count > 0) {
/*
* There is an all-stop in progress for the group.
* We ourselves will stop as soon as we check signals.
* Make the new thread part of that group stop too.
*/
current->sig->group_stop_count++;
current->signal->group_stop_count++;
set_tsk_thread_flag(p, TIF_SIGPENDING);
}
spin_unlock(&current->sig->siglock);
spin_unlock(&current->sighand->siglock);
}
SET_LINKS(p);
@@ -977,6 +995,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
exit_namespace(p);
bad_fork_cleanup_mm:
exit_mm(p);
bad_fork_cleanup_signal:
exit_signal(p);
bad_fork_cleanup_sighand:
exit_sighand(p);
bad_fork_cleanup_fs:
@@ -1077,8 +1097,11 @@ struct task_struct *do_fork(unsigned long clone_flags,
return p;
}
/* SLAB cache for signal_struct structures (tsk->sig) */
kmem_cache_t *sigact_cachep;
/* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;
@@ -1094,11 +1117,17 @@ kmem_cache_t *mm_cachep;
void __init proc_caches_init(void)
{
sigact_cachep = kmem_cache_create("signal_act",
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sighand_cachep)
panic("Cannot create sighand SLAB cache");
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!sigact_cachep)
panic("Cannot create signal action SLAB cache");
if (!signal_cachep)
panic("Cannot create signal SLAB cache");
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
......
@@ -111,12 +111,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
as the super user right after the execve fails if you time
the signal just right.
*/
spin_lock_irq(&curtask->sig->siglock);
spin_lock_irq(&curtask->sighand->siglock);
sigemptyset(&curtask->blocked);
flush_signals(curtask);
flush_signal_handlers(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sig->siglock);
spin_unlock_irq(&curtask->sighand->siglock);
for (i = 0; i < curtask->files->max_fds; i++ ) {
if (curtask->files->fd[i]) close(i);
@@ -239,20 +239,20 @@ int request_module(const char * module_name)
}
/* Block everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
waitpid_result = waitpid(pid, NULL, __WCLONE);
atomic_dec(&kmod_concurrent);
/* Allow signals again.. */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
if (waitpid_result != pid) {
printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
......
This diff is collapsed.
@@ -180,10 +180,10 @@ static int worker_thread(void *__startup)
set_user_nice(current, -10);
set_cpus_allowed(current, 1UL << cpu);
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGCHLD));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
complete(&startup->done);
@@ -213,10 +213,10 @@ static int worker_thread(void *__startup)
/* SIGCHLD - auto-reaping */ ;
/* zap all other signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
flush_signals(current);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
}
}
remove_wait_queue(&cwq->more_work, &wait);
......
@@ -90,10 +90,10 @@ static int __pdflush(struct pdflush_work *my_work)
strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, 0);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
current->flags |= PF_FLUSHER;
my_work->fn = NULL;
......
@@ -233,27 +233,27 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
/* Turn off various signals */
if (clnt->cl_intr) {
struct k_sigaction *action = current->sig->action;
struct k_sigaction *action = current->sighand->action;
if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGINT);
if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGQUIT);
}
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
*oldset = current->blocked;
siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
unsigned long irqflags;
spin_lock_irqsave(&current->sig->siglock, irqflags);
spin_lock_irqsave(&current->sighand->siglock, irqflags);
current->blocked = *oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}
/*
......
@@ -964,10 +964,10 @@ rpciod(void *ptr)
daemonize();
spin_lock_irq(&current->sig->siglock);
spin_lock_irq(&current->sighand->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
spin_unlock_irq(&current->sighand->siglock);
strcpy(current->comm, "rpciod");
@@ -1022,9 +1022,9 @@ rpciod_killall(void)
}
}
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
@@ -1100,9 +1100,9 @@ rpciod_down(void)
}
interruptible_sleep_on(&rpciod_killer);
}
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
out:
up(&rpciod_sema);
MOD_DEC_USE_COUNT;
......
@@ -235,9 +235,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
}
if (!port) {
spin_lock_irqsave(&current->sig->siglock, flags);
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sig->siglock, flags);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
return error;
......
@@ -133,7 +133,7 @@ void cap_bprm_compute_creds (struct linux_binprm *bprm)
if (must_not_trace_exec (current)
|| atomic_read (&current->fs->count) > 1
|| atomic_read (&current->files->count) > 1
|| atomic_read (&current->sig->count) > 1) {
|| atomic_read (&current->sighand->count) > 1) {
if (!capable (CAP_SETPCAP)) {
new_permitted = cap_intersect (new_permitted,
current->
......
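
The same distinction is visible from userspace through the clone(2) flags. Below is a hypothetical test program (glibc clone() wrapper; the CLONE_* names are real, the program is only a sketch and assumes a downward-growing stack): with CLONE_THREAD the child shares the whole signal state, while dropping CLONE_THREAD but keeping CLONE_SIGHAND yields a traditional LinuxThread that shares only the handler table.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)

static int worker(void *arg)
{
	pause();	/* park the child; it shares the group's signal state */
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	if (!stack)
		return 1;

	/* CLONE_THREAD requires CLONE_SIGHAND, which in turn requires
	 * CLONE_VM. With all three, the child shares signal_struct and
	 * sighand_struct; without CLONE_THREAD it would share only the
	 * sighand_struct handler table. */
	int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD;

	int tid = clone(worker, stack + STACK_SIZE, flags, NULL);
	if (tid == -1) {
		perror("clone");
		return 1;
	}
	printf("created tid %d in our thread group\n", tid);
	return 0;	/* exiting the group takes the worker down too */
}
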