Commit 9b10610a authored by David Howells, committed by Linus Torvalds

[PATCH] thread information block

syscall latency improvement

 * There's now an asm/thread_info.h header file with the basic structure
   definition and asm offsets in it (a distilled sketch follows this list).

 * There's now a linux/thread_info.h header file which includes the asm
   version and wraps some bitops calls to provide convenience functions for
   accessing the low-level flags (see the before/after sketch following the
   i387.c hunk below).

 * The task_struct has had some fields removed (and some flags), and has
   acquired a pointer to the thread_info struct.

 * task_structs are now allocated on slabs in kernel/fork.c, whereas
   thread_info structs are allocated at the bottom of the stack pages
   (a condensed sketch of the new allocate/free pairing follows the
   kernel/fork.c hunk).

 * Some more convenience functions are provided at the end of linux/sched.h to
   access flags in other tasks (these are here because they need to access the
   task_struct).
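To make the new layout concrete, here is a minimal sketch condensed from the patched asm-i386/thread_info.h and asm-i386/current.h in the diff below. The summary comment is editorial; the declarations themselves are lifted from the patch.

/*
 * Each task owns an 8K (2*PAGE_SIZE) region: the thread_info lives at
 * the bottom of it and the kernel stack grows down towards it, so
 * masking the stack pointer recovers the thread_info.
 */
struct thread_info {
	struct task_struct *task;	/* main task structure */
	struct exec_domain *exec_domain; /* execution domain */
	__u32 flags;			/* low level flags */
	__u32 cpu;			/* current CPU */
	mm_segment_t addr_limit;	/* thread address space */
	__u8 supervisor_stack[0];
};

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
	__asm__("andl %%esp,%0" : "=r" (ti) : "0" (~8191UL));
	return ti;
}

/* `current` becomes one dependent load through thread_info */
static inline struct task_struct *get_current(void)
{
	return current_thread_info()->task;
}
#define current get_current()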
parent 4da68d0b
...@@ -43,6 +43,8 @@ ...@@ -43,6 +43,8 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/sys.h> #include <linux/sys.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/smp.h> #include <asm/smp.h>
...@@ -67,24 +69,6 @@ IF_MASK = 0x00000200 ...@@ -67,24 +69,6 @@ IF_MASK = 0x00000200
NT_MASK = 0x00004000 NT_MASK = 0x00004000
VM_MASK = 0x00020000 VM_MASK = 0x00020000
/*
* these are offsets into the task-struct.
*/
state = 0
flags = 4
work = 8
need_resched = work+0
syscall_trace = work+1
sigpending = work+2
notify_resume = work+3
addr_limit = 12
exec_domain = 16
tsk_ptrace = 24
cpu = 32
ENOSYS = 38
#define SAVE_ALL \ #define SAVE_ALL \
cld; \ cld; \
pushl %es; \ pushl %es; \
...@@ -131,10 +115,6 @@ ENOSYS = 38 ...@@ -131,10 +115,6 @@ ENOSYS = 38
.long 3b,6b; \ .long 3b,6b; \
.previous .previous
#define GET_CURRENT(reg) \
movl $-8192, reg; \
andl %esp, reg
ENTRY(lcall7) ENTRY(lcall7)
pushfl # We get a different stack layout with call gates, pushfl # We get a different stack layout with call gates,
pushl %eax # which has to be cleaned up later.. pushl %eax # which has to be cleaned up later..
...@@ -147,8 +127,8 @@ ENTRY(lcall7) ...@@ -147,8 +127,8 @@ ENTRY(lcall7)
movl %ecx,CS(%esp) # movl %ecx,CS(%esp) #
movl %esp,%ebx movl %esp,%ebx
pushl %ebx pushl %ebx
andl $-8192,%ebx # GET_CURRENT andl $-8192,%ebx # GET_THREAD_INFO
movl exec_domain(%ebx),%edx # Get the execution domain movl TI_EXEC_DOMAIN(%ebx),%edx # Get the execution domain
movl 4(%edx),%edx # Get the lcall7 handler for the domain movl 4(%edx),%edx # Get the lcall7 handler for the domain
pushl $0x7 pushl $0x7
call *%edx call *%edx
...@@ -168,8 +148,8 @@ ENTRY(lcall27) ...@@ -168,8 +148,8 @@ ENTRY(lcall27)
movl %ecx,CS(%esp) # movl %ecx,CS(%esp) #
movl %esp,%ebx movl %esp,%ebx
pushl %ebx pushl %ebx
andl $-8192,%ebx # GET_CURRENT andl $-8192,%ebx # GET_THREAD_INFO
movl exec_domain(%ebx),%edx # Get the execution domain movl TI_EXEC_DOMAIN(%ebx),%edx # Get the execution domain
movl 4(%edx),%edx # Get the lcall7 handler for the domain movl 4(%edx),%edx # Get the lcall7 handler for the domain
pushl $0x27 pushl $0x27
call *%edx call *%edx
...@@ -182,7 +162,7 @@ ENTRY(ret_from_fork) ...@@ -182,7 +162,7 @@ ENTRY(ret_from_fork)
pushl %ebx pushl %ebx
call SYMBOL_NAME(schedule_tail) call SYMBOL_NAME(schedule_tail)
addl $4, %esp addl $4, %esp
GET_CURRENT(%ebx) GET_THREAD_INFO(%ebx)
jmp syscall_exit jmp syscall_exit
/* /*
...@@ -195,17 +175,17 @@ ENTRY(ret_from_fork) ...@@ -195,17 +175,17 @@ ENTRY(ret_from_fork)
# userspace resumption stub bypassing syscall exit tracing # userspace resumption stub bypassing syscall exit tracing
ALIGN ALIGN
ENTRY(ret_from_intr) ENTRY(ret_from_intr)
GET_CURRENT(%ebx) GET_THREAD_INFO(%ebx)
ret_from_exception: ret_from_exception:
movl EFLAGS(%esp),%eax # mix EFLAGS and CS movl EFLAGS(%esp),%eax # mix EFLAGS and CS
movb CS(%esp),%al movb CS(%esp),%al
testl $(VM_MASK | 3),%eax testl $(VM_MASK | 3),%eax
jz restore_all # returning to kernel-space or vm86-space jz restore_all # returning to kernel-space or vm86-space
ENTRY(resume_userspace) ENTRY(resume_userspace)
cli # make sure need_resched and sigpending don't change cli # make sure we don't miss an interrupt setting need_resched
# between sampling and the iret # or sigpending between sampling and the iret
movl work(%ebx),%ecx movl TI_FLAGS(%ebx),%ecx
andl $0xffff00ff,%ecx # current->work (ignoring syscall_trace) andl $_TIF_WORK_MASK,%ecx # is there any work to be done on int/excp return?
jne work_pending jne work_pending
jmp restore_all jmp restore_all
...@@ -214,19 +194,19 @@ ENTRY(resume_userspace) ...@@ -214,19 +194,19 @@ ENTRY(resume_userspace)
ENTRY(system_call) ENTRY(system_call)
pushl %eax # save orig_eax pushl %eax # save orig_eax
SAVE_ALL SAVE_ALL
GET_CURRENT(%ebx) GET_THREAD_INFO(%ebx)
cmpl $(NR_syscalls),%eax cmpl $(NR_syscalls),%eax
jae syscall_badsys jae syscall_badsys
testb $0xff,syscall_trace(%ebx) # system call tracing in operation testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx) # system call tracing in operation
jnz syscall_trace_entry jnz syscall_trace_entry
syscall_traced: syscall_call:
call *SYMBOL_NAME(sys_call_table)(,%eax,4) call *SYMBOL_NAME(sys_call_table)(,%eax,4)
movl %eax,EAX(%esp) # store the return value movl %eax,EAX(%esp) # store the return value
syscall_exit: syscall_exit:
cli # make sure need_resched and sigpending don't change cli # make sure we don't miss an interrupt setting need_resched
# between sampling and the iret # or sigpending between sampling and the iret
movl work(%ebx),%ecx movl TI_FLAGS(%ebx),%ecx
testl %ecx,%ecx # current->work testw $_TIF_ALLWORK_MASK,%cx # current->work
jne syscall_exit_work jne syscall_exit_work
restore_all: restore_all:
RESTORE_ALL RESTORE_ALL
...@@ -234,16 +214,16 @@ restore_all: ...@@ -234,16 +214,16 @@ restore_all:
# perform work that needs to be done immediately before resumption # perform work that needs to be done immediately before resumption
ALIGN ALIGN
work_pending: work_pending:
testb %cl,%cl # current->work.need_resched testb $_TIF_NEED_RESCHED,%cl
jz work_notifysig jz work_notifysig
work_resched: work_resched:
call SYMBOL_NAME(schedule) call SYMBOL_NAME(schedule)
cli # make sure need_resched and sigpending don't change cli # make sure we don't miss an interrupt setting need_resched
# between sampling and the iret # or sigpending between sampling and the iret
movl work(%ebx),%ecx movl TI_FLAGS(%ebx),%ecx
andl $0xffff00ff,%ecx # ignore the syscall trace counter andl $_TIF_WORK_MASK,%ecx # is there any work to be done other than syscall tracing?
jz restore_all jz restore_all
testb %cl,%cl # current->work.need_resched testb $_TIF_NEED_RESCHED,%cl
jnz work_resched jnz work_resched
work_notifysig: # deal with pending signals and notify-resume requests work_notifysig: # deal with pending signals and notify-resume requests
...@@ -273,13 +253,13 @@ syscall_trace_entry: ...@@ -273,13 +253,13 @@ syscall_trace_entry:
call SYMBOL_NAME(do_syscall_trace) call SYMBOL_NAME(do_syscall_trace)
movl ORIG_EAX(%esp),%eax movl ORIG_EAX(%esp),%eax
cmpl $(NR_syscalls),%eax cmpl $(NR_syscalls),%eax
jnae syscall_traced jnae syscall_call
jmp syscall_exit jmp syscall_exit
# perform syscall exit tracing # perform syscall exit tracing
ALIGN ALIGN
syscall_exit_work: syscall_exit_work:
testb %ch,%ch # current->work.syscall_trace testb $_TIF_SYSCALL_TRACE,%cl
jz work_pending jz work_pending
sti # could let do_syscall_trace() call schedule() instead sti # could let do_syscall_trace() call schedule() instead
movl %esp,%eax movl %esp,%eax
...@@ -319,7 +299,7 @@ error_code: ...@@ -319,7 +299,7 @@ error_code:
movl $(__KERNEL_DS),%edx movl $(__KERNEL_DS),%edx
movl %edx,%ds movl %edx,%ds
movl %edx,%es movl %edx,%es
GET_CURRENT(%ebx) GET_THREAD_INFO(%ebx)
call *%edi call *%edi
addl $8,%esp addl $8,%esp
jmp ret_from_exception jmp ret_from_exception
...@@ -337,7 +317,7 @@ ENTRY(simd_coprocessor_error) ...@@ -337,7 +317,7 @@ ENTRY(simd_coprocessor_error)
ENTRY(device_not_available) ENTRY(device_not_available)
pushl $-1 # mark this as an int pushl $-1 # mark this as an int
SAVE_ALL SAVE_ALL
GET_CURRENT(%ebx) GET_THREAD_INFO(%ebx)
movl %cr0,%eax movl %cr0,%eax
testl $0x4,%eax # EM (math emulation bit) testl $0x4,%eax # EM (math emulation bit)
jne device_not_available_emulate jne device_not_available_emulate
......
...@@ -320,7 +320,7 @@ rp_sidt: ...@@ -320,7 +320,7 @@ rp_sidt:
ret ret
ENTRY(stack_start) ENTRY(stack_start)
.long SYMBOL_NAME(init_task_union)+8192 .long SYMBOL_NAME(init_thread_union)+8192
.long __KERNEL_DS .long __KERNEL_DS
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
......
...@@ -52,7 +52,7 @@ static inline void __save_init_fpu( struct task_struct *tsk ) ...@@ -52,7 +52,7 @@ static inline void __save_init_fpu( struct task_struct *tsk )
asm volatile( "fnsave %0 ; fwait" asm volatile( "fnsave %0 ; fwait"
: "=m" (tsk->thread.i387.fsave) ); : "=m" (tsk->thread.i387.fsave) );
} }
tsk->flags &= ~PF_USEDFPU; clear_thread_flag(TIF_USEDFPU);
} }
void save_init_fpu( struct task_struct *tsk ) void save_init_fpu( struct task_struct *tsk )
...@@ -65,7 +65,7 @@ void kernel_fpu_begin(void) ...@@ -65,7 +65,7 @@ void kernel_fpu_begin(void)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
if (tsk->flags & PF_USEDFPU) { if (test_thread_flag(TIF_USEDFPU)) {
__save_init_fpu(tsk); __save_init_fpu(tsk);
return; return;
} }
......
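The i387.c hunk above is representative of the tree-wide idiom change that linux/thread_info.h enables. A before/after sketch of the pattern (the wrapper and flag names are from this patch; the framing comments are editorial):

/* Before: plain, non-atomic read-modify-write on task_struct->flags */
if (tsk->flags & PF_USEDFPU)
	__save_init_fpu(tsk);	/* ...which did: tsk->flags &= ~PF_USEDFPU; */

/* After: atomic bitops on thread_info->flags via the new wrappers */
if (test_thread_flag(TIF_USEDFPU))	/* current task's flags */
	__save_init_fpu(tsk);	/* ...which now does: clear_thread_flag(TIF_USEDFPU); */

/* Another task's flags are reached through its thread_info pointer,
 * using the convenience functions added at the end of linux/sched.h: */
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
if (signal_pending(p))		/* now an atomic test of TIF_SIGPENDING */
	signal_wake_up(p);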
...@@ -13,15 +13,22 @@ static struct signal_struct init_signals = INIT_SIGNALS; ...@@ -13,15 +13,22 @@ static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm); struct mm_struct init_mm = INIT_MM(init_mm);
/* /*
* Initial task structure. * Initial thread structure.
* *
* We need to make sure that this is 8192-byte aligned due to the * We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special * way process stacks are handled. This is done by having a special
* "init_task" linker map entry.. * "init_task" linker map entry..
*/ */
union task_union init_task_union union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) = __attribute__((__section__(".data.init_task"))) =
{ INIT_TASK(init_task_union.task) }; { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
/* /*
* per-CPU TSS segments. Threads are completely 'soft' on Linux, * per-CPU TSS segments. Threads are completely 'soft' on Linux,
......
...@@ -220,7 +220,7 @@ static void show(char * str) ...@@ -220,7 +220,7 @@ static void show(char * str)
continue; continue;
} }
esp &= ~(THREAD_SIZE-1); esp &= ~(THREAD_SIZE-1);
esp += sizeof(struct task_struct); esp += sizeof(struct thread_info);
show_stack((void*)esp); show_stack((void*)esp);
} }
printk("\nCPU %d:",cpu); printk("\nCPU %d:",cpu);
......
...@@ -263,8 +263,9 @@ void nmi_watchdog_tick (struct pt_regs * regs) ...@@ -263,8 +263,9 @@ void nmi_watchdog_tick (struct pt_regs * regs)
{ {
/* /*
* Since current-> is always on the stack, and we always switch * Since current_thread_info()-> is always on the stack, and we
* the stack NMI-atomically, it's safe to use smp_processor_id(). * always switch the stack NMI-atomically, it's safe to use
* smp_processor_id().
*/ */
int sum, cpu = smp_processor_id(); int sum, cpu = smp_processor_id();
......
...@@ -102,15 +102,21 @@ static void poll_idle (void) ...@@ -102,15 +102,21 @@ static void poll_idle (void)
* Deal with another CPU just having chosen a thread to * Deal with another CPU just having chosen a thread to
* run here: * run here:
*/ */
oldval = xchg(&current->work.need_resched, -1); oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
if (!oldval) if (!oldval) {
set_thread_flag(TIF_POLLING_NRFLAG);
asm volatile( asm volatile(
"2:" "2:"
"cmpb $-1, %0;" "testl %0, %1;"
"rep; nop;" "rep; nop;"
"je 2b;" "je 2b;"
: :"m" (current->work.need_resched)); : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
clear_thread_flag(TIF_POLLING_NRFLAG);
} else {
set_need_resched();
}
} }
/* /*
...@@ -576,7 +582,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, ...@@ -576,7 +582,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
{ {
struct pt_regs * childregs; struct pt_regs * childregs;
childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1; childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
struct_cpy(childregs, regs); struct_cpy(childregs, regs);
childregs->eax = 0; childregs->eax = 0;
childregs->esp = esp; childregs->esp = esp;
...@@ -674,6 +680,8 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -674,6 +680,8 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*next = &next_p->thread; *next = &next_p->thread;
struct tss_struct *tss = init_tss + smp_processor_id(); struct tss_struct *tss = init_tss + smp_processor_id();
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
unlazy_fpu(prev_p); unlazy_fpu(prev_p);
/* /*
......
...@@ -278,16 +278,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) ...@@ -278,16 +278,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
if ((unsigned long) data > _NSIG) if ((unsigned long) data > _NSIG)
break; break;
if (request == PTRACE_SYSCALL) { if (request == PTRACE_SYSCALL) {
if (!(child->ptrace & PT_SYSCALLTRACE)) { set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
child->ptrace |= PT_SYSCALLTRACE;
child->work.syscall_trace++;
}
} }
else { else {
if (child->ptrace & PT_SYSCALLTRACE) { clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
child->ptrace &= ~PT_SYSCALLTRACE;
child->work.syscall_trace--;
}
} }
child->exit_code = data; child->exit_code = data;
/* make sure the single step bit is not set. */ /* make sure the single step bit is not set. */
...@@ -323,10 +317,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) ...@@ -323,10 +317,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO; ret = -EIO;
if ((unsigned long) data > _NSIG) if ((unsigned long) data > _NSIG)
break; break;
if (child->ptrace & PT_SYSCALLTRACE) { clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
child->ptrace &= ~PT_SYSCALLTRACE;
child->work.syscall_trace--;
}
if ((child->ptrace & PT_DTRACE) == 0) { if ((child->ptrace & PT_DTRACE) == 0) {
/* Spurious delayed TF traps may occur */ /* Spurious delayed TF traps may occur */
child->ptrace |= PT_DTRACE; child->ptrace |= PT_DTRACE;
...@@ -444,7 +435,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) ...@@ -444,7 +435,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
break; break;
} }
out_tsk: out_tsk:
free_task_struct(child); put_task_struct(child);
out: out:
unlock_kernel(); unlock_kernel();
return ret; return ret;
...@@ -456,8 +447,9 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) ...@@ -456,8 +447,9 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
__attribute__((regparm(3))) __attribute__((regparm(3)))
void do_syscall_trace(struct pt_regs *regs, int entryexit) void do_syscall_trace(struct pt_regs *regs, int entryexit)
{ {
if ((current->ptrace & (PT_PTRACED|PT_SYSCALLTRACE)) != if (!test_thread_flag(TIF_SYSCALL_TRACE))
(PT_PTRACED|PT_SYSCALLTRACE)) return;
if (!(current->ptrace & PT_PTRACED))
return; return;
/* the 0x80 provides a way for the tracing parent to distinguish /* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */ between a syscall stop and SIGTRAP delivery */
...@@ -476,15 +468,3 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit) ...@@ -476,15 +468,3 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit)
current->exit_code = 0; current->exit_code = 0;
} }
} }
/* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
__attribute__((regparm(3)))
void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
struct task_work work_pending)
{
/* deal with pending signal delivery */
if (work_pending.sigpending)
do_signal(regs,oldset);
}
...@@ -2817,7 +2817,7 @@ void __init cpu_init (void) ...@@ -2817,7 +2817,7 @@ void __init cpu_init (void)
/* /*
* Force FPU initialization: * Force FPU initialization:
*/ */
current->flags &= ~PF_USEDFPU; clear_thread_flag(TIF_USEDFPU);
current->used_math = 0; current->used_math = 0;
stts(); stts();
} }
......
...@@ -394,10 +394,10 @@ static void setup_frame(int sig, struct k_sigaction *ka, ...@@ -394,10 +394,10 @@ static void setup_frame(int sig, struct k_sigaction *ka,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv; goto give_sigsegv;
err |= __put_user((current->exec_domain err |= __put_user((current_thread_info()->exec_domain
&& current->exec_domain->signal_invmap && current_thread_info()->exec_domain->signal_invmap
&& sig < 32 && sig < 32
? current->exec_domain->signal_invmap[sig] ? current_thread_info()->exec_domain->signal_invmap[sig]
: sig), : sig),
&frame->sig); &frame->sig);
if (err) if (err)
...@@ -464,10 +464,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -464,10 +464,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv; goto give_sigsegv;
err |= __put_user((current->exec_domain err |= __put_user((current_thread_info()->exec_domain
&& current->exec_domain->signal_invmap && current_thread_info()->exec_domain->signal_invmap
&& sig < 32 && sig < 32
? current->exec_domain->signal_invmap[sig] ? current_thread_info()->exec_domain->signal_invmap[sig]
: sig), : sig),
&frame->sig); &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->info, &frame->pinfo);
...@@ -712,3 +712,16 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) ...@@ -712,3 +712,16 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
} }
return 0; return 0;
} }
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
__attribute__((regparm(3)))
void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
__u32 thread_info_flags)
{
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset);
}
...@@ -818,7 +818,7 @@ static void __init do_boot_cpu (int apicid) ...@@ -818,7 +818,7 @@ static void __init do_boot_cpu (int apicid)
/* So we see what's up */ /* So we see what's up */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle); stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle->thread_info);
/* /*
* This grunge runs the startup process for * This grunge runs the startup process for
...@@ -1024,7 +1024,7 @@ void __init smp_boot_cpus(void) ...@@ -1024,7 +1024,7 @@ void __init smp_boot_cpus(void)
map_cpu_to_boot_apicid(0, boot_cpu_apicid); map_cpu_to_boot_apicid(0, boot_cpu_apicid);
global_irq_holder = NO_PROC_ID; global_irq_holder = NO_PROC_ID;
current->cpu = 0; current_thread_info()->cpu = 0;
smp_tune_scheduling(); smp_tune_scheduling();
/* /*
......
...@@ -158,7 +158,7 @@ void show_trace_task(struct task_struct *tsk) ...@@ -158,7 +158,7 @@ void show_trace_task(struct task_struct *tsk)
unsigned long esp = tsk->thread.esp; unsigned long esp = tsk->thread.esp;
/* User space on another CPU? */ /* User space on another CPU? */
if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1)) if ((esp ^ (unsigned long)tsk->thread_info) & (PAGE_MASK<<1))
return; return;
show_trace((unsigned long *)esp); show_trace((unsigned long *)esp);
} }
...@@ -208,8 +208,8 @@ void show_registers(struct pt_regs *regs) ...@@ -208,8 +208,8 @@ void show_registers(struct pt_regs *regs)
regs->esi, regs->edi, regs->ebp, esp); regs->esi, regs->edi, regs->ebp, esp);
printk("ds: %04x es: %04x ss: %04x\n", printk("ds: %04x es: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, ss); regs->xds & 0xffff, regs->xes & 0xffff, ss);
printk("Process %s (pid: %d, stackpage=%08lx)", printk("Process %s (pid: %d, threadinfo=%p task=%p)",
current->comm, current->pid, 4096+(unsigned long)current); current->comm, current->pid, current_thread_info(), current);
/* /*
* When in-kernel, we also print out the stack and code at the * When in-kernel, we also print out the stack and code at the
* time of the fault.. * time of the fault..
...@@ -720,7 +720,7 @@ asmlinkage void math_state_restore(struct pt_regs regs) ...@@ -720,7 +720,7 @@ asmlinkage void math_state_restore(struct pt_regs regs)
} else { } else {
init_fpu(); init_fpu();
} }
current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */ set_thread_flag(TIF_USEDFPU); /* So we fnsave on switch_to() */
} }
#ifndef CONFIG_MATH_EMULATION #ifndef CONFIG_MATH_EMULATION
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* return an error value in addition to the "real" * return an error value in addition to the "real"
* return value. * return value.
*/ */
#include <asm/thread_info.h>
/* /*
* __get_user_X * __get_user_X
...@@ -21,15 +23,12 @@ ...@@ -21,15 +23,12 @@
* as they get called from within inline assembly. * as they get called from within inline assembly.
*/ */
addr_limit = 12
.text .text
.align 4 .align 4
.globl __get_user_1 .globl __get_user_1
__get_user_1: __get_user_1:
movl %esp,%edx GET_THREAD_INFO(%edx)
andl $0xffffe000,%edx cmpl TI_ADDR_LIMIT(%edx),%eax
cmpl addr_limit(%edx),%eax
jae bad_get_user jae bad_get_user
1: movzbl (%eax),%edx 1: movzbl (%eax),%edx
xorl %eax,%eax xorl %eax,%eax
...@@ -39,10 +38,9 @@ __get_user_1: ...@@ -39,10 +38,9 @@ __get_user_1:
.globl __get_user_2 .globl __get_user_2
__get_user_2: __get_user_2:
addl $1,%eax addl $1,%eax
movl %esp,%edx
jc bad_get_user jc bad_get_user
andl $0xffffe000,%edx GET_THREAD_INFO(%edx)
cmpl addr_limit(%edx),%eax cmpl TI_ADDR_LIMIT(%edx),%eax
jae bad_get_user jae bad_get_user
2: movzwl -1(%eax),%edx 2: movzwl -1(%eax),%edx
xorl %eax,%eax xorl %eax,%eax
...@@ -52,10 +50,9 @@ __get_user_2: ...@@ -52,10 +50,9 @@ __get_user_2:
.globl __get_user_4 .globl __get_user_4
__get_user_4: __get_user_4:
addl $3,%eax addl $3,%eax
movl %esp,%edx
jc bad_get_user jc bad_get_user
andl $0xffffe000,%edx GET_THREAD_INFO(%edx)
cmpl addr_limit(%edx),%eax cmpl TI_ADDR_LIMIT(%edx),%eax
jae bad_get_user jae bad_get_user
3: movl -3(%eax),%edx 3: movl -3(%eax),%edx
xorl %eax,%eax xorl %eax,%eax
......
...@@ -304,7 +304,7 @@ lockd_down(void) ...@@ -304,7 +304,7 @@ lockd_down(void)
* Wait for the lockd process to exit, but since we're holding * Wait for the lockd process to exit, but since we're holding
* the lockd semaphore, we can't wait around forever ... * the lockd semaphore, we can't wait around forever ...
*/ */
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
interruptible_sleep_on_timeout(&lockd_exit, HZ); interruptible_sleep_on_timeout(&lockd_exit, HZ);
if (nlmsvc_pid) { if (nlmsvc_pid) {
printk(KERN_WARNING printk(KERN_WARNING
......
...@@ -468,7 +468,7 @@ exp_writelock(void) ...@@ -468,7 +468,7 @@ exp_writelock(void)
return 0; return 0;
} }
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
want_lock++; want_lock++;
while (hash_count || hash_lock) { while (hash_count || hash_lock) {
interruptible_sleep_on(&hash_wait); interruptible_sleep_on(&hash_wait);
......
...@@ -391,7 +391,7 @@ int proc_pid_stat(struct task_struct *task, char * buffer) ...@@ -391,7 +391,7 @@ int proc_pid_stat(struct task_struct *task, char * buffer)
task->nswap, task->nswap,
task->cnswap, task->cnswap,
task->exit_signal, task->exit_signal,
task->cpu); task->thread_info->cpu);
if(mm) if(mm)
mmput(mm); mmput(mm);
return res; return res;
......
...@@ -1035,7 +1035,7 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry) ...@@ -1035,7 +1035,7 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry)
inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO); inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO);
free_task_struct(task); put_task_struct(task);
if (!inode) if (!inode)
goto out; goto out;
...@@ -1057,7 +1057,7 @@ void proc_pid_delete_inode(struct inode *inode) ...@@ -1057,7 +1057,7 @@ void proc_pid_delete_inode(struct inode *inode)
if (PROC_I(inode)->file) if (PROC_I(inode)->file)
fput(PROC_I(inode)->file); fput(PROC_I(inode)->file);
if (proc_task(inode)) if (proc_task(inode))
free_task_struct(proc_task(inode)); put_task_struct(proc_task(inode));
} }
#define PROC_NUMBUF 10 #define PROC_NUMBUF 10
......
#ifndef _I386_CURRENT_H #ifndef _I386_CURRENT_H
#define _I386_CURRENT_H #define _I386_CURRENT_H
#include <asm/thread_info.h>
struct task_struct; struct task_struct;
static inline struct task_struct * get_current(void) static inline struct task_struct * get_current(void)
{ {
struct task_struct *current; return current_thread_info()->task;
__asm__("andl %%esp,%0; ":"=r" (current) : "0" (~8191UL)); }
return current;
}
#define current get_current() #define current get_current()
......
...@@ -116,7 +116,8 @@ extern char _stext, _etext; ...@@ -116,7 +116,8 @@ extern char _stext, _etext;
#define GET_CURRENT \ #define GET_CURRENT \
"movl %esp, %ebx\n\t" \ "movl %esp, %ebx\n\t" \
"andl $-8192, %ebx\n\t" "andl $-8192, %ebx\n\t" \
"movl (%ebx),%ebx\n\t"
/* /*
* SMP has a few special interrupts for IPI messages * SMP has a few special interrupts for IPI messages
......
...@@ -28,16 +28,17 @@ extern void kernel_fpu_begin(void); ...@@ -28,16 +28,17 @@ extern void kernel_fpu_begin(void);
#define unlazy_fpu( tsk ) do { \ #define unlazy_fpu( tsk ) do { \
if ( tsk->flags & PF_USEDFPU ) \ if (test_thread_flag(TIF_USEDFPU)) \
save_init_fpu( tsk ); \ save_init_fpu( tsk ); \
} while (0) } while (0)
#define clear_fpu( tsk ) do { \ #define clear_fpu( tsk ) \
if ( tsk->flags & PF_USEDFPU ) { \ do { \
asm volatile("fwait"); \ if (test_thread_flag(TIF_USEDFPU)) { \
tsk->flags &= ~PF_USEDFPU; \ asm volatile("fwait"); \
stts(); \ clear_thread_flag(TIF_USEDFPU); \
} \ stts(); \
} \
} while (0) } while (0)
/* /*
......
...@@ -444,16 +444,8 @@ static inline unsigned long thread_saved_pc(struct thread_struct *t) ...@@ -444,16 +444,8 @@ static inline unsigned long thread_saved_pc(struct thread_struct *t)
} }
unsigned long get_wchan(struct task_struct *p); unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019]) #define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022]) #define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1022])
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
struct microcode { struct microcode {
unsigned int hdrver; unsigned int hdrver;
......
...@@ -105,7 +105,7 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial ...@@ -105,7 +105,7 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial
* so this is correct in the x86 case. * so this is correct in the x86 case.
*/ */
#define smp_processor_id() (current->cpu) #define smp_processor_id() (current_thread_info()->cpu)
static __inline int hard_smp_processor_id(void) static __inline int hard_smp_processor_id(void)
{ {
......
/* thread_info.h: i386 low-level thread information
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#endif
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
#ifndef __ASSEMBLY__
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
__u32 flags; /* low level flags */
__u32 cpu; /* current CPU */
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
__u8 supervisor_stack[0];
};
#else /* !__ASSEMBLY__ */
/* offsets into the thread_info struct for assembly code access */
#define TI_TASK 0x00000000
#define TI_EXEC_DOMAIN 0x00000004
#define TI_FLAGS 0x00000008
#define TI_CPU 0x0000000C
#define TI_ADDR_LIMIT 0x00000010
#endif
/*
* macros/functions for gaining access to the thread information structure
*/
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
addr_limit: KERNEL_DS, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~8191UL));
return ti;
}
/* thread information allocation */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
movl $-8192, reg; \
andl %esp, reg
#endif
/*
* thread information flags
* - these are process state flags that various assembly files may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
...@@ -27,14 +27,14 @@ ...@@ -27,14 +27,14 @@
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) #define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit) #define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current->addr_limit = (x)) #define set_fs(x) (current_thread_info()->addr_limit = (x))
#define segment_eq(a,b) ((a).seg == (b).seg) #define segment_eq(a,b) ((a).seg == (b).seg)
extern int __verify_write(const void *, unsigned long); extern int __verify_write(const void *, unsigned long);
#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg)) #define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
/* /*
* Uhhuh, this needs 33-bit arithmetic. We have a carry.. * Uhhuh, this needs 33-bit arithmetic. We have a carry..
...@@ -43,7 +43,7 @@ extern int __verify_write(const void *, unsigned long); ...@@ -43,7 +43,7 @@ extern int __verify_write(const void *, unsigned long);
unsigned long flag,sum; \ unsigned long flag,sum; \
asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
:"=&r" (flag), "=r" (sum) \ :"=&r" (flag), "=r" (sum) \
:"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \ :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
flag; }) flag; })
#ifdef CONFIG_X86_WP_WORKS_OK #ifdef CONFIG_X86_WP_WORKS_OK
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#define FPU_SAVE \ #define FPU_SAVE \
do { \ do { \
if (!(current->flags & PF_USEDFPU)) \ if (!test_thread_flag(TIF_USEDFPU)) \
__asm__ __volatile__ (" clts;\n"); \ __asm__ __volatile__ (" clts;\n"); \
__asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \ __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
} while (0) } while (0)
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#define FPU_RESTORE \ #define FPU_RESTORE \
do { \ do { \
__asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \ __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
if (!(current->flags & PF_USEDFPU)) \ if (!test_thread_flag(TIF_USEDFPU)) \
stts(); \ stts(); \
} while (0) } while (0)
......
...@@ -35,14 +35,6 @@ ...@@ -35,14 +35,6 @@
siglock: SPIN_LOCK_UNLOCKED \ siglock: SPIN_LOCK_UNLOCKED \
} }
#define INIT_TASK_WORK \
{ \
need_resched: 0, \
syscall_trace: 0, \
sigpending: 0, \
notify_resume: 0, \
}
/* /*
* INIT_TASK is used to set up the first task table, touch at * INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB) * your own risk!. Base=0, limit=0x1fffff (=2MB)
...@@ -50,10 +42,8 @@ ...@@ -50,10 +42,8 @@
#define INIT_TASK(tsk) \ #define INIT_TASK(tsk) \
{ \ { \
state: 0, \ state: 0, \
thread_info: &init_thread_info, \
flags: 0, \ flags: 0, \
work: INIT_TASK_WORK, \
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \ lock_depth: -1, \
__nice: DEF_USER_NICE, \ __nice: DEF_USER_NICE, \
policy: SCHED_OTHER, \ policy: SCHED_OTHER, \
......
...@@ -13,6 +13,7 @@ extern unsigned long event; ...@@ -13,6 +13,7 @@ extern unsigned long event;
#include <linux/times.h> #include <linux/times.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/semaphore.h> #include <asm/semaphore.h>
...@@ -229,37 +230,15 @@ extern struct user_struct root_user; ...@@ -229,37 +230,15 @@ extern struct user_struct root_user;
typedef struct prio_array prio_array_t; typedef struct prio_array prio_array_t;
/* this struct must occupy one 32-bit chunk so that it can be read in one go */
struct task_work {
__s8 need_resched;
__u8 syscall_trace; /* count of syscall interceptors */
__u8 sigpending;
__u8 notify_resume; /* request for notification on
userspace execution resumption */
} __attribute__((packed));
struct task_struct { struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
*/
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
atomic_t usage;
unsigned long flags; /* per process flags, defined below */ unsigned long flags; /* per process flags, defined below */
volatile struct task_work work;
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
long __pad;
unsigned long ptrace; unsigned long ptrace;
int lock_depth; /* Lock depth */ int lock_depth; /* Lock depth */
/*
* offset 32 begins here on 32-bit platforms.
*/
unsigned int cpu;
int prio; int prio;
long __nice; long __nice;
list_t run_list; list_t run_list;
...@@ -368,6 +347,11 @@ struct task_struct { ...@@ -368,6 +347,11 @@ struct task_struct {
void *journal_info; void *journal_info;
}; };
extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#define put_task_struct(tsk) \
do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
/* /*
* Per process flags * Per process flags
*/ */
...@@ -384,17 +368,14 @@ struct task_struct { ...@@ -384,17 +368,14 @@ struct task_struct {
#define PF_FREE_PAGES 0x00002000 /* per process page freeing */ #define PF_FREE_PAGES 0x00002000 /* per process page freeing */
#define PF_NOIO 0x00004000 /* avoid generating further I/O */ #define PF_NOIO 0x00004000 /* avoid generating further I/O */
#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
/* /*
* Ptrace flags * Ptrace flags
*/ */
#define PT_PTRACED 0x00000001 #define PT_PTRACED 0x00000001
#define PT_SYSCALLTRACE 0x00000002 /* T if syscall_trace is +1 for ptrace() */ #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */ #define PT_TRACESYSGOOD 0x00000004
#define PT_TRACESYSGOOD 0x00000008 #define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */
/* /*
* Limit the stack by to some sane default: root can always * Limit the stack by to some sane default: root can always
...@@ -470,16 +451,17 @@ asmlinkage long sys_sched_yield(void); ...@@ -470,16 +451,17 @@ asmlinkage long sys_sched_yield(void);
*/ */
extern struct exec_domain default_exec_domain; extern struct exec_domain default_exec_domain;
#ifndef INIT_TASK_SIZE #ifndef INIT_THREAD_SIZE
# define INIT_TASK_SIZE 2048*sizeof(long) # define INIT_THREAD_SIZE 2048*sizeof(long)
#endif #endif
union task_union { union thread_union {
struct task_struct task; struct thread_info thread_info;
unsigned long stack[INIT_TASK_SIZE/sizeof(long)]; unsigned long stack[INIT_THREAD_SIZE/sizeof(long)];
}; };
extern union task_union init_task_union; extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern struct mm_struct init_mm; extern struct mm_struct init_mm;
extern struct task_struct *init_tasks[NR_CPUS]; extern struct task_struct *init_tasks[NR_CPUS];
...@@ -584,22 +566,6 @@ extern int kill_proc(pid_t, int, int); ...@@ -584,22 +566,6 @@ extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
static inline int signal_pending(struct task_struct *p)
{
return (p->work.sigpending != 0);
}
static inline int need_resched(void)
{
return unlikely(current->work.need_resched != 0);
}
static inline void cond_resched(void)
{
if (need_resched())
schedule();
}
/* /*
* Re-calculate pending state from the set of locally pending * Re-calculate pending state from the set of locally pending
* signals, globally pending signals, and blocked signals. * signals, globally pending signals, and blocked signals.
...@@ -630,15 +596,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) ...@@ -630,15 +596,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
return ready != 0; return ready != 0;
} }
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
All callers should have t->sigmask_lock. */
static inline void recalc_sigpending(struct task_struct *t)
{
t->work.sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
}
/* True if we are on the alternate signal stack. */ /* True if we are on the alternate signal stack. */
static inline int on_sig_stack(unsigned long sp) static inline int on_sig_stack(unsigned long sp)
...@@ -888,6 +845,72 @@ static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, ...@@ -888,6 +845,72 @@ static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
return res; return res;
} }
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(tsk->thread_info,flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(tsk->thread_info,flag);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(tsk->thread_info,flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(tsk->thread_info,flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
static inline void cond_resched(void)
{
if (need_resched())
schedule();
}
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
All callers should have t->sigmask_lock. */
static inline void recalc_sigpending(struct task_struct *t)
{
if (has_pending_signals(&t->pending.signal, &t->blocked))
set_tsk_thread_flag(t,TIF_SIGPENDING);
else
clear_tsk_thread_flag(t,TIF_SIGPENDING);
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
/* thread_info.h: common low-level thread information accessors
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds
*/
#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H
#include <asm/thread_info.h>
#include <asm/bitops.h>
#ifdef __KERNEL__
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
*/
static inline void set_thread_flag(int flag)
{
set_bit(flag,&current_thread_info()->flags);
}
static inline void clear_thread_flag(int flag)
{
clear_bit(flag,&current_thread_info()->flags);
}
static inline int test_and_set_thread_flag(int flag)
{
return test_and_set_bit(flag,&current_thread_info()->flags);
}
static inline int test_and_clear_thread_flag(int flag)
{
return test_and_clear_bit(flag,&current_thread_info()->flags);
}
static inline int test_thread_flag(int flag)
{
return test_bit(flag,&current_thread_info()->flags);
}
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag,&ti->flags);
}
static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag,&ti->flags);
}
static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag,&ti->flags);
}
static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag,&ti->flags);
}
static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_bit(flag,&ti->flags);
}
static inline void set_need_resched(void)
{
set_thread_flag(TIF_NEED_RESCHED);
}
static inline void clear_need_resched(void)
{
clear_thread_flag(TIF_NEED_RESCHED);
}
#endif
#endif /* _LINUX_THREAD_INFO_H */
...@@ -67,8 +67,8 @@ default_handler(int segment, struct pt_regs *regp) ...@@ -67,8 +67,8 @@ default_handler(int segment, struct pt_regs *regp)
} }
set_personality(pers); set_personality(pers);
if (current->exec_domain->handler != default_handler) if (current_thread_info()->exec_domain->handler != default_handler)
current->exec_domain->handler(segment, regp); current_thread_info()->exec_domain->handler(segment, regp);
else else
send_sig(SIGSEGV, current, 1); send_sig(SIGSEGV, current, 1);
} }
...@@ -162,7 +162,7 @@ __set_personality(u_long personality) ...@@ -162,7 +162,7 @@ __set_personality(u_long personality)
struct exec_domain *ep, *oep; struct exec_domain *ep, *oep;
ep = lookup_exec_domain(personality); ep = lookup_exec_domain(personality);
if (ep == current->exec_domain) { if (ep == current_thread_info()->exec_domain) {
current->personality = personality; current->personality = personality;
return 0; return 0;
} }
...@@ -190,8 +190,8 @@ __set_personality(u_long personality) ...@@ -190,8 +190,8 @@ __set_personality(u_long personality)
*/ */
current->personality = personality; current->personality = personality;
oep = current->exec_domain; oep = current_thread_info()->exec_domain;
current->exec_domain = ep; current_thread_info()->exec_domain = ep;
set_fs_altroot(); set_fs_altroot();
put_exec_domain(oep); put_exec_domain(oep);
......
...@@ -65,7 +65,7 @@ static void release_task(struct task_struct * p) ...@@ -65,7 +65,7 @@ static void release_task(struct task_struct * p)
__restore_flags(flags); __restore_flags(flags);
p->pid = 0; p->pid = 0;
free_task_struct(p); put_task_struct(p);
} }
/* /*
...@@ -529,7 +529,7 @@ NORET_TYPE void do_exit(long code) ...@@ -529,7 +529,7 @@ NORET_TYPE void do_exit(long code)
if (current->leader) if (current->leader)
disassociate_ctty(1); disassociate_ctty(1);
put_exec_domain(tsk->exec_domain); put_exec_domain(tsk->thread_info->exec_domain);
if (tsk->binfmt && tsk->binfmt->module) if (tsk->binfmt && tsk->binfmt->module)
__MOD_DEC_USE_COUNT(tsk->binfmt->module); __MOD_DEC_USE_COUNT(tsk->binfmt->module);
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
static kmem_cache_t *task_struct_cachep;
/* The idle threads do not count.. */ /* The idle threads do not count.. */
int nr_threads; int nr_threads;
...@@ -70,6 +72,14 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) ...@@ -70,6 +72,14 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
void __init fork_init(unsigned long mempages) void __init fork_init(unsigned long mempages)
{ {
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct",
sizeof(struct task_struct),0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!task_struct_cachep)
panic("fork_init(): cannot create task_struct SLAB cache");
/* /*
* The default maximum number of threads is set to a safe * The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half * value: the thread structures can take up at most half
...@@ -81,6 +91,35 @@ void __init fork_init(unsigned long mempages) ...@@ -81,6 +91,35 @@ void __init fork_init(unsigned long mempages)
init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
} }
struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
ti = alloc_thread_info();
if (!ti) return NULL;
tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
if (!tsk) {
free_thread_info(ti);
return NULL;
}
*ti = *orig->thread_info;
*tsk = *orig;
tsk->thread_info = ti;
ti->task = tsk;
atomic_set(&tsk->usage,1);
return tsk;
}
void __put_task_struct(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk);
}
/* Protects next_safe and last_pid. */ /* Protects next_safe and last_pid. */
spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED; spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
...@@ -546,7 +585,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p) ...@@ -546,7 +585,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{ {
unsigned long new_flags = p->flags; unsigned long new_flags = p->flags;
new_flags &= ~(PF_SUPERPRIV | PF_USEDFPU); new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC; new_flags |= PF_FORKNOEXEC;
if (!(clone_flags & CLONE_PTRACE)) if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0; p->ptrace = 0;
...@@ -585,12 +624,10 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start, ...@@ -585,12 +624,10 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
} }
retval = -ENOMEM; retval = -ENOMEM;
p = alloc_task_struct(); p = dup_task_struct(current);
if (!p) if (!p)
goto fork_out; goto fork_out;
*p = *current;
retval = -EAGAIN; retval = -EAGAIN;
if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) { if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
...@@ -608,7 +645,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start, ...@@ -608,7 +645,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
if (nr_threads >= max_threads) if (nr_threads >= max_threads)
goto bad_fork_cleanup_count; goto bad_fork_cleanup_count;
get_exec_domain(p->exec_domain); get_exec_domain(p->thread_info->exec_domain);
if (p->binfmt && p->binfmt->module) if (p->binfmt && p->binfmt->module)
__MOD_INC_USE_COUNT(p->binfmt->module); __MOD_INC_USE_COUNT(p->binfmt->module);
...@@ -631,7 +668,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start, ...@@ -631,7 +668,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
} }
spin_lock_init(&p->alloc_lock); spin_lock_init(&p->alloc_lock);
p->work.sigpending = 0; clear_tsk_thread_flag(p,TIF_SIGPENDING);
init_sigpending(&p->pending); init_sigpending(&p->pending);
p->it_real_value = p->it_virt_value = p->it_prof_value = 0; p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
...@@ -755,7 +792,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start, ...@@ -755,7 +792,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
* Let the child process run first, to avoid most of the * Let the child process run first, to avoid most of the
* COW overhead when the child exec()s afterwards. * COW overhead when the child exec()s afterwards.
*/ */
current->work.need_resched = 1; set_need_resched();
fork_out: fork_out:
return retval; return retval;
...@@ -771,14 +808,14 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start, ...@@ -771,14 +808,14 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
bad_fork_cleanup_files: bad_fork_cleanup_files:
exit_files(p); /* blocking */ exit_files(p); /* blocking */
bad_fork_cleanup: bad_fork_cleanup:
put_exec_domain(p->exec_domain); put_exec_domain(p->thread_info->exec_domain);
if (p->binfmt && p->binfmt->module) if (p->binfmt && p->binfmt->module)
__MOD_DEC_USE_COUNT(p->binfmt->module); __MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count: bad_fork_cleanup_count:
atomic_dec(&p->user->processes); atomic_dec(&p->user->processes);
free_uid(p->user); free_uid(p->user);
bad_fork_free: bad_fork_free:
free_task_struct(p); put_task_struct(p);
goto fork_out; goto fork_out;
} }
......
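To summarize the kernel/fork.c hunk above: each task is now two cross-linked allocations, and its lifetime is reference-counted. A condensed sketch (all names from the fork.c and sched.h hunks; the comments are editorial):

/*
 * Allocation (dup_task_struct): a slab object for the task_struct plus
 * a two-page thread_info/stack region, cross-linked both ways:
 *
 *	ti  = alloc_thread_info();	// __get_free_pages(GFP_KERNEL,1)
 *	tsk = kmem_cache_alloc(task_struct_cachep, GFP_ATOMIC);
 *	tsk->thread_info = ti;
 *	ti->task = tsk;
 *	atomic_set(&tsk->usage, 1);	// task_structs are now refcounted
 *
 * Release: callers that used free_task_struct() now use put_task_struct(),
 * and dropping the last reference frees both halves together:
 */
void __put_task_struct(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);	/* free_pages(..., 1) */
	kmem_cache_free(task_struct_cachep, tsk);
}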
...@@ -563,7 +563,8 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); ...@@ -563,7 +563,8 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
/* init task, for moving kthread roots - ought to export a function ?? */ /* init task, for moving kthread roots - ought to export a function ?? */
EXPORT_SYMBOL(init_task_union); EXPORT_SYMBOL(init_task);
EXPORT_SYMBOL(init_thread_union);
EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(tasklist_lock);
EXPORT_SYMBOL(pidhash); EXPORT_SYMBOL(pidhash);
...@@ -51,7 +51,7 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned; ...@@ -51,7 +51,7 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
#define cpu_rq(cpu) (runqueues + (cpu)) #define cpu_rq(cpu) (runqueues + (cpu))
#define this_rq() cpu_rq(smp_processor_id()) #define this_rq() cpu_rq(smp_processor_id())
#define task_rq(p) cpu_rq((p)->cpu) #define task_rq(p) cpu_rq((p)->thread_info->cpu)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define rt_task(p) ((p)->policy != SCHED_OTHER) #define rt_task(p) ((p)->policy != SCHED_OTHER)
...@@ -192,13 +192,19 @@ static inline void deactivate_task(struct task_struct *p, runqueue_t *rq) ...@@ -192,13 +192,19 @@ static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
static inline void resched_task(task_t *p) static inline void resched_task(task_t *p)
{ {
int need_resched; #ifdef CONFIG_SMP
int need_resched, nrpolling;
/* minimise the chance of sending an interrupt to poll_idle() */
nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
need_resched = p->work.need_resched; if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
wmb(); smp_send_reschedule(p->thread_info->cpu);
p->work.need_resched = 1; #else
if (!need_resched && (p->cpu != smp_processor_id())) set_tsk_need_resched(p);
smp_send_reschedule(p->cpu); #endif
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -236,7 +242,7 @@ void wait_task_inactive(task_t * p) ...@@ -236,7 +242,7 @@ void wait_task_inactive(task_t * p)
void sched_task_migrated(task_t *new_task) void sched_task_migrated(task_t *new_task)
{ {
wait_task_inactive(new_task); wait_task_inactive(new_task);
new_task->cpu = smp_processor_id(); new_task->thread_info->cpu = smp_processor_id();
wake_up_process(new_task); wake_up_process(new_task);
} }
...@@ -299,7 +305,7 @@ void wake_up_forked_process(task_t * p) ...@@ -299,7 +305,7 @@ void wake_up_forked_process(task_t * p)
current->sleep_avg = current->sleep_avg * PARENT_FORK_PENALTY / 100; current->sleep_avg = current->sleep_avg * PARENT_FORK_PENALTY / 100;
} }
spin_lock_irq(&rq->lock); spin_lock_irq(&rq->lock);
p->cpu = smp_processor_id(); p->thread_info->cpu = smp_processor_id();
activate_task(p, rq); activate_task(p, rq);
spin_unlock_irq(&rq->lock); spin_unlock_irq(&rq->lock);
} }
...@@ -519,11 +525,11 @@ static void load_balance(runqueue_t *this_rq, int idle) ...@@ -519,11 +525,11 @@ static void load_balance(runqueue_t *this_rq, int idle)
*/ */
dequeue_task(next, array); dequeue_task(next, array);
busiest->nr_running--; busiest->nr_running--;
next->cpu = this_cpu; next->thread_info->cpu = this_cpu;
this_rq->nr_running++; this_rq->nr_running++;
enqueue_task(next, this_rq->active); enqueue_task(next, this_rq->active);
if (next->prio < current->prio) if (next->prio < current->prio)
current->work.need_resched = 1; set_need_resched();
if (!idle && --imbalance) { if (!idle && --imbalance) {
if (array == busiest->expired) { if (array == busiest->expired) {
array = busiest->active; array = busiest->active;
...@@ -572,7 +578,7 @@ void scheduler_tick(task_t *p) ...@@ -572,7 +578,7 @@ void scheduler_tick(task_t *p)
#endif #endif
/* Task might have expired already, but not scheduled off yet */ /* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) { if (p->array != rq->active) {
p->work.need_resched = 1; set_tsk_need_resched(p);
return; return;
} }
spin_lock(&rq->lock); spin_lock(&rq->lock);
...@@ -583,7 +589,7 @@ void scheduler_tick(task_t *p) ...@@ -583,7 +589,7 @@ void scheduler_tick(task_t *p)
*/ */
if ((p->policy == SCHED_RR) && !--p->time_slice) { if ((p->policy == SCHED_RR) && !--p->time_slice) {
p->time_slice = NICE_TO_TIMESLICE(p->__nice); p->time_slice = NICE_TO_TIMESLICE(p->__nice);
p->work.need_resched = 1; set_tsk_need_resched(p);
/* put it at the end of the queue: */ /* put it at the end of the queue: */
dequeue_task(p, rq->active); dequeue_task(p, rq->active);
...@@ -603,7 +609,7 @@ void scheduler_tick(task_t *p) ...@@ -603,7 +609,7 @@ void scheduler_tick(task_t *p)
p->sleep_avg--; p->sleep_avg--;
if (!--p->time_slice) { if (!--p->time_slice) {
dequeue_task(p, rq->active); dequeue_task(p, rq->active);
p->work.need_resched = 1; set_tsk_need_resched(p);
p->prio = effective_prio(p); p->prio = effective_prio(p);
p->time_slice = NICE_TO_TIMESLICE(p->__nice); p->time_slice = NICE_TO_TIMESLICE(p->__nice);
...@@ -684,7 +690,7 @@ asmlinkage void schedule(void) ...@@ -684,7 +690,7 @@ asmlinkage void schedule(void)
switch_tasks: switch_tasks:
prefetch(next); prefetch(next);
prev->work.need_resched = 0; clear_tsk_need_resched(prev);
if (likely(prev != next)) { if (likely(prev != next)) {
rq->nr_switches++; rq->nr_switches++;
...@@ -1316,9 +1322,9 @@ void __init init_idle(task_t *idle, int cpu) ...@@ -1316,9 +1322,9 @@ void __init init_idle(task_t *idle, int cpu)
idle->array = NULL; idle->array = NULL;
idle->prio = MAX_PRIO; idle->prio = MAX_PRIO;
idle->state = TASK_RUNNING; idle->state = TASK_RUNNING;
idle->cpu = cpu; idle->thread_info->cpu = cpu;
double_rq_unlock(idle_rq, rq); double_rq_unlock(idle_rq, rq);
idle->work.need_resched = 1; set_tsk_need_resched(idle);
__restore_flags(flags); __restore_flags(flags);
} }
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#define DEBUG_SIG 0 #define DEBUG_SIG 0
#if DEBUG_SIG #if DEBUG_SIG
#define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */) #define SIG_SLAB_DEBUG (SLAB_RED_ZONE /* | SLAB_POISON */)
#else #else
#define SIG_SLAB_DEBUG 0 #define SIG_SLAB_DEBUG 0
#endif #endif
...@@ -105,7 +105,7 @@ static void flush_sigqueue(struct sigpending *queue) ...@@ -105,7 +105,7 @@ static void flush_sigqueue(struct sigpending *queue)
void void
flush_signals(struct task_struct *t) flush_signals(struct task_struct *t)
{ {
t->work.sigpending = 0; clear_tsk_thread_flag(t,TIF_SIGPENDING);
flush_sigqueue(&t->pending); flush_sigqueue(&t->pending);
} }
...@@ -119,7 +119,7 @@ void exit_sighand(struct task_struct *tsk) ...@@ -119,7 +119,7 @@ void exit_sighand(struct task_struct *tsk)
if (atomic_dec_and_test(&sig->count)) if (atomic_dec_and_test(&sig->count))
kmem_cache_free(sigact_cachep, sig); kmem_cache_free(sigact_cachep, sig);
} }
tsk->work.sigpending = 0; clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending); flush_sigqueue(&tsk->pending);
spin_unlock_irq(&tsk->sigmask_lock); spin_unlock_irq(&tsk->sigmask_lock);
} }
...@@ -275,7 +275,7 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid, ...@@ -275,7 +275,7 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
if (current->notifier) { if (current->notifier) {
if (sigismember(current->notifier_mask, sig)) { if (sigismember(current->notifier_mask, sig)) {
if (!(current->notifier)(current->notifier_data)) { if (!(current->notifier)(current->notifier_data)) {
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
return 0; return 0;
} }
} }
...@@ -494,7 +494,7 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals ...@@ -494,7 +494,7 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
*/ */
static inline void signal_wake_up(struct task_struct *t) static inline void signal_wake_up(struct task_struct *t)
{ {
t->work.sigpending = 1; set_tsk_thread_flag(t,TIF_SIGPENDING);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
...@@ -507,7 +507,7 @@ static inline void signal_wake_up(struct task_struct *t) ...@@ -507,7 +507,7 @@ static inline void signal_wake_up(struct task_struct *t)
* process of changing - but no harm is done by that * process of changing - but no harm is done by that
* other than doing an extra (lightweight) IPI interrupt. * other than doing an extra (lightweight) IPI interrupt.
*/ */
if ((t->state == TASK_RUNNING) && (t->cpu != smp_processor_id())) if ((t->state == TASK_RUNNING) && (t->thread_info->cpu != smp_processor_id()))
kick_if_running(t); kick_if_running(t);
#endif #endif
if (t->state & TASK_INTERRUPTIBLE) { if (t->state & TASK_INTERRUPTIBLE) {
......
...@@ -1109,7 +1109,7 @@ rpciod_killall(void) ...@@ -1109,7 +1109,7 @@ rpciod_killall(void)
unsigned long flags; unsigned long flags;
while (all_tasks) { while (all_tasks) {
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
rpc_killall_tasks(NULL); rpc_killall_tasks(NULL);
__rpc_schedule(); __rpc_schedule();
if (all_tasks) { if (all_tasks) {
...@@ -1183,7 +1183,7 @@ rpciod_down(void) ...@@ -1183,7 +1183,7 @@ rpciod_down(void)
* Usually rpciod will exit very quickly, so we * Usually rpciod will exit very quickly, so we
* wait briefly before checking the process id. * wait briefly before checking the process id.
*/ */
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
yield(); yield();
/* /*
* Display a message if we're going to wait longer. * Display a message if we're going to wait longer.
......
...@@ -185,7 +185,7 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port) ...@@ -185,7 +185,7 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port); progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port);
if (!port) if (!port)
current->work.sigpending = 0; clear_thread_flag(TIF_SIGPENDING);
for (i = 0; i < progp->pg_nvers; i++) { for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL) if (progp->pg_vers[i] == NULL)
......