Commit e9d9db6b authored by Linus Torvalds

Merge master.kernel.org:BK/linux-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8875527c 4fe9df79
@@ -69,24 +69,27 @@ IF_MASK = 0x00000200
NT_MASK = 0x00004000
VM_MASK = 0x00020000
/* These are offsets into the irq_stat structure
/*
* These are offsets into the irq_stat structure
* There is one per cpu and it is aligned to 32
* byte boundary (we put that here as a shift count)
*/
irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
irq_stat_local_irq_count = 4
irq_stat_local_bh_count = 8
irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
local_irq_count = 4
local_bh_count = 8
#ifdef CONFIG_SMP
#define GET_CPU_INDX movl TI_CPU(%ebx),%eax; \
shll $irq_array_shift,%eax
#define GET_CURRENT_CPU_INDX GET_THREAD_INFO(%ebx); \
GET_CPU_INDX
#define CPU_INDX (,%eax)
#define GET_CPU_IDX \
movl TI_CPU(%ebx), %eax; \
shll $irq_array_shift, %eax
#define GET_CURRENT_CPU_IDX \
GET_THREAD_INFO(%ebx); \
GET_CPU_IDX
#define CPU_IDX (,%eax)
#else
#define GET_CPU_INDX
#define GET_CURRENT_CPU_INDX GET_THREAD_INFO(%ebx)
#define CPU_INDX
#define GET_CPU_IDX
#define GET_CURRENT_CPU_IDX GET_THREAD_INFO(%ebx)
#define CPU_IDX
#endif
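For orientation, a minimal C sketch of the layout these offsets and macros assume. The field names follow the irq_cpustat_t of this era but are stand-ins, and L1_CACHE_SHIFT stands in for CONFIG_X86_L1_CACHE_SHIFT; treat this as an illustration, not the kernel's header.

#include <stddef.h>

#define L1_CACHE_SHIFT 5			/* CONFIG_X86_L1_CACHE_SHIFT */

typedef struct {
	unsigned int softirq_pending;		/* offset 0 */
	unsigned int local_irq_count;		/* offset 4, as above */
	unsigned int local_bh_count;		/* offset 8, as above */
} __attribute__((aligned(1 << L1_CACHE_SHIFT))) irq_cpustat_t;

extern irq_cpustat_t irq_stat[];		/* one slot per CPU */

/* What GET_CPU_IDX/CPU_IDX compute: shift the CPU number up to the
 * byte offset of that CPU's cache-line-aligned slot, then add the
 * field offset. */
static inline unsigned int *local_irq_count_ptr(int cpu)
{
	return (unsigned int *)((char *)irq_stat
				+ ((long)cpu << L1_CACHE_SHIFT)
				+ offsetof(irq_cpustat_t, local_irq_count));
}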
#ifdef CONFIG_PREEMPT
@@ -111,9 +114,9 @@ irq_stat_local_bh_count = 8
pushl %edx; \
pushl %ecx; \
pushl %ebx; \
movl $(__KERNEL_DS),%edx; \
movl %edx,%ds; \
movl %edx,%es;
movl $(__KERNEL_DS), %edx; \
movl %edx, %ds; \
movl %edx, %es;
#define RESTORE_ALL \
popl %ebx; \
@@ -125,7 +128,7 @@ irq_stat_local_bh_count = 8
popl %eax; \
1: popl %ds; \
2: popl %es; \
addl $4,%esp; \
addl $4, %esp; \
3: iret; \
.section .fixup,"ax"; \
4: movl $0,(%esp); \
@@ -147,20 +150,21 @@ irq_stat_local_bh_count = 8
.previous
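SAVE_ALL pushes the segment and general registers so that, once it finishes, %esp points at a struct pt_regs; RESTORE_ALL pops them back in the opposite order. For orientation, the i386 layout of that structure in this era (the EIP/CS/EFLAGS/EAX offsets used throughout this file index into it):

struct pt_regs {
	long ebx;		/* pushed last by SAVE_ALL: lowest address  */
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	long orig_eax;		/* pushed by the entry stub before SAVE_ALL */
	long eip;		/* hardware exception/interrupt frame...    */
	int  xcs;
	long eflags;
	long esp;		/* present only on a privilege transition   */
	int  xss;
};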
ENTRY(lcall7)
pushfl # We get a different stack layout with call gates,
pushl %eax # which has to be cleaned up later..
pushfl # We get a different stack layout with call
# gates, which has to be cleaned up later..
pushl %eax
SAVE_ALL
movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
movl CS(%esp),%edx # this is eip..
movl EFLAGS(%esp),%ecx # and this is cs..
movl EIP(%esp), %eax # due to call gates, this is eflags, not eip..
movl CS(%esp), %edx # this is eip..
movl EFLAGS(%esp), %ecx # and this is cs..
movl %eax,EFLAGS(%esp) #
movl %edx,EIP(%esp) # Now we move them to their "normal" places
movl %ecx,CS(%esp) #
movl %esp,%ebx
movl %esp, %ebx
pushl %ebx
andl $-8192,%ebx # GET_THREAD_INFO
movl TI_EXEC_DOMAIN(%ebx),%edx # Get the execution domain
movl 4(%edx),%edx # Get the lcall7 handler for the domain
andl $-8192, %ebx # GET_THREAD_INFO
movl TI_EXEC_DOMAIN(%ebx), %edx # Get the execution domain
movl 4(%edx), %edx # Get the lcall7 handler for the domain
pushl $0x7
call *%edx
addl $4, %esp
@@ -168,20 +172,21 @@ ENTRY(lcall7)
jmp resume_userspace
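The same fixup appears in lcall27 below. Entry through a call gate makes the CPU push only cs/eip, and the hand-written pushfl then lands where eip normally lives, so the pt_regs slots named EIP, CS and EFLAGS actually hold eflags, eip and cs. A sketch of the three-way rotation, using the pt_regs layout shown earlier:

static void fixup_call_gate_frame(struct pt_regs *regs)
{
	long flags = regs->eip;		/* EIP slot really holds eflags */
	long ip    = regs->xcs;		/* CS slot really holds eip     */
	long cs    = regs->eflags;	/* EFLAGS slot really holds cs  */

	regs->eflags = flags;		/* now move them to their       */
	regs->eip    = ip;		/* "normal" places              */
	regs->xcs    = cs;
}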
ENTRY(lcall27)
pushfl # We get a different stack layout with call gates,
pushl %eax # which has to be cleaned up later..
pushfl # We get a different stack layout with call
# gates, which has to be cleaned up later..
pushl %eax
SAVE_ALL
movl EIP(%esp),%eax # due to call gates, this is eflags, not eip..
movl CS(%esp),%edx # this is eip..
movl EFLAGS(%esp),%ecx # and this is cs..
movl EIP(%esp), %eax # due to call gates, this is eflags, not eip..
movl CS(%esp), %edx # this is eip..
movl EFLAGS(%esp), %ecx # and this is cs..
movl %eax,EFLAGS(%esp) #
movl %edx,EIP(%esp) # Now we move them to their "normal" places
movl %ecx,CS(%esp) #
movl %esp,%ebx
movl %esp, %ebx
pushl %ebx
andl $-8192,%ebx # GET_THREAD_INFO
movl TI_EXEC_DOMAIN(%ebx),%edx # Get the execution domain
movl 4(%edx),%edx # Get the lcall7 handler for the domain
andl $-8192, %ebx # GET_THREAD_INFO
movl TI_EXEC_DOMAIN(%ebx), %edx # Get the execution domain
movl 4(%edx), %edx # Get the lcall7 handler for the domain
pushl $0x27
call *%edx
addl $4, %esp
@@ -211,15 +216,17 @@ ENTRY(ret_from_intr)
GET_THREAD_INFO(%ebx)
init_ret_intr
ret_from_exception:
movl EFLAGS(%esp),%eax # mix EFLAGS and CS
movb CS(%esp),%al
testl $(VM_MASK | 3),%eax
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
testl $(VM_MASK | 3), %eax
jz resume_kernel # returning to kernel or vm86-space
ENTRY(resume_userspace)
cli # make sure we don't miss an interrupt setting need_resched
# or sigpending between sampling and the iret
movl TI_FLAGS(%ebx),%ecx
andl $_TIF_WORK_MASK,%ecx # is there any work to be done on int/excp return?
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_FLAGS(%ebx), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
jmp restore_all
@@ -227,15 +234,17 @@ ENTRY(resume_userspace)
ENTRY(resume_kernel)
cmpl $0,TI_PRE_COUNT(%ebx)
jnz restore_all
movl TI_FLAGS(%ebx),%ecx
testb $_TIF_NEED_RESCHED,%cl
movl TI_FLAGS(%ebx), %ecx
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
movl SYMBOL_NAME(irq_stat)+local_bh_count CPU_IDX, %ecx
addl SYMBOL_NAME(irq_stat)+local_irq_count CPU_IDX, %ecx
jnz restore_all
incl TI_PRE_COUNT(%ebx)
sti
call SYMBOL_NAME(preempt_schedule)
movl TI_TASK(%ebx), %ecx # ti->task
movl $0,(%ecx) # current->state = TASK_RUNNING
call SYMBOL_NAME(schedule)
jmp ret_from_intr
#endif
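In C terms, the new resume_kernel path makes roughly this decision. The sketch below is illustrative, not the kernel's code: the types are reduced to the fields used, the _TIF_NEED_RESCHED bit position is assumed, and the sti instruction is elided to a comment.

struct task_struct { volatile long state; };
struct thread_info {
	struct task_struct *task;
	unsigned long flags;
	int preempt_count;
};

#define TASK_RUNNING		0
#define _TIF_NEED_RESCHED	(1 << 3)	/* assumed bit position */

extern void schedule(void);
extern irq_cpustat_t irq_stat[];		/* as sketched earlier  */

static void resume_kernel_sketch(struct thread_info *ti, int cpu)
{
	if (ti->preempt_count)
		return;				/* preemption disabled   */
	if (!(ti->flags & _TIF_NEED_RESCHED))
		return;				/* nothing to do         */
	if (irq_stat[cpu].local_irq_count + irq_stat[cpu].local_bh_count)
		return;				/* inside irq/bh: unsafe */

	ti->preempt_count++;			/* bar recursive preemption */
	/* sti: interrupts back on */
	ti->task->state = TASK_RUNNING;		/* the movl $0,(%ecx) above */
	schedule();
	/* the asm then jumps to ret_from_intr and re-checks */
}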
@@ -245,18 +254,20 @@ ENTRY(system_call)
pushl %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebx)
cmpl $(NR_syscalls),%eax
cmpl $(NR_syscalls), %eax
jae syscall_badsys
testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx) # system call tracing in operation
# system call tracing in operation
testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
jnz syscall_trace_entry
syscall_call:
call *SYMBOL_NAME(sys_call_table)(,%eax,4)
movl %eax,EAX(%esp) # store the return value
syscall_exit:
cli # make sure we don't miss an interrupt setting need_resched
# or sigpending between sampling and the iret
movl TI_FLAGS(%ebx),%ecx
testw $_TIF_ALLWORK_MASK,%cx # current->work
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_FLAGS(%ebx), %ecx
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
restore_all:
RESTORE_ALL
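The dispatch at syscall_call is a plain table lookup: %eax carries the syscall number, already bounds-checked against NR_syscalls, and the (,%eax,4) addressing scales it by the size of a function pointer. A sketch (NR_SYSCALLS and the zero-argument entry type are illustrative; the real entries pick their arguments out of the saved registers):

#include <errno.h>

typedef long (*syscall_fn)(void);
extern syscall_fn sys_call_table[];
#define NR_SYSCALLS 253			/* illustrative value */

long dispatch_sketch(unsigned long nr, struct pt_regs *regs)
{
	if (nr >= NR_SYSCALLS)
		return -ENOSYS;			/* syscall_badsys */
	regs->eax = sys_call_table[nr]();	/* store the return value */
	return regs->eax;
}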
@@ -264,23 +275,27 @@ restore_all:
# perform work that needs to be done immediately before resumption
ALIGN
work_pending:
testb $_TIF_NEED_RESCHED,%cl
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call SYMBOL_NAME(schedule)
cli # make sure we don't miss an interrupt setting need_resched
# or sigpending between sampling and the iret
movl TI_FLAGS(%ebx),%ecx
andl $_TIF_WORK_MASK,%ecx # is there any work to be done other than syscall tracing?
cli # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_FLAGS(%ebx), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
testb $_TIF_NEED_RESCHED,%cl
testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
work_notifysig: # deal with pending signals and notify-resume requests
work_notifysig: # deal with pending signals and
# notify-resume requests
testl $(VM_MASK),EFLAGS(%esp)
movl %esp,%eax
jne work_notifysig_v86 # returning to kernel-space or vm86-space
xorl %edx,%edx
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
xorl %edx, %edx
call SYMBOL_NAME(do_notify_resume)
jmp restore_all
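Taken together, resume_userspace, work_pending and work_resched form a loop that drains all pending work before the iret. A sketch, with the vm86 branch omitted; the two-argument do_notify_resume() call is inferred from the %eax/%edx setup above, and the mask value is illustrative:

#define _TIF_WORK_MASK	0x0000000e	/* illustrative */

static inline void cli(void) { __asm__ __volatile__("cli"); }

extern void schedule(void);
extern void do_notify_resume(struct pt_regs *regs, void *oldset);

void return_to_user_sketch(struct thread_info *ti, struct pt_regs *regs)
{
	for (;;) {
		cli();		/* don't miss need_resched/sigpending
				   between sampling and the iret */
		if (!(ti->flags & _TIF_WORK_MASK))
			return;				/* restore_all  */
		if (ti->flags & _TIF_NEED_RESCHED) {
			schedule();			/* work_resched */
			continue;
		}
		do_notify_resume(regs, NULL);	/* oldset = 0, per the
						   xorl %edx,%edx above */
		return;					/* restore_all  */
	}
}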
@@ -289,8 +304,8 @@ work_notifysig_v86:
pushl %ecx
call SYMBOL_NAME(save_v86_state)
popl %ecx
movl %eax,%esp
xorl %edx,%edx
movl %eax, %esp
xorl %edx, %edx
call SYMBOL_NAME(do_notify_resume)
jmp restore_all
@@ -298,22 +313,23 @@ work_notifysig_v86:
ALIGN
syscall_trace_entry:
movl $-ENOSYS,EAX(%esp)
movl %esp,%eax
movl %esp, %eax
xorl %edx,%edx
call SYMBOL_NAME(do_syscall_trace)
movl ORIG_EAX(%esp),%eax
cmpl $(NR_syscalls),%eax
movl ORIG_EAX(%esp), %eax
cmpl $(NR_syscalls), %eax
jnae syscall_call
jmp syscall_exit
# perform syscall exit tracing
ALIGN
syscall_exit_work:
testb $_TIF_SYSCALL_TRACE,%cl
testb $_TIF_SYSCALL_TRACE, %cl
jz work_pending
sti # could let do_syscall_trace() call schedule() instead
movl %esp,%eax
movl $1,%edx
sti # could let do_syscall_trace() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
call SYMBOL_NAME(do_syscall_trace)
jmp resume_userspace
@@ -323,13 +339,13 @@ syscall_badsys:
jmp resume_userspace
ENTRY(divide_error)
pushl $0 # no error code
pushl $0 # no error code
pushl $ SYMBOL_NAME(do_divide_error)
ALIGN
error_code:
pushl %ds
pushl %eax
xorl %eax,%eax
xorl %eax, %eax
pushl %ebp
pushl %edi
pushl %esi
@@ -338,20 +354,20 @@ error_code:
pushl %ecx
pushl %ebx
cld
movl %es,%ecx
movl %es, %ecx
movl ORIG_EAX(%esp), %esi # get the error code
movl ES(%esp), %edi # get the function address
movl %eax, ORIG_EAX(%esp)
movl %ecx, ES(%esp)
movl %esp,%edx
movl %esp, %edx
pushl %esi # push the error code
pushl %edx # push the pt_regs pointer
movl $(__KERNEL_DS),%edx
movl %edx,%ds
movl %edx,%es
movl $(__KERNEL_DS), %edx
movl %edx, %ds
movl %edx, %es
GET_THREAD_INFO(%ebx)
call *%edi
addl $8,%esp
addl $8, %esp
preempt_stop
jmp ret_from_exception
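error_code is the common trampoline for the exception stubs: each stub pushes an error code (or 0) and the address of its C handler, and the shuffle above converts that into a normal pt_regs frame plus a handler(regs, error_code) call. In C terms, a sketch using the pt_regs layout shown earlier (the folded lines also decrement the zeroed %eax to -1, marking the frame as not-a-syscall):

static void error_code_sketch(struct pt_regs *regs, long saved_es)
{
	long error_code = regs->orig_eax;	/* pushed by the stub   */
	void (*handler)(struct pt_regs *, long) =
		(void (*)(struct pt_regs *, long))(long)regs->xes;

	regs->orig_eax = -1;			/* mark: not a syscall  */
	regs->xes = saved_es;			/* real %es into its slot */
	handler(regs, error_code);		/* e.g. do_divide_error() */
}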
@@ -366,19 +382,19 @@ ENTRY(simd_coprocessor_error)
jmp error_code
ENTRY(device_not_available)
pushl $-1 # mark this as an int
pushl $-1 # mark this as an int
SAVE_ALL
GET_THREAD_INFO(%ebx)
movl %cr0,%eax
testl $0x4,%eax # EM (math emulation bit)
movl %cr0, %eax
testl $0x4, %eax # EM (math emulation bit)
jne device_not_available_emulate
preempt_stop
call SYMBOL_NAME(math_state_restore)
jmp ret_from_exception
device_not_available_emulate:
pushl $0 # temporary storage for ORIG_EIP
pushl $0 # temporary storage for ORIG_EIP
call SYMBOL_NAME(math_emulate)
addl $4,%esp
addl $4, %esp
preempt_stop
jmp ret_from_exception
@@ -390,11 +406,11 @@ ENTRY(debug)
ENTRY(nmi)
pushl %eax
SAVE_ALL
movl %esp,%edx
movl %esp, %edx
pushl $0
pushl %edx
call SYMBOL_NAME(do_nmi)
addl $8,%esp
addl $8, %esp
RESTORE_ALL
ENTRY(int3)
@@ -479,7 +495,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_mknod)
.long SYMBOL_NAME(sys_chmod) /* 15 */
.long SYMBOL_NAME(sys_lchown16)
.long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
.long SYMBOL_NAME(sys_stat)
.long SYMBOL_NAME(sys_lseek)
.long SYMBOL_NAME(sys_getpid) /* 20 */
@@ -493,11 +509,12 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_fstat)
.long SYMBOL_NAME(sys_pause)
.long SYMBOL_NAME(sys_utime) /* 30 */
.long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
.long SYMBOL_NAME(sys_access)
.long SYMBOL_NAME(sys_nice)
.long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* 35 */
/* old ftime syscall holder */
.long SYMBOL_NAME(sys_sync)
.long SYMBOL_NAME(sys_kill)
.long SYMBOL_NAME(sys_rename)
@@ -506,7 +523,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_dup)
.long SYMBOL_NAME(sys_pipe)
.long SYMBOL_NAME(sys_times)
.long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
.long SYMBOL_NAME(sys_brk) /* 45 */
.long SYMBOL_NAME(sys_setgid16)
.long SYMBOL_NAME(sys_getgid16)
@@ -514,13 +531,13 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_geteuid16)
.long SYMBOL_NAME(sys_getegid16) /* 50 */
.long SYMBOL_NAME(sys_acct)
.long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
.long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
.long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
.long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
.long SYMBOL_NAME(sys_ioctl)
.long SYMBOL_NAME(sys_fcntl) /* 55 */
.long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
.long SYMBOL_NAME(sys_setpgid)
.long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
.long SYMBOL_NAME(sys_olduname)
.long SYMBOL_NAME(sys_umask) /* 60 */
.long SYMBOL_NAME(sys_chroot)
@@ -560,7 +577,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_fchown16) /* 95 */
.long SYMBOL_NAME(sys_getpriority)
.long SYMBOL_NAME(sys_setpriority)
.long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
.long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
.long SYMBOL_NAME(sys_statfs)
.long SYMBOL_NAME(sys_fstatfs) /* 100 */
.long SYMBOL_NAME(sys_ioperm)
@@ -650,8 +667,8 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_capset) /* 185 */
.long SYMBOL_NAME(sys_sigaltstack)
.long SYMBOL_NAME(sys_sendfile)
.long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
.long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
.long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_vfork) /* 190 */
.long SYMBOL_NAME(sys_getrlimit)
.long SYMBOL_NAME(sys_mmap2)
@@ -700,7 +717,7 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_removexattr) /* 235 */
.long SYMBOL_NAME(sys_lremovexattr)
.long SYMBOL_NAME(sys_fremovexattr)
.long SYMBOL_NAME(sys_tkill)
.long SYMBOL_NAME(sys_tkill)
.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)
......
@@ -455,9 +455,11 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit)
between a syscall stop and SIGTRAP delivery */
current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0);
preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
......
@@ -610,9 +610,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
/* Let the debugger run. */
current->exit_code = signr;
preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
/* We're back. Did the debugger cancel the sig? */
if (!(signr = current->exit_code))
@@ -667,12 +669,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
case SIGSTOP: {
struct signal_struct *sig;
current->state = TASK_STOPPED;
current->exit_code = signr;
sig = current->p_pptr->sig;
preempt_disable();
current->state = TASK_STOPPED;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
continue;
}
......
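A plausible reading of why all three stop paths above (syscall tracing and the two signal-delivery stops) gain a preempt_disable()/preempt_enable() pair: the rewritten preempt_schedule() in the scheduler hunk below forces current->state back to TASK_RUNNING, so an involuntary preemption landing between the TASK_STOPPED assignment and the voluntary schedule() would silently cancel the stop. Illustrated:

/*
 *	current->state = TASK_STOPPED;
 *		<-- irq, TIF_NEED_RESCHED set, preempt_schedule():
 *			current->state = TASK_RUNNING;	(stop is lost)
 *	notify_parent(current, SIGCHLD);
 *	schedule();		<-- task never actually stops
 *
 * With the preempt count raised, preempt_schedule() returns
 * immediately and TASK_STOPPED survives until schedule() is
 * entered voluntarily.
 */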
@@ -415,6 +415,25 @@ static __inline__ unsigned long __ffs(unsigned long word)
#ifdef __KERNEL__
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
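As used by the O(1) scheduler, the 140 bits map to the 140 priority levels, and the first set bit names the highest-priority non-empty runqueue. A standalone check of the arithmetic, substituting a gcc builtin for the kernel's __ffs():

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)
static inline int __ffs(unsigned long w) { return __builtin_ctzl(w); }

static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

int main(void)
{
	unsigned long prio[5] = { 0, 0, 0, 1UL << 6, 0 };
	printf("%d\n", sched_find_first_bit(prio));	/* prints 102 */
	return 0;
}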
/**
* ffs - find first bit set
* @x: the word to search
......
@@ -6,24 +6,6 @@
#include <asm/atomic.h>
#include <asm/pgalloc.h>
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
/*
* possibly do the LDT unload here?
*/
......
@@ -13,24 +13,23 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
#define switch_to(prev,next) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
"pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
"movl %3,%%esp\n\t" /* restore ESP */ \
"movl %2,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
"pushl %4\n\t" /* restore EIP */ \
"pushl %3\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
"=b" (last) \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
"a" (prev), "d" (next), \
"b" (prev)); \
"a" (prev), "d" (next)); \
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
......
@@ -92,7 +92,6 @@ extern unsigned long nr_running(void);
#define TASK_UNINTERRUPTIBLE 2
#define TASK_ZOMBIE 4
#define TASK_STOPPED 8
#define PREEMPT_ACTIVE 0x4000000
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
......
@@ -177,9 +177,8 @@ do { \
do { \
--current_thread_info()->preempt_count; \
barrier(); \
if (unlikely(!(current_thread_info()->preempt_count) && \
test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
#define spin_lock(lock) \
......
@@ -476,7 +476,12 @@ static void exit_notify(void)
write_lock_irq(&tasklist_lock);
}
}
write_unlock_irq(&tasklist_lock);
/*
* No need to unlock IRQs, we'll schedule() immediately
* anyway. In the preemption case this also makes it
* impossible for the task to get runnable again.
*/
write_unlock(&tasklist_lock);
}
NORET_TYPE void do_exit(long code)
......
@@ -435,17 +435,8 @@ static inline void context_switch(task_t *prev, task_t *next)
mmdrop(oldmm);
}
/*
* Here we just switch the register state and the stack. There are
* 3 processes affected by a context switch:
*
* prev ==> .... ==> (last => next)
*
* It's the 'much more previous' 'prev' that is on next's stack,
* but prev is set to (the just run) 'last' process by switch_to().
* This might sound slightly confusing but makes tons of sense.
*/
switch_to(prev, next, prev);
/* Here we just switch the register state and the stack. */
switch_to(prev, next);
}
unsigned long nr_running(void)
@@ -770,6 +761,7 @@ asmlinkage void schedule(void)
if (unlikely(in_interrupt()))
BUG();
need_resched:
preempt_disable();
prev = current;
rq = this_rq();
@@ -778,15 +770,6 @@ asmlinkage void schedule(void)
prev->sleep_timestamp = jiffies;
spin_lock_irq(&rq->lock);
#ifdef CONFIG_PREEMPT
/*
* if entering from preempt_schedule, off a kernel preemption,
* go straight to picking the next task.
*/
if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
goto pick_next_task;
#endif
switch (prev->state) {
case TASK_INTERRUPTIBLE:
if (unlikely(signal_pending(prev))) {
@@ -798,7 +781,7 @@ asmlinkage void schedule(void)
case TASK_RUNNING:
;
}
#if CONFIG_SMP || CONFIG_PREEMPT
#if CONFIG_SMP
pick_next_task:
#endif
if (unlikely(!rq->nr_running)) {
@@ -847,6 +830,8 @@ asmlinkage void schedule(void)
reacquire_kernel_lock(current);
preempt_enable_no_resched();
if (test_thread_flag(TIF_NEED_RESCHED))
goto need_resched;
return;
}
@@ -856,12 +841,10 @@ asmlinkage void schedule(void)
*/
asmlinkage void preempt_schedule(void)
{
do {
current_thread_info()->preempt_count += PREEMPT_ACTIVE;
schedule();
current_thread_info()->preempt_count -= PREEMPT_ACTIVE;
barrier();
} while (test_thread_flag(TIF_NEED_RESCHED));
if (unlikely(preempt_get_count()))
return;
current->state = TASK_RUNNING;
schedule();
}
#endif /* CONFIG_PREEMPT */
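How the preemption pieces now fit together, as this hunk and the preempt_enable() change above suggest (a sketch of the flow, not the kernel's code):

/*
 *	preempt_enable():
 *		--preempt_count; barrier();
 *		if (TIF_NEED_RESCHED set)
 *			preempt_schedule();
 *
 *	preempt_schedule():
 *		if (preempt_get_count())	// still disabled somewhere:
 *			return;			// defer to the outer enable
 *		current->state = TASK_RUNNING;	// never sleep off a preemption
 *		schedule();			// disables preemption itself and
 *						// re-tests TIF_NEED_RESCHED on
 *						// its tail before returning
 */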
......