Commit e9d9db6b authored by Linus Torvalds

Merge master.kernel.org:BK/linux-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8875527c 4fe9df79
@@ -455,9 +455,11 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit)
 	   between a syscall stop and SIGTRAP delivery */
 	current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
 					? 0x80 : 0);
+	preempt_disable();
 	current->state = TASK_STOPPED;
 	notify_parent(current, SIGCHLD);
 	schedule();
+	preempt_enable();
 	/*
 	 * this isn't the same as continuing with a signal, but it will do
 	 * for normal use. strace only continues with a signal if the
...
@@ -610,9 +610,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			/* Let the debugger run. */
 			current->exit_code = signr;
+			preempt_disable();
 			current->state = TASK_STOPPED;
 			notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			/* We're back. Did the debugger cancel the sig? */
 			if (!(signr = current->exit_code))
@@ -667,12 +669,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		case SIGSTOP: {
 			struct signal_struct *sig;
-			current->state = TASK_STOPPED;
 			current->exit_code = signr;
 			sig = current->p_pptr->sig;
+			preempt_disable();
+			current->state = TASK_STOPPED;
 			if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
 				notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			continue;
 		}
...
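The preempt_disable()/preempt_enable() pairs added in the three hunks above all bracket the same sequence: set current->state to TASK_STOPPED, notify the parent, then call schedule(). With kernel preemption enabled, a preemption landing between the state assignment and schedule() goes through preempt_schedule(), which (as the change at the end of this diff shows) sets current->state back to TASK_RUNNING, so the stop could be silently lost. A minimal userspace model of that window, with made-up names rather than real kernel interfaces:

#include <stdio.h>

enum state { RUNNING, STOPPED };

static enum state task_state = RUNNING;
static int preempt_count;

/* models an asynchronous kernel preemption point */
static void maybe_preempt(void)
{
	if (preempt_count)
		return;			/* preemption is disabled: nothing happens */
	task_state = RUNNING;		/* models preempt_schedule() resetting ->state */
}

int main(void)
{
	preempt_count++;		/* preempt_disable() */
	task_state = STOPPED;		/* current->state = TASK_STOPPED */
	maybe_preempt();		/* harmless now; would clobber the state otherwise */
	printf("state seen by schedule(): %s\n",
	       task_state == STOPPED ? "STOPPED" : "RUNNING");
	preempt_count--;		/* preempt_enable(), after schedule() returns */
	return 0;
}

Without the disable, the same sequence can end up RUNNING again before schedule() is reached and the task never actually stops.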
@@ -415,6 +415,25 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #ifdef __KERNEL__
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
 /**
  * ffs - find first bit set
  * @x: the word to search
...
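sched_find_first_bit() is the per-architecture helper behind the O(1) scheduler's priority bitmap: the scheduler keeps one bit per priority level (140 levels), sets a level's bit when its run list becomes non-empty, and picks the next task by finding the lowest set bit. A rough, word-size-independent userspace equivalent, using __builtin_ctzl() in place of __ffs() (names here are illustrative, not kernel code):

#include <stdio.h>

#define NR_PRIO		140
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS	((NR_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* lowest set bit in a 140-bit map; same job as sched_find_first_bit()
 * above, but not unrolled and without the kernel's unlikely() hints */
static int find_first_prio(const unsigned long *b)
{
	int i;

	for (i = 0; i < BITMAP_LONGS; i++)
		if (b[i])
			return __builtin_ctzl(b[i]) + i * BITS_PER_LONG;
	return -1;	/* the kernel version assumes at least one bit is set */
}

int main(void)
{
	unsigned long bitmap[BITMAP_LONGS] = { 0 };

	/* a task becomes runnable at priority 102 */
	bitmap[102 / BITS_PER_LONG] |= 1UL << (102 % BITS_PER_LONG);

	printf("next priority to run: %d\n", find_first_prio(bitmap));
	return 0;
}

The kernel variant unrolls the scan into 32-bit words and marks the first three tests unlikely() because, as the comment says, the first 100 bits are rarely set.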
@@ -6,24 +6,6 @@
 #include <asm/atomic.h>
 #include <asm/pgalloc.h>
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-}
 /*
  * possibly do the LDT unload here?
  */
...
@@ -13,24 +13,23 @@ struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 #define prepare_to_switch()	do { } while(0)
-#define switch_to(prev,next,last) do {					\
+#define switch_to(prev,next) do {					\
 	asm volatile("pushl %%esi\n\t"					\
 		     "pushl %%edi\n\t"					\
 		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %3,%%esp\n\t"	/* restore ESP */	\
+		     "movl %2,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %4\n\t"		/* restore EIP */	\
+		     "pushl %3\n\t"		/* restore EIP */	\
 		     "jmp __switch_to\n"				\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"					\
 		     "popl %%edi\n\t"					\
 		     "popl %%esi\n\t"					\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
-		      "=b" (last)					\
+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip)	\
 		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-		      "a" (prev), "d" (next),				\
-		      "b" (prev));					\
+		      "a" (prev), "d" (next));				\
 } while (0)
 #define _set_base(addr,base) do { unsigned long __pr; \
...
@@ -92,7 +92,6 @@ extern unsigned long nr_running(void);
 #define TASK_UNINTERRUPTIBLE	2
 #define TASK_ZOMBIE		4
 #define TASK_STOPPED		8
-#define PREEMPT_ACTIVE		0x4000000
 #define __set_task_state(tsk, state_value)	\
 	do { (tsk)->state = (state_value); } while (0)
...
@@ -177,9 +177,8 @@ do { \
 do { \
 	--current_thread_info()->preempt_count; \
 	barrier(); \
-	if (unlikely(!(current_thread_info()->preempt_count) && \
-			test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
 } while (0)
 #define spin_lock(lock) \
...
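This hunk moves the zero-count test out of the preempt_enable() fast path: afterwards, preempt_enable() calls preempt_schedule() whenever TIF_NEED_RESCHED is set, and preempt_schedule() itself (see the scheduler hunk at the end of this diff) returns immediately while preempt_get_count() is still non-zero. A small userspace model of the count/flag interaction (all names invented for illustration):

#include <stdio.h>

static int preempt_count;	/* nesting depth of preempt_disable() */
static int need_resched;	/* models TIF_NEED_RESCHED */

static void model_preempt_schedule(void)
{
	if (preempt_count)	/* still inside a non-preemptible region */
		return;
	need_resched = 0;
	printf("schedule()\n");
}

static void model_preempt_disable(void)
{
	preempt_count++;
}

static void model_preempt_enable(void)
{
	--preempt_count;
	if (need_resched)	/* the zero-count check now lives in preempt_schedule() */
		model_preempt_schedule();
}

int main(void)
{
	model_preempt_disable();
	model_preempt_disable();	/* nested critical sections */
	need_resched = 1;		/* an interrupt asked for a reschedule */
	model_preempt_enable();		/* count still 1: schedule() is skipped */
	model_preempt_enable();		/* count drops to 0: schedule() runs */
	return 0;
}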
@@ -476,7 +476,12 @@ static void exit_notify(void)
 			write_lock_irq(&tasklist_lock);
 		}
 	}
-	write_unlock_irq(&tasklist_lock);
+	/*
+	 * No need to unlock IRQs, we'll schedule() immediately
+	 * anyway. In the preemption case this also makes it
+	 * impossible for the task to get runnable again.
+	 */
+	write_unlock(&tasklist_lock);
 }
 NORET_TYPE void do_exit(long code)
...
@@ -435,17 +435,8 @@ static inline void context_switch(task_t *prev, task_t *next)
 		mmdrop(oldmm);
 	}
-	/*
-	 * Here we just switch the register state and the stack. There are
-	 * 3 processes affected by a context switch:
-	 *
-	 * prev ==> .... ==> (last => next)
-	 *
-	 * It's the 'much more previous' 'prev' that is on next's stack,
-	 * but prev is set to (the just run) 'last' process by switch_to().
-	 * This might sound slightly confusing but makes tons of sense.
-	 */
-	switch_to(prev, next, prev);
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next);
 }
 unsigned long nr_running(void)
@@ -770,6 +761,7 @@ asmlinkage void schedule(void)
 	if (unlikely(in_interrupt()))
 		BUG();
+need_resched:
 	preempt_disable();
 	prev = current;
 	rq = this_rq();
@@ -778,15 +770,6 @@ asmlinkage void schedule(void)
 	prev->sleep_timestamp = jiffies;
 	spin_lock_irq(&rq->lock);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * if entering from preempt_schedule, off a kernel preemption,
-	 * go straight to picking the next task.
-	 */
-	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
-		goto pick_next_task;
-#endif
 	switch (prev->state) {
 	case TASK_INTERRUPTIBLE:
 		if (unlikely(signal_pending(prev))) {
@@ -798,7 +781,7 @@ asmlinkage void schedule(void)
 	case TASK_RUNNING:
 		;
 	}
-#if CONFIG_SMP || CONFIG_PREEMPT
+#if CONFIG_SMP
 pick_next_task:
 #endif
 	if (unlikely(!rq->nr_running)) {
@@ -847,6 +830,8 @@ asmlinkage void schedule(void)
 	reacquire_kernel_lock(current);
 	preempt_enable_no_resched();
+	if (test_thread_flag(TIF_NEED_RESCHED))
+		goto need_resched;
 	return;
 }
@@ -856,12 +841,10 @@ asmlinkage void schedule(void)
  */
 asmlinkage void preempt_schedule(void)
 {
-	do {
-		current_thread_info()->preempt_count += PREEMPT_ACTIVE;
-		schedule();
-		current_thread_info()->preempt_count -= PREEMPT_ACTIVE;
-		barrier();
-	} while (test_thread_flag(TIF_NEED_RESCHED));
+	if (unlikely(preempt_get_count()))
+		return;
+	current->state = TASK_RUNNING;
+	schedule();
 }
 #endif /* CONFIG_PREEMPT */
...
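The need_resched: label and the trailing goto turn schedule() into a retry loop: preemption is re-enabled with preempt_enable_no_resched(), which drops the count without rescheduling, so a TIF_NEED_RESCHED that was set while the switch was in flight has to be re-checked explicitly before returning. A toy model of that control flow (invented names, not the kernel's code):

#include <stdio.h>

static int need_resched = 1;	/* models TIF_NEED_RESCHED, set by a wakeup */
static int preempt_count;

static void pick_next_and_switch(void)
{
	static int raced;

	need_resched = 0;
	printf("switched tasks\n");
	if (!raced++)		/* pretend another wakeup races in once */
		need_resched = 1;
}

static void model_schedule(void)
{
need_resched:
	preempt_count++;		/* preempt_disable() */
	pick_next_and_switch();
	preempt_count--;		/* preempt_enable_no_resched(): no re-check here */
	if (need_resched)		/* the re-check this hunk adds */
		goto need_resched;
}

int main(void)
{
	model_schedule();
	return 0;
}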