Commit e9d9db6b authored by Linus Torvalds

Merge master.kernel.org:BK/linux-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8875527c 4fe9df79
@@ -455,9 +455,11 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit)
            between a syscall stop and SIGTRAP delivery */
        current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                        ? 0x80 : 0);
+       preempt_disable();
        current->state = TASK_STOPPED;
        notify_parent(current, SIGCHLD);
        schedule();
+       preempt_enable();
        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use.  strace only continues with a signal if the
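The two added lines close a preemption race in the ptrace stop path: with CONFIG_PREEMPT, an interrupt arriving after current->state = TASK_STOPPED but before schedule() could enter preempt_schedule(), which (in the form introduced at the bottom of this diff) sets the state back to TASK_RUNNING and silently loses the stop. A minimal sketch of the window being closed (illustrative, not part of the patch):

    /* Unprotected sequence, the window the patch closes: */
    current->state = TASK_STOPPED;    /* mark ourselves stopped           */
                                      /* <- a preemption here would run   */
                                      /*    preempt_schedule(), resetting */
                                      /*    the state to TASK_RUNNING,    */
                                      /*    so the stop would be lost     */
    notify_parent(current, SIGCHLD);  /* wake the tracer                  */
    schedule();                       /* would no longer actually stop    */

Disabling preemption across the sequence makes the state change and the final schedule() atomic with respect to kernel preemption.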
@@ -610,9 +610,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
        if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
                /* Let the debugger run.  */
                current->exit_code = signr;
+               preempt_disable();
                current->state = TASK_STOPPED;
                notify_parent(current, SIGCHLD);
                schedule();
+               preempt_enable();
                /* We're back.  Did the debugger cancel the sig?  */
                if (!(signr = current->exit_code))
@@ -667,12 +669,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                case SIGSTOP: {
                        struct signal_struct *sig;
-                       current->state = TASK_STOPPED;
                        current->exit_code = signr;
                        sig = current->p_pptr->sig;
+                       preempt_disable();
+                       current->state = TASK_STOPPED;
                        if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                                notify_parent(current, SIGCHLD);
                        schedule();
+                       preempt_enable();
                        continue;
                }
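Note that the SIGSTOP hunk additionally moves the TASK_STOPPED assignment below preempt_disable(), so the state change itself sits inside the non-preemptible region; the window sketched above is closed the same way here, with the parent notification made conditional on SA_NOCLDSTOP.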
@@ -415,6 +415,25 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #ifdef __KERNEL__
 
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+       if (unlikely(b[0]))
+               return __ffs(b[0]);
+       if (unlikely(b[1]))
+               return __ffs(b[1]) + 32;
+       if (unlikely(b[2]))
+               return __ffs(b[2]) + 64;
+       if (b[3])
+               return __ffs(b[3]) + 96;
+       return __ffs(b[4]) + 128;
+}
+
 /**
  * ffs - find first bit set
  * @x: the word to search
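sched_find_first_bit() serves the O(1) scheduler: the 140 bits map to the scheduler's 140 priority levels (100 real-time priorities plus 40 nice levels) held in five 32-bit words, and the unlikely() hints reflect that the real-time words are empty on most systems. A hypothetical usage sketch (the bitmap contents here are invented for illustration):

    /* bit p is set when priority queue p has runnable tasks */
    unsigned long bitmap[5] = { 0, 0, 0, 0, 0 };
    int idx;

    __set_bit(121, bitmap);              /* a normal-priority task wakes up  */
    idx = sched_find_first_bit(bitmap);  /* b[0..2] are 0, so this reaches   */
                                         /* b[3] and returns __ffs(b[3]) + 96 */
                                         /* = 25 + 96 = 121                   */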
@@ -6,24 +6,6 @@
 #include <asm/atomic.h>
 #include <asm/pgalloc.h>
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-       if (unlikely(b[0]))
-               return __ffs(b[0]);
-       if (unlikely(b[1]))
-               return __ffs(b[1]) + 32;
-       if (unlikely(b[2]))
-               return __ffs(b[2]) + 64;
-       if (b[3])
-               return __ffs(b[3]) + 96;
-       return __ffs(b[4]) + 128;
-}
 /*
  * possibly do the LDT unload here?
  */
@@ -13,24 +13,23 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. */
 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
 #define prepare_to_switch()    do { } while(0)
-#define switch_to(prev,next,last) do { \
+#define switch_to(prev,next) do { \
        asm volatile("pushl %%esi\n\t" \
                     "pushl %%edi\n\t" \
                     "pushl %%ebp\n\t" \
                     "movl %%esp,%0\n\t"        /* save ESP */ \
-                    "movl %3,%%esp\n\t"        /* restore ESP */ \
+                    "movl %2,%%esp\n\t"        /* restore ESP */ \
                     "movl $1f,%1\n\t"          /* save EIP */ \
-                    "pushl %4\n\t"             /* restore EIP */ \
+                    "pushl %3\n\t"             /* restore EIP */ \
                     "jmp __switch_to\n" \
                     "1:\t" \
                     "popl %%ebp\n\t" \
                     "popl %%edi\n\t" \
                     "popl %%esi\n\t" \
-                    :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
-                     "=b" (last) \
+                    :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
                     :"m" (next->thread.esp),"m" (next->thread.eip), \
-                     "a" (prev), "d" (next), \
-                     "b" (prev)); \
+                     "a" (prev), "d" (next)); \
 } while (0)
 
 #define _set_base(addr,base) do { unsigned long __pr; \
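Dropping the third macro argument removes the "=b" (last) output and the matching "b" (prev) input, which is why the remaining input operands are renumbered (%3 becomes %2, %4 becomes %3): with one output gone, every later operand shifts down by one. The resulting operand map, spelled out as a reading aid (not part of the header itself):

    /* After the change:
     *   %0 = prev->thread.esp  (output)   %2 = next->thread.esp  (input)
     *   %1 = prev->thread.eip  (output)   %3 = next->thread.eip  (input)
     * prev and next are additionally pinned in %eax and %edx for
     * __switch_to(); nothing is passed back in %ebx any more. */
    switch_to(prev, next);    /* two-argument form after this diff */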
@@ -92,7 +92,6 @@ extern unsigned long nr_running(void);
 #define TASK_UNINTERRUPTIBLE   2
 #define TASK_ZOMBIE            4
 #define TASK_STOPPED           8
-#define PREEMPT_ACTIVE         0x4000000
 
 #define __set_task_state(tsk, state_value) \
        do { (tsk)->state = (state_value); } while (0)
@@ -177,9 +177,8 @@ do { \
 do { \
        --current_thread_info()->preempt_count; \
        barrier(); \
-       if (unlikely(!(current_thread_info()->preempt_count) && \
-                       test_thread_flag(TIF_NEED_RESCHED))) \
-               preempt_schedule(); \
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule(); \
 } while (0)
 
 #define spin_lock(lock) \
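The preempt_count test has not disappeared; it moves into preempt_schedule() itself (see the kernel/sched.c hunk at the end of this diff), so preempt_enable() only needs to check the reschedule flag. The combined behaviour, as a simplified sketch rather than the literal macros:

    /* Simplified combined logic after this change:
     *
     * preempt_enable():
     *     preempt_count--;
     *     if (TIF_NEED_RESCHED is set)
     *         preempt_schedule();
     *
     * preempt_schedule():
     *     if (preempt_count != 0)    // still inside a critical section
     *         return;                // the outermost enable will retry
     *     current->state = TASK_RUNNING;
     *     schedule();
     */

Nested critical sections bail out early in preempt_schedule(); only the outermost preempt_enable(), where the count reaches zero, actually reschedules.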
@@ -476,7 +476,12 @@ static void exit_notify(void)
                        write_lock_irq(&tasklist_lock);
                }
        }
-       write_unlock_irq(&tasklist_lock);
+       /*
+        * No need to unlock IRQs, we'll schedule() immediately
+        * anyway. In the preemption case this also makes it
+        * impossible for the task to get runnable again.
+        */
+       write_unlock(&tasklist_lock);
 }
 
 NORET_TYPE void do_exit(long code)
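Because exit_notify() now returns with interrupts still disabled, no tick or wakeup can run on this CPU between dropping the tasklist lock and the final schedule() in do_exit(), so a kernel preemption cannot make the half-dead task runnable again. A rough sketch of the caller this comment relies on (the exact shape of the 2.5-era do_exit() tail is assumed here):

    exit_notify();    /* task is now dead to the scheduler; IRQs still off */
    schedule();       /* switch away for the last time; never returns      */
    BUG();            /* reaching this point would be a scheduler bug      */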
@@ -435,17 +435,8 @@ static inline void context_switch(task_t *prev, task_t *next)
                mmdrop(oldmm);
        }
 
-       /*
-        * Here we just switch the register state and the stack. There are
-        * 3 processes affected by a context switch:
-        *
-        * prev ==> .... ==> (last => next)
-        *
-        * It's the 'much more previous' 'prev' that is on next's stack,
-        * but prev is set to (the just run) 'last' process by switch_to().
-        * This might sound slightly confusing but makes tons of sense.
-        */
-       switch_to(prev, next, prev);
+       /* Here we just switch the register state and the stack. */
+       switch_to(prev, next);
 }
 
 unsigned long nr_running(void)
@@ -770,6 +761,7 @@ asmlinkage void schedule(void)
        if (unlikely(in_interrupt()))
                BUG();
 
 need_resched:
+       preempt_disable();
        prev = current;
        rq = this_rq();
@@ -778,15 +770,6 @@ asmlinkage void schedule(void)
                prev->sleep_timestamp = jiffies;
 
        spin_lock_irq(&rq->lock);
-#ifdef CONFIG_PREEMPT
-       /*
-        * if entering from preempt_schedule, off a kernel preemption,
-        * go straight to picking the next task.
-        */
-       if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
-               goto pick_next_task;
-#endif
-
        switch (prev->state) {
        case TASK_INTERRUPTIBLE:
                if (unlikely(signal_pending(prev))) {
@@ -798,7 +781,7 @@ asmlinkage void schedule(void)
        case TASK_RUNNING:
                ;
        }
-#if CONFIG_SMP || CONFIG_PREEMPT
+#if CONFIG_SMP
 pick_next_task:
 #endif
        if (unlikely(!rq->nr_running)) {
@@ -847,6 +830,8 @@ asmlinkage void schedule(void)
        reacquire_kernel_lock(current);
        preempt_enable_no_resched();
+       if (test_thread_flag(TIF_NEED_RESCHED))
+               goto need_resched;
        return;
 }
@@ -856,12 +841,10 @@ asmlinkage void schedule(void)
  */
 asmlinkage void preempt_schedule(void)
 {
-       do {
-               current_thread_info()->preempt_count += PREEMPT_ACTIVE;
-               schedule();
-               current_thread_info()->preempt_count -= PREEMPT_ACTIVE;
-               barrier();
-       } while (test_thread_flag(TIF_NEED_RESCHED));
+       if (unlikely(preempt_get_count()))
+               return;
+       current->state = TASK_RUNNING;
+       schedule();
 }
 
 #endif /* CONFIG_PREEMPT */
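Taken together, the kernel/sched.c hunks retire the PREEMPT_ACTIVE scheme in favor of a simpler protocol: schedule() disables preemption on entry, drops the count with preempt_enable_no_resched() on the way out, and loops if the reschedule flag was raised again in the meantime, while preempt_schedule() becomes a thin entry point that refuses to run inside a critical section. A condensed view of the resulting control flow (illustrative, not the literal source):

    asmlinkage void schedule(void)
    {
    need_resched:
            preempt_disable();            /* no kernel preemption from here   */
            /* ... deactivate prev if sleeping, pick next, context-switch ... */
            preempt_enable_no_resched();  /* drop the count without recursing */
            if (test_thread_flag(TIF_NEED_RESCHED))
                    goto need_resched;    /* a wakeup raced with us: go again */
    }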