Commit ba1074cf authored by Russell King

ARM preempt and scheduler fixups for 2.5.5

parent ad889c6b
...@@ -734,19 +734,22 @@ preempt_return: ...@@ -734,19 +734,22 @@ preempt_return:
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
svc_preempt: teq r9, #0 svc_preempt: teq r9, #0 @ was preempt count = 0
movne pc, lr movne pc, lr @ no
ldr r0, [r6, #4] @ local_irq_count ldr r0, [r6, #4] @ local_irq_count
ldr r1, [r6, #8] @ local_b_count ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1 adds r0, r0, r1
movne pc, lr movne pc, lr
1: set_cpsr_c r0, #MODE_SVC @ enable IRQs ldr r1, [r8, #TI_TASK]
bl SYMBOL_NAME(preempt_schedule) set_cpsr_c r2, #MODE_SVC @ enable IRQs
str r0, [r1, #0] @ current->state = TASK_RUNNING
1: bl SYMBOL_NAME(schedule)
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC @ disable IRQs set_cpsr_c r0, #PSR_I_BIT | MODE_SVC @ disable IRQs
ldr r0, [r8, #TI_FLAGS] ldr r0, [r8, #TI_FLAGS]
tst r0, #_TIF_NEED_RESCHED tst r0, #_TIF_NEED_RESCHED
bne 1b beq preempt_return
b preempt_return set_cpsr_c r0, #MODE_SVC @ enable IRQs
b 1b
#endif #endif
.align 5 .align 5
......
...@@ -55,7 +55,7 @@ work_resched: ...@@ -55,7 +55,7 @@ work_resched:
*/ */
ENTRY(ret_to_user) ENTRY(ret_to_user)
ret_slow_syscall: ret_slow_syscall:
set_cpsr_c r1, #PSR_I_BIT | MODE_SVC set_cpsr_c r1, #PSR_I_BIT | MODE_SVC @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK tst r1, #_TIF_WORK_MASK
beq no_work_pending beq no_work_pending
...@@ -73,12 +73,9 @@ __do_notify_resume: ...@@ -73,12 +73,9 @@ __do_notify_resume:
b SYMBOL_NAME(do_notify_resume) @ note the bl above sets lr b SYMBOL_NAME(do_notify_resume) @ note the bl above sets lr
/* /*
* This is how we return from a fork. __switch_to will be calling us * This is how we return from a fork.
* with r0 pointing at the previous task that was running (ready for
* calling schedule_tail).
*/ */
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
bl SYMBOL_NAME(schedule_tail)
get_thread_info tsk get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1 mov why, #1
......
...@@ -127,7 +127,7 @@ __entry: ...@@ -127,7 +127,7 @@ __entry:
mov r1, #MACH_TYPE_L7200 mov r1, #MACH_TYPE_L7200
#endif #endif
mov r0, #F_BIT | I_BIT | MODE_SVC @ make sure svc mode mov r0, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ make sure svc mode
msr cpsr_c, r0 @ and all irqs disabled msr cpsr_c, r0 @ and all irqs disabled
bl __lookup_processor_type bl __lookup_processor_type
teq r10, #0 @ invalid processor? teq r10, #0 @ invalid processor?
......
...@@ -326,6 +326,23 @@ static inline unsigned long __ffs(unsigned long word) ...@@ -326,6 +326,23 @@ static inline unsigned long __ffs(unsigned long word)
#define ffs(x) generic_ffs(x) #define ffs(x) generic_ffs(x)
/*
* Find first bit set in a 168-bit bitmap, where the first
* 128 bits are unlikely to be set.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
/* /*
* hweightN: returns the hamming weight (i.e. the number * hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word * of bits set) of a N-bit word
......
...@@ -49,27 +49,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -49,27 +49,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev, next) \ #define activate_mm(prev, next) \
switch_mm((prev),(next),NULL,smp_processor_id()) switch_mm((prev),(next),NULL,smp_processor_id())
/*
* Find first bit set in a 168-bit bitmap, where the first
* 128 bits are unlikely to be set.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
#error update this function
#endif
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (unlikely(b[3]))
return __ffs(b[3]) + 96;
if (b[4])
return __ffs(b[4]) + MAX_RT_PRIO;
return __ffs(b[5]) + 32 + MAX_RT_PRIO;
}
#endif #endif
...@@ -14,8 +14,19 @@ ...@@ -14,8 +14,19 @@
#define clear_page(page) memzero((void *)(page), PAGE_SIZE) #define clear_page(page) memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, void *from); extern void copy_page(void *to, void *from);
#define clear_user_page(page, vaddr) cpu_clear_user_page(page,vaddr) #define clear_user_page(addr,vaddr) \
#define copy_user_page(to, from, vaddr) cpu_copy_user_page(to,from,vaddr) do { \
preempt_disable(); \
cpu_clear_user_page(addr, vaddr); \
preempt_enable(); \
} while (0)
#define copy_user_page(to,from,vaddr) \
do { \
preempt_disable(); \
cpu_copy_user_page(to, from, vaddr); \
preempt_enable(); \
} while (0)
#ifdef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS
/* /*
......
...@@ -53,9 +53,9 @@ extern asmlinkage void __backtrace(void); ...@@ -53,9 +53,9 @@ extern asmlinkage void __backtrace(void);
struct thread_info; struct thread_info;
extern struct task_struct *__switch_to(struct thread_info *, struct thread_info *); extern struct task_struct *__switch_to(struct thread_info *, struct thread_info *);
#define switch_to(prev,next,last) \ #define switch_to(prev,next) \
do { \ do { \
last = __switch_to(prev->thread_info,next->thread_info); \ __switch_to(prev->thread_info,next->thread_info); \
mb(); \ mb(); \
} while (0) } while (0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment