Commit 8a57a00b authored by Richard Henderson, committed by Richard Henderson

Follow mingo's scheduling changes for x86.

parent b7ac5241
@@ -489,10 +489,8 @@ alpha_switch_to:
 	.prologue 0
 	bsr	$1,do_switch_stack
 	call_pal PAL_swpctx
-	unop
-	bsr	$1,undo_switch_stack
 	lda	$8,0x3fff
-	mov	$17,$0
+	bsr	$1,undo_switch_stack
 	bic	$30,$8,$8
 	ret	$31,($26),1
 	.end alpha_switch_to
@@ -503,7 +501,7 @@ alpha_switch_to:
 	.ent ret_from_fork
 ret_from_fork:
 	lda	$26,ret_from_sys_call
-	mov	$0,$16
+	mov	$17,$16
 	jmp	$31,schedule_tail
 	.end ret_from_fork
 #endif
@@ -460,6 +460,27 @@ find_next_bit(void * addr, unsigned long size, unsigned long offset)
 
 #ifdef __KERNEL__
 
+/*
+ * Every architecture must define this function.  It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set.  It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+static inline unsigned long
+sched_find_first_bit(unsigned long b[3])
+{
+	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
+	unsigned long ofs;
+
+	ofs = (b1 ? 64 : 128);
+	b1 = (b1 ? b1 : b2);
+	ofs = (b0 ? 0 : ofs);
+	b0 = (b0 ? b0 : b1);
+
+	return __ffs(b0) + ofs;
+}
+
 #define ext2_set_bit                 __test_and_set_bit
 #define ext2_clear_bit               __test_and_clear_bit
 #define ext2_test_bit                test_bit
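For context: this 140-bit map is the O(1) scheduler's per-runqueue priority bitmap, one bit per priority level, and the first 100 bits cover the real-time priorities, which are usually empty. The helper added above picks the lowest nonzero word without branching: each ternary maps naturally onto conditional-move instructions (cmoveq/cmovne on Alpha), narrowing the search while accumulating the chosen word's bit offset. The standalone sketch below is not part of the commit; it exercises the same selection logic in userspace, assumes a 64-bit unsigned long as on Alpha, and substitutes GCC's __builtin_ctzl for the kernel's __ffs (both return the index of the lowest set bit of a nonzero word).

/* Userspace sketch of the branchless lookup above; not kernel source.
 * Assumes 64-bit unsigned long; __builtin_ctzl stands in for __ffs. */
#include <stdio.h>

static unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);	/* if word 1 is empty, word 2 must hold the bit */
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);	/* word 0 takes precedence when nonzero */
	b0 = (b0 ? b0 : b1);

	return __builtin_ctzl(b0) + ofs;
}

int main(void)
{
	/* Only bit 100 of the 140-bit map is set: word 1, bit 36. */
	unsigned long b[3] = { 0, 1UL << 36, 0 };

	printf("%lu\n", sched_find_first_bit(b));	/* prints 100 */
	return 0;
}

Unlike the variant removed in the next hunk, which tests (b0 | b1) and branches, this form stays branch-free, so the common case of an empty real-time range pays no misprediction cost.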
@@ -21,28 +21,6 @@
 #include <asm/io.h>
 #endif
 
-/* ??? This does not belong here.  */
-/*
- * Every architecture must define this function.  It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set.  It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int
-sched_find_first_bit(unsigned long *b)
-{
-	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
-	unsigned long offset = 128;
-
-	if (unlikely(b0 | b1)) {
-		b2 = (b0 ? b0 : b1);
-		offset = (b0 ? 0 : 64);
-	}
-
-	return __ffs(b2) + offset;
-}
-
 extern inline unsigned long
 __reload_thread(struct pcb_struct *pcb)
@@ -131,15 +131,13 @@ extern void halt(void) __attribute__((noreturn));
 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
 
 #define prepare_to_switch()	do { } while(0)
-#define switch_to(prev,next,last)			\
-do {							\
-	unsigned long pcbb;				\
-	pcbb = virt_to_phys(&(next)->thread_info->pcb);	\
-	(last) = alpha_switch_to(pcbb, (prev));		\
-	check_mmu_context();				\
+#define switch_to(prev,next)					\
+do {								\
+	alpha_switch_to(virt_to_phys(&(next)->thread_info->pcb), (prev)); \
+	check_mmu_context();					\
 } while (0)
 
-extern struct task_struct* alpha_switch_to(unsigned long, struct task_struct*);
+extern void alpha_switch_to(unsigned long, struct task_struct*);
 
 #define mb() \
 __asm__ __volatile__("mb": : :"memory")
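This last change ties the pieces together: alpha_switch_to no longer returns the old task in $0 (hence the dropped mov $17,$0 at the top of the commit), ret_from_fork now forwards prev to schedule_tail straight from $17, the second argument register, instead of from $0, the return register, and switch_to loses its last out-parameter. The mock below is hypothetical, not kernel source: task_struct, alpha_switch_to, and check_mmu_context are stubs, and a fake pcb field stands in for virt_to_phys(&(next)->thread_info->pcb). It only demonstrates the new two-argument, statement-like shape of the macro.

/* Hypothetical caller-side mock of the new switch_to; not kernel source. */
#include <stdio.h>

struct task_struct { const char *comm; unsigned long pcb; };

static void alpha_switch_to(unsigned long pcbb, struct task_struct *prev)
{
	printf("leaving %s, next pcb at %#lx\n", prev->comm, pcbb);
}

static void check_mmu_context(void) { }

/* Same shape as the new macro in the diff; the stub address replaces
 * virt_to_phys(&(next)->thread_info->pcb). */
#define switch_to(prev,next)					\
do {								\
	alpha_switch_to((unsigned long)&(next)->pcb, (prev));	\
	check_mmu_context();					\
} while (0)

int main(void)
{
	struct task_struct a = { "swapper", 0 }, b = { "init", 0 };
	struct task_struct *prev = &a, *next = &b;

	switch_to(prev, next);	/* a plain statement; no 'last' result */
	return 0;
}

The do { ... } while (0) wrapper is what lets switch_to behave as a single statement, e.g. under an unbraced if.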