Commit 03aed178 authored by Paul Mackerras's avatar Paul Mackerras

PPC fixes for SMP; also fix the stack overflow detection, remove
various bits of cruft, and remove the third argument to switch_to.
parent bff60e8b
#
# MPC4xx driver options
#
mainmenu_option next_comment
comment 'MPC4xx Driver Options'
if [ "$CONFIG_STB03xxx" = "y" ]; then
bool 'STB IR Keyboard' CONFIG_STB_KB
bool 'SICC Serial port' CONFIG_SERIAL_SICC
if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
define_bool CONFIG_UART1_DFLT_CONSOLE y
define_bool CONFIG_SERIAL_SICC_CONSOLE y
fi
fi
endmenu
...@@ -294,6 +294,11 @@ if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then ...@@ -294,6 +294,11 @@ if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then
fi fi
fi fi
if [ "$CONFIG_ALL_PPC" = "y" ]; then
bool 'Support for ISA-bus hardware' CONFIG_ISA
else
define_bool CONFIG_ISA n
fi
define_bool CONFIG_EISA n define_bool CONFIG_EISA n
define_bool CONFIG_SBUS n define_bool CONFIG_SBUS n
...@@ -322,12 +327,6 @@ else ...@@ -322,12 +327,6 @@ else
fi fi
fi fi
if [ "$CONFIG_ALL_PPC" = "y" ]; then
bool 'Support for ISA-bus hardware' CONFIG_ISA
else
define_bool CONFIG_ISA n
fi
# only elf supported, a.out is not -- Cort # only elf supported, a.out is not -- Cort
if [ "$CONFIG_PROC_FS" = "y" ]; then if [ "$CONFIG_PROC_FS" = "y" ]; then
define_bool CONFIG_KCORE_ELF y define_bool CONFIG_KCORE_ELF y
...@@ -588,8 +587,18 @@ if [ "$CONFIG_8260" = "y" ]; then ...@@ -588,8 +587,18 @@ if [ "$CONFIG_8260" = "y" ]; then
source arch/ppc/8260_io/Config.in source arch/ppc/8260_io/Config.in
fi fi
if [ "$CONFIG_4xx" = "y" ]; then if [ "$CONFIG_4xx" = "y" ]; then
source arch/ppc/4xx_io/Config.in mainmenu_option next_comment
comment 'IBM 4xx options'
if [ "$CONFIG_STB03xxx" = "y" ]; then
bool 'STB IR Keyboard' CONFIG_STB_KB
bool 'SICC Serial port' CONFIG_SERIAL_SICC
if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
define_bool CONFIG_UART1_DFLT_CONSOLE y
define_bool CONFIG_SERIAL_SICC_CONSOLE y
fi
fi
endmenu
fi fi
source drivers/usb/Config.in source drivers/usb/Config.in
...@@ -598,6 +607,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then ...@@ -598,6 +607,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in source net/bluetooth/Config.in
fi fi
source lib/Config.in
mainmenu_option next_comment mainmenu_option next_comment
comment 'Kernel hacking' comment 'Kernel hacking'
...@@ -629,5 +640,3 @@ if [ "$CONFIG_MCPN765" = "y" -o "$CONFIG_SANDPOINT" = "y" \ ...@@ -629,5 +640,3 @@ if [ "$CONFIG_MCPN765" = "y" -o "$CONFIG_SANDPOINT" = "y" \
bool 'Support for early boot texts over serial port' CONFIG_SERIAL_TEXT_DEBUG bool 'Support for early boot texts over serial port' CONFIG_SERIAL_TEXT_DEBUG
fi fi
endmenu endmenu
source lib/Config.in
...@@ -41,6 +41,72 @@ ...@@ -41,6 +41,72 @@
#undef SHOW_SYSCALLS #undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK #undef SHOW_SYSCALLS_TASK
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_head.S */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 *
 * NOTE(review): on entry (set up by the exception-vector prologue,
 * not visible here -- confirm against the vector stubs): r21 = the
 * exception frame, r22 = faulting NIP, r23 = pre-exception MSR,
 * r20 = MSR value to resume with, LR = per-vector handler descriptor.
 */
.globl transfer_to_handler
transfer_to_handler:
stw r22,_NIP(r21) /* save the faulting instruction pointer */
stw r23,_MSR(r21) /* save the pre-exception MSR */
SAVE_4GPRS(8, r21) /* finish the GPR save: r8-r11, ... */
SAVE_8GPRS(12, r21) /* ... r12-r19, ... */
SAVE_8GPRS(24, r21) /* ... r24-r31 */
andi. r23,r23,MSR_PR /* CR0.eq set iff we came from the kernel */
mfspr r23,SPRG3 /* SPRG3 -> current->thread */
addi r2,r23,-THREAD /* set r2 to current */
tovirt(r2,r2)
beq 2f /* branch if from kernel; else fix up THREAD.regs */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23) /* current->thread.regs = this frame */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
stw r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
b 3f
2: /* if from kernel, check for stack overflow */
lwz r22,THREAD_INFO-THREAD(r23) /* r22 = current->thread_info (stack base) */
cmplw r1,r22 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
3:
mflr r23 /* LR points at this vector's handler descriptor */
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r21)
li r22,0
stw r22,RESULT(r21) /* clear RESULT in the frame */
mtspr SPRG2,r22 /* r1 is now kernel sp */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
FIX_SRR1(r20,r22)
mtspr SRR0,r24 /* resume at the handler ... */
mtspr SRR1,r20 /* ... with the MSR prepared in r20 */
mtlr r23
SYNC
RFI /* jump to handler, enable MMU */
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = regs argument for StackOverflow() */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD /* fresh stack in the init task */
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
SYNC
RFI
#endif /* CONFIG_PPC_ISERIES */
#ifdef SHOW_SYSCALLS_TASK #ifdef SHOW_SYSCALLS_TASK
.data .data
show_syscalls_task: show_syscalls_task:
......
...@@ -734,69 +734,6 @@ InstructionSegment: ...@@ -734,69 +734,6 @@ InstructionSegment:
b InstructionSegmentCont b InstructionSegmentCont
#endif /* CONFIG_PPC64BRIDGE */ #endif /* CONFIG_PPC64BRIDGE */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 *
 * NOTE(review): relies on the exception-vector prologue (not visible
 * here): r21 = exception frame, r22 = faulting NIP, r23 = saved MSR,
 * r20 = MSR to resume with, LR = per-vector handler descriptor.
 */
.globl transfer_to_handler
transfer_to_handler:
stw r22,_NIP(r21) /* save the faulting instruction pointer */
stw r23,_MSR(r21) /* save the pre-exception MSR */
SAVE_4GPRS(8, r21) /* finish the GPR save: r8-r11, ... */
SAVE_8GPRS(12, r21) /* ... r12-r19, ... */
SAVE_8GPRS(24, r21) /* ... r24-r31 */
andi. r23,r23,MSR_PR /* CR0.eq set iff we came from the kernel */
mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
beq 2f /* skip the fixup when from kernel */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23) /* current->thread.regs = this frame */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
stw r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
2: addi r2,r23,-THREAD /* set r2 to current */
tovirt(r2,r2)
mflr r23 /* LR points at this vector's handler descriptor */
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r21)
li r22,0
stw r22,RESULT(r21) /* clear RESULT in the frame */
mtspr SPRG2,r22 /* r1 is now kernel sp */
addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
cmplw 0,r1,r2
cmplw 1,r1,r24
crand 1,1,4
bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
FIX_SRR1(r20,r22)
mtspr SRR0,r24 /* resume at the handler ... */
mtspr SRR1,r20 /* ... with the MSR prepared in r20 */
mtlr r23
SYNC
RFI /* jump to handler, enable MMU */
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = regs argument for StackOverflow() */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD /* fresh stack in the init task */
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
SYNC
RFI
/* /*
* This task wants to use the FPU now. * This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously, * On UP, disable FP for the task which had the FPU previously,
...@@ -1221,15 +1158,15 @@ __secondary_start: ...@@ -1221,15 +1158,15 @@ __secondary_start:
bl identify_cpu bl identify_cpu
bl call_setup_cpu /* Call setup_cpu for this CPU */ bl call_setup_cpu /* Call setup_cpu for this CPU */
/* get current */ /* get current_thread_info and current */
lis r2,current_set@h lis r1,secondary_ti@ha
ori r2,r2,current_set@l tophys(r1,r1)
tophys(r2,r2) lwz r1,secondary_ti@l(r1)
slwi r24,r24,2 /* get current_set[cpu#] */ tophys(r2,r1)
lwzx r2,r2,r24 lwz r2,TI_TASK(r2)
/* stack */ /* stack */
addi r1,r2,THREAD_SIZE-STACK_FRAME_OVERHEAD addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
li r0,0 li r0,0
tophys(r3,r1) tophys(r3,r1)
stw r0,0(r3) stw r0,0(r3)
......
...@@ -826,87 +826,6 @@ finish_tlb_load: ...@@ -826,87 +826,6 @@ finish_tlb_load:
PPC405_ERR77_SYNC PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */ rfi /* Should sync shadow TLBs */
/* This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 *
 * NOTE(review): 4xx variant -- dispatches standard exceptions via
 * SRR0/SRR1+rfi and critical exceptions via SRR2/SRR3+rfci, selected
 * by r7 (STND_EXC or not). Entry register contract (r20/r21/r22/r23)
 * comes from the vector prologue, not visible here -- confirm there.
 */
_GLOBAL(transfer_to_handler)
stw r22,_NIP(r21) /* Save the faulting IP on the stack */
stw r23,_MSR(r21) /* Save the exception MSR on stack */
SAVE_4GPRS(8, r21) /* Save r8 through r11 on the stack */
SAVE_8GPRS(12, r21) /* Save r12 through r19 on the stack */
SAVE_8GPRS(24, r21) /* Save r24 through r31 on the stack */
andi. r23,r23,MSR_PR /* Is this from user space? */
mfspr r23,SPRN_SPRG3 /* If from user, fix up THREAD.regs */
beq 2f /* No, it is from the kernel; branch. */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23) /* current->thread.regs = this frame */
2: addi r2,r23,-THREAD /* Set r2 to current thread */
tovirt(r2,r2)
mflr r23 /* LR points at this vector's handler descriptor */
andi. r24,r23,0x3f00 /* Get vector offset */
stw r24,TRAP(r21)
li r22,RESULT
/* No need to put an erratum #77 workaround here
because interrupts are currently disabled */
stwcx. r22,r22,r21 /* Clear the reservation */
li r22,0
stw r22,RESULT(r21) /* Clear RESULT in the frame */
mtspr SPRN_SPRG2,r22 /* r1 is now the kernel stack pointer */
addi r24,r2,TASK_STRUCT_SIZE /* Check for kernel stack overflow */
cmplw cr0,r1,r2
cmplw cr1,r1,r24
crand cr1,cr1,cr4
bgt- stack_ovf /* If r2 < r1 < r2 + TASK_STRUCT_SIZE */
lwz r24,0(r23) /* Virtual address of the handler */
lwz r23,4(r23) /* Handler return pointer */
cmpwi cr0,r7,STND_EXC /* What type of exception is this? */
bne 3f /* It is a critical exception... */
/* Standard exception jump path
*/
/* We have to recover r7 from the register save stack.
 * It was used to indicate standard/critical exception. In
 * the case of a standard exception that is the system call
 * trap, it may have originally contained one of the syscall
 * parameters and we have to get it back now.
 */
lwz r7,GPR7(r21)
mtspr SPRN_SRR0,r24 /* Set up the instruction pointer */
mtspr SPRN_SRR1,r20 /* Set up the machine state register */
mtlr r23 /* Set up the return pointer */
SYNC
/* We shouldn't need a 405 erratum #77 workaround here, because we're not
 * actually returning to the interrupted instruction yet. */
rfi
/* Critical exception jump path
*/
3: mtspr SPRN_SRR2,r24 /* Set up the instruction pointer */
mtspr SPRN_SRR3,r20 /* Set up the machine state register */
mtlr r23 /* Set up the return pointer */
SYNC
rfci
/* On kernel stack overflow, load up an initial stack pointer and call
 * StackOverflow(regs), which should NOT return.
 */
stack_ovf:
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = regs argument for StackOverflow() */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE - STACK_FRAME_OVERHEAD /* fresh stack in the init task */
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
mtspr SPRN_SRR0,r24
mtspr SPRN_SRR1,r20
SYNC
rfi
/* extern void giveup_altivec(struct task_struct *prev) /* extern void giveup_altivec(struct task_struct *prev)
* *
* The PowerPC 4xx family of processors do not have AltiVec capabilities, so * The PowerPC 4xx family of processors do not have AltiVec capabilities, so
......
...@@ -637,63 +637,6 @@ DataTLBError: ...@@ -637,63 +637,6 @@ DataTLBError:
. = 0x2000 . = 0x2000
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 *
 * NOTE(review): 8xx variant -- additionally clears MSR_POW in the
 * saved MSR before storing it. Entry register contract (r20-r23, LR)
 * comes from the vector prologue, not visible here -- confirm there.
 */
.globl transfer_to_handler
transfer_to_handler:
stw r22,_NIP(r21) /* save the faulting instruction pointer */
lis r22,MSR_POW@h
andc r23,r23,r22 /* clear the power-management bit in the saved MSR */
stw r23,_MSR(r21)
SAVE_4GPRS(8, r21) /* finish the GPR save: r8-r11, ... */
SAVE_8GPRS(12, r21) /* ... r12-r19, ... */
SAVE_8GPRS(24, r21) /* ... r24-r31 */
andi. r23,r23,MSR_PR /* CR0.eq set iff we came from the kernel */
mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
beq 2f /* skip the fixup when from kernel */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23) /* current->thread.regs = this frame */
2: addi r2,r23,-THREAD /* set r2 to current */
tovirt(r2,r2)
mflr r23 /* LR points at this vector's handler descriptor */
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r21)
li r22,0
stw r22,RESULT(r21) /* clear RESULT in the frame */
mtspr SPRG2,r22 /* r1 is now kernel sp */
addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
cmplw 0,r1,r2
cmplw 1,r1,r24
crand 1,1,4
bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
mtspr SRR0,r24 /* resume at the handler ... */
mtspr SRR1,r20 /* ... with the MSR prepared in r20 */
mtlr r23
SYNC
rfi /* jump to handler, enable MMU */
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = regs argument for StackOverflow() */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD /* fresh stack in the init task */
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
mtspr SRR0,r24
mtspr SRR1,r20
SYNC
rfi
.globl giveup_fpu .globl giveup_fpu
giveup_fpu: giveup_fpu:
blr blr
...@@ -707,7 +650,6 @@ _GLOBAL(__setup_cpu_8xx) ...@@ -707,7 +650,6 @@ _GLOBAL(__setup_cpu_8xx)
* This is where the main kernel code starts. * This is where the main kernel code starts.
*/ */
start_here: start_here:
/* ptr to current */ /* ptr to current */
lis r2,init_task@h lis r2,init_task@h
ori r2,r2,init_task@l ori r2,r2,init_task@l
......
...@@ -531,13 +531,17 @@ transfer_to_handler: ...@@ -531,13 +531,17 @@ transfer_to_handler:
SAVE_GPR(31, r1) SAVE_GPR(31, r1)
andi. r23,r23,MSR_PR andi. r23,r23,MSR_PR
mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */ mfspr r23,SPRG3
beq 2f addi r2,r23,-THREAD /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
addi r24,r1,STACK_FRAME_OVERHEAD addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23) stw r24,PT_REGS(r23)
2: addi r2,r23,-THREAD /* set r2 to current */ b 3f
li r22,RESULT 2: /* if from kernel, check for stack overflow */
stwcx. r22,r22,r1 /* to clear the reservation */ lwz r22,THREAD_INFO(r2)
cmplw r1,r22 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
3:
li r22,0 li r22,0
stw r22,RESULT(r1) stw r22,RESULT(r1)
mfspr r23,SPRG1 /* Get Paca address */ mfspr r23,SPRG1 /* Get Paca address */
...@@ -545,11 +549,6 @@ transfer_to_handler: ...@@ -545,11 +549,6 @@ transfer_to_handler:
mflr r23 mflr r23
andi. r24,r23,0x3f00 /* get vector offset */ andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r1) stw r24,TRAP(r1)
addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
cmplw 0,r1,r2
cmplw 1,r1,r24
crand 1,1,4
bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */ lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */ lwz r23,4(r23) /* where to go when done */
li r20,MSR_KERNEL li r20,MSR_KERNEL
......
...@@ -586,8 +586,6 @@ atomic_t global_bh_count; ...@@ -586,8 +586,6 @@ atomic_t global_bh_count;
static void show(char * str) static void show(char * str)
{ {
int i;
unsigned long *stack;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu); printk("\n%s, CPU %d:\n", str, cpu);
...@@ -598,13 +596,6 @@ static void show(char * str) ...@@ -598,13 +596,6 @@ static void show(char * str)
atomic_read(&global_bh_count), atomic_read(&global_bh_count),
local_bh_count(0), local_bh_count(0),
local_bh_count(1)); local_bh_count(1));
stack = (unsigned long *) &str;
for (i = 40; i ; i--) {
unsigned long x = *++stack;
if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
printk("<[%08lx]> ", x);
}
}
} }
static inline void wait_on_bh(void) static inline void wait_on_bh(void)
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include "ppc_defs.h" #include "ppc_defs.h"
.text .text
...@@ -375,7 +376,8 @@ _GLOBAL(_tlbia) ...@@ -375,7 +376,8 @@ _GLOBAL(_tlbia)
SYNC SYNC
lis r9,hash_table_lock@h lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l ori r9,r9,hash_table_lock@l
lwz r8,CPU(r2) rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
oris r8,r8,10 oris r8,r8,10
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
...@@ -420,7 +422,8 @@ _GLOBAL(_tlbie) ...@@ -420,7 +422,8 @@ _GLOBAL(_tlbie)
SYNC SYNC
lis r9,hash_table_lock@h lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l ori r9,r9,hash_table_lock@l
lwz r8,CPU(r2) rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
oris r8,r8,11 oris r8,r8,11
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
......
...@@ -42,19 +42,13 @@ ...@@ -42,19 +42,13 @@
int int
main(void) main(void)
{ {
DEFINE(THREAD_SIZE, THREAD_SIZE);
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(STATE, offsetof(struct task_struct, state));
DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(MM, offsetof(struct task_struct, mm)); DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
DEFINE(KSP, offsetof(struct thread_struct, ksp)); DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
......
...@@ -197,9 +197,7 @@ dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) ...@@ -197,9 +197,7 @@ dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
return 1; return 1;
} }
void void switch_to(struct task_struct *prev, struct task_struct *new)
_switch_to(struct task_struct *prev, struct task_struct *new,
struct task_struct **last)
{ {
struct thread_struct *new_thread, *old_thread; struct thread_struct *new_thread, *old_thread;
unsigned long s; unsigned long s;
...@@ -221,7 +219,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new, ...@@ -221,7 +219,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
* every switch, just a save. * every switch, just a save.
* -- Cort * -- Cort
*/ */
if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) ) if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
giveup_fpu(prev); giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
/* /*
...@@ -240,8 +238,6 @@ _switch_to(struct task_struct *prev, struct task_struct *new, ...@@ -240,8 +238,6 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
#endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
current_set[smp_processor_id()] = new;
/* Avoid the trap. On smp this this never happens since /* Avoid the trap. On smp this this never happens since
* we don't set last_task_used_altivec -- Cort * we don't set last_task_used_altivec -- Cort
*/ */
...@@ -249,7 +245,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new, ...@@ -249,7 +245,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
new->thread.regs->msr |= MSR_VEC; new->thread.regs->msr |= MSR_VEC;
new_thread = &new->thread; new_thread = &new->thread;
old_thread = &current->thread; old_thread = &current->thread;
*last = _switch(old_thread, new_thread); _switch(old_thread, new_thread);
__restore_flags(s); __restore_flags(s);
} }
...@@ -282,7 +278,7 @@ void show_regs(struct pt_regs * regs) ...@@ -282,7 +278,7 @@ void show_regs(struct pt_regs * regs)
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
printk(" CPU: %d", current->processor); printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
printk("\n"); printk("\n");
......
...@@ -162,7 +162,7 @@ int show_cpuinfo(struct seq_file *m, void *v) ...@@ -162,7 +162,7 @@ int show_cpuinfo(struct seq_file *m, void *v)
return 0; return 0;
pvr = cpu_data[i].pvr; pvr = cpu_data[i].pvr;
lpj = cpu_data[i].loops_per_jiffy; lpj = cpu_data[i].loops_per_jiffy;
seq_printf(m, "processor\t: %lu\n", i); seq_printf(m, "processor\t: %d\n", i);
#else #else
pvr = mfspr(PVR); pvr = mfspr(PVR);
lpj = loops_per_jiffy; lpj = loops_per_jiffy;
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/residual.h> #include <asm/residual.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/thread_info.h>
int smp_threads_ready; int smp_threads_ready;
volatile int smp_commenced; volatile int smp_commenced;
...@@ -49,11 +50,12 @@ atomic_t ipi_sent; ...@@ -49,11 +50,12 @@ atomic_t ipi_sent;
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
unsigned int prof_multiplier[NR_CPUS]; unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS]; unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time; unsigned long cache_decay_ticks;
static int max_cpus __initdata = NR_CPUS; static int max_cpus __initdata = NR_CPUS;
unsigned long cpu_online_map; unsigned long cpu_online_map;
int smp_hw_index[NR_CPUS]; int smp_hw_index[NR_CPUS];
static struct smp_ops_t *smp_ops; static struct smp_ops_t *smp_ops;
struct thread_info *secondary_ti;
/* all cpu mappings are 1-1 -- Cort */ /* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS]; volatile unsigned long cpu_callin_map[NR_CPUS];
...@@ -66,6 +68,8 @@ int start_secondary(void *); ...@@ -66,6 +68,8 @@ int start_secondary(void *);
extern int cpu_idle(void *unused); extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void); void smp_call_function_interrupt(void);
void smp_message_pass(int target, int msg, unsigned long data, int wait); void smp_message_pass(int target, int msg, unsigned long data, int wait);
static int __smp_call_function(void (*func) (void *info), void *info,
int wait, int target);
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
extern void smp_iSeries_space_timers( unsigned nr ); extern void smp_iSeries_space_timers( unsigned nr );
...@@ -108,7 +112,7 @@ void smp_message_recv(int msg, struct pt_regs *regs) ...@@ -108,7 +112,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
smp_call_function_interrupt(); smp_call_function_interrupt();
break; break;
case PPC_MSG_RESCHEDULE: case PPC_MSG_RESCHEDULE:
current->work.need_resched = 1; set_need_resched();
break; break;
case PPC_MSG_INVALIDATE_TLB: case PPC_MSG_INVALIDATE_TLB:
_tlbia(); _tlbia();
...@@ -192,8 +196,8 @@ static struct call_data_struct { ...@@ -192,8 +196,8 @@ static struct call_data_struct {
* in the system. * in the system.
*/ */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
int wait) int wait)
/* /*
* [SUMMARY] Run a function on all other CPUs. * [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking. * <func> The function to run. This must be fast and non-blocking.
...@@ -206,13 +210,24 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, ...@@ -206,13 +210,24 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
* You must not call this function with disabled interrupts or from a * You must not call this function with disabled interrupts or from a
* hardware interrupt handler, you may call it from a bottom half handler. * hardware interrupt handler, you may call it from a bottom half handler.
*/ */
{
if (smp_num_cpus <= 1)
return 0;
return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF);
}
static int __smp_call_function(void (*func) (void *info), void *info,
int wait, int target)
{ {
struct call_data_struct data; struct call_data_struct data;
int ret = -1, cpus = smp_num_cpus-1; int ret = -1;
int timeout; int timeout;
int ncpus = 1;
if (!cpus) if (target == MSG_ALL_BUT_SELF)
return 0; ncpus = smp_num_cpus - 1;
else if (target == MSG_ALL)
ncpus = smp_num_cpus;
data.func = func; data.func = func;
data.info = info; data.info = info;
...@@ -224,11 +239,11 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, ...@@ -224,11 +239,11 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_lock_bh(&call_lock); spin_lock_bh(&call_lock);
call_data = &data; call_data = &data;
/* Send a message to all other CPUs and wait for them to respond */ /* Send a message to all other CPUs and wait for them to respond */
smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0); smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0);
/* Wait for response */ /* Wait for response */
timeout = 1000000; timeout = 1000000;
while (atomic_read(&data.started) != cpus) { while (atomic_read(&data.started) != ncpus) {
if (--timeout == 0) { if (--timeout == 0) {
printk("smp_call_function on cpu %d: other cpus not responding (%d)\n", printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
smp_processor_id(), atomic_read(&data.started)); smp_processor_id(), atomic_read(&data.started));
...@@ -240,7 +255,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, ...@@ -240,7 +255,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait) { if (wait) {
timeout = 1000000; timeout = 1000000;
while (atomic_read(&data.finished) != cpus) { while (atomic_read(&data.finished) != ncpus) {
if (--timeout == 0) { if (--timeout == 0) {
printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n", printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started)); smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
...@@ -276,9 +291,28 @@ void smp_call_function_interrupt(void) ...@@ -276,9 +291,28 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished); atomic_inc(&call_data->finished);
} }
/*
* Task migration callback.
*/
void smp_task_migration_interrupt(void *new_task)
{
task_t *p;
p = new_task;
sched_task_migrated(p);
}
/*
* This function sends a 'task migration' IPI to another CPU.
* Must be called from syscall contexts, with interrupts *enabled*.
*/
void smp_migrate_task(int cpu, task_t *p)
{
__smp_call_function(smp_task_migration_interrupt, p, 0, cpu);
}
void __init smp_boot_cpus(void) void __init smp_boot_cpus(void)
{ {
extern struct task_struct *current_set[NR_CPUS];
int i, cpu_nr; int i, cpu_nr;
struct task_struct *p; struct task_struct *p;
...@@ -292,7 +326,6 @@ void __init smp_boot_cpus(void) ...@@ -292,7 +326,6 @@ void __init smp_boot_cpus(void)
* cpu 0, the master -- Cort * cpu 0, the master -- Cort
*/ */
cpu_callin_map[0] = 1; cpu_callin_map[0] = 1;
current->cpu = 0;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
prof_counter[i] = 1; prof_counter[i] = 1;
...@@ -300,10 +333,9 @@ void __init smp_boot_cpus(void) ...@@ -300,10 +333,9 @@ void __init smp_boot_cpus(void)
} }
/* /*
* XXX very rough, assumes 20 bus cycles to read a cache line, * XXX very rough.
* timebase increments every 4 bus cycles, 32kB L1 data cache.
*/ */
cacheflush_time = 5 * 1024; cache_decay_ticks = HZ/100;
smp_ops = ppc_md.smp_ops; smp_ops = ppc_md.smp_ops;
if (smp_ops == NULL) { if (smp_ops == NULL) {
...@@ -311,7 +343,7 @@ void __init smp_boot_cpus(void) ...@@ -311,7 +343,7 @@ void __init smp_boot_cpus(void)
return; return;
} }
/* Probe arch for CPUs */ /* Probe platform for CPUs */
cpu_nr = smp_ops->probe(); cpu_nr = smp_ops->probe();
/* /*
...@@ -338,9 +370,8 @@ void __init smp_boot_cpus(void) ...@@ -338,9 +370,8 @@ void __init smp_boot_cpus(void)
init_idle(p, i); init_idle(p, i);
unhash_process(p); unhash_process(p);
p->cpu = i; secondary_ti = p->thread_info;
p->cpus_allowed = 1 << i; /* we schedule the first task manually */ p->thread_info->cpu = i;
current_set[i] = p;
/* /*
* There was a cache flush loop here to flush the cache * There was a cache flush loop here to flush the cache
...@@ -357,11 +388,10 @@ void __init smp_boot_cpus(void) ...@@ -357,11 +388,10 @@ void __init smp_boot_cpus(void)
* use this value that I found through experimentation. * use this value that I found through experimentation.
* -- Cort * -- Cort
*/ */
for ( c = 1000; c && !cpu_callin_map[i] ; c-- ) for (c = 1000; c && !cpu_callin_map[i]; c--)
udelay(100); udelay(100);
if ( cpu_callin_map[i] ) if (cpu_callin_map[i]) {
{
char buf[32]; char buf[32];
sprintf(buf, "found cpu %d", i); sprintf(buf, "found cpu %d", i);
if (ppc_md.progress) ppc_md.progress(buf, 0x350+i); if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
...@@ -488,7 +518,7 @@ void __init smp_commence(void) ...@@ -488,7 +518,7 @@ void __init smp_commence(void)
void __init smp_callin(void) void __init smp_callin(void)
{ {
int cpu = current->processor; int cpu = smp_processor_id();
smp_store_cpu_info(cpu); smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy); set_dec(tb_ticks_per_jiffy);
...@@ -505,7 +535,7 @@ void __init smp_callin(void) ...@@ -505,7 +535,7 @@ void __init smp_callin(void)
*/ */
cpu_online_map |= 1UL << smp_processor_id(); cpu_online_map |= 1UL << smp_processor_id();
while(!smp_commenced) while (!smp_commenced)
barrier(); barrier();
/* see smp_commence for more info */ /* see smp_commence for more info */
......
...@@ -48,7 +48,7 @@ static unsigned long __spin_trylock(volatile unsigned long *lock) ...@@ -48,7 +48,7 @@ static unsigned long __spin_trylock(volatile unsigned long *lock)
return ret; return ret;
} }
void _spin_lock(spinlock_t *lock) void _raw_spin_lock(spinlock_t *lock)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
unsigned int stuck = INIT_STUCK; unsigned int stuck = INIT_STUCK;
...@@ -69,7 +69,7 @@ void _spin_lock(spinlock_t *lock) ...@@ -69,7 +69,7 @@ void _spin_lock(spinlock_t *lock)
lock->owner_cpu = cpu; lock->owner_cpu = cpu;
} }
int spin_trylock(spinlock_t *lock) int _raw_spin_trylock(spinlock_t *lock)
{ {
if (__spin_trylock(&lock->lock)) if (__spin_trylock(&lock->lock))
return 0; return 0;
...@@ -78,7 +78,7 @@ int spin_trylock(spinlock_t *lock) ...@@ -78,7 +78,7 @@ int spin_trylock(spinlock_t *lock)
return 1; return 1;
} }
void _spin_unlock(spinlock_t *lp) void _raw_spin_unlock(spinlock_t *lp)
{ {
if ( !lp->lock ) if ( !lp->lock )
printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n", printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
...@@ -99,7 +99,7 @@ void _spin_unlock(spinlock_t *lp) ...@@ -99,7 +99,7 @@ void _spin_unlock(spinlock_t *lp)
* with the high bit (sign) being the "write" bit. * with the high bit (sign) being the "write" bit.
* -- Cort * -- Cort
*/ */
void _read_lock(rwlock_t *rw) void _raw_read_lock(rwlock_t *rw)
{ {
unsigned long stuck = INIT_STUCK; unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -126,7 +126,7 @@ void _read_lock(rwlock_t *rw) ...@@ -126,7 +126,7 @@ void _read_lock(rwlock_t *rw)
wmb(); wmb();
} }
void _read_unlock(rwlock_t *rw) void _raw_read_unlock(rwlock_t *rw)
{ {
if ( rw->lock == 0 ) if ( rw->lock == 0 )
printk("_read_unlock(): %s/%d (nip %08lX) lock %lx\n", printk("_read_unlock(): %s/%d (nip %08lX) lock %lx\n",
...@@ -136,7 +136,7 @@ void _read_unlock(rwlock_t *rw) ...@@ -136,7 +136,7 @@ void _read_unlock(rwlock_t *rw)
atomic_dec((atomic_t *) &(rw)->lock); atomic_dec((atomic_t *) &(rw)->lock);
} }
void _write_lock(rwlock_t *rw) void _raw_write_lock(rwlock_t *rw)
{ {
unsigned long stuck = INIT_STUCK; unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -176,7 +176,7 @@ void _write_lock(rwlock_t *rw) ...@@ -176,7 +176,7 @@ void _write_lock(rwlock_t *rw)
wmb(); wmb();
} }
void _write_unlock(rwlock_t *rw) void _raw_write_unlock(rwlock_t *rw)
{ {
if ( !(rw->lock & (1<<31)) ) if ( !(rw->lock & (1<<31)) )
printk("_write_lock(): %s/%d (nip %08lX) lock %lx\n", printk("_write_lock(): %s/%d (nip %08lX) lock %lx\n",
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <kernel/ppc_defs.h> #include <kernel/ppc_defs.h>
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -63,9 +64,7 @@ hash_page: ...@@ -63,9 +64,7 @@ hash_page:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r2,r7,hash_table_lock@h addis r2,r7,hash_table_lock@h
ori r2,r2,hash_table_lock@l ori r2,r2,hash_table_lock@l
mfspr r5,SPRG3 lis r0,0x0fff
lwz r0,CPU-THREAD(r5)
oris r0,r0,0x0fff
b 10f b 10f
11: lwz r6,0(r2) 11: lwz r6,0(r2)
cmpwi 0,r6,0 cmpwi 0,r6,0
...@@ -215,8 +214,9 @@ _GLOBAL(add_hash_page) ...@@ -215,8 +214,9 @@ _GLOBAL(add_hash_page)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lis r9,hash_table_lock@h lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l ori r9,r9,hash_table_lock@l
lwz r8,CPU(r2) rlwinm r8,r1,0,0,18
oris r8,r8,10 lwz r8,TI_CPU(r8)
oris r8,r8,12
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
bne- 11f bne- 11f
...@@ -511,7 +511,8 @@ _GLOBAL(flush_hash_page) ...@@ -511,7 +511,8 @@ _GLOBAL(flush_hash_page)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lis r9,hash_table_lock@h lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l ori r9,r9,hash_table_lock@l
lwz r8,CPU(r2) rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
oris r8,r8,9 oris r8,r8,9
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
......
...@@ -135,7 +135,6 @@ void show_mem(void) ...@@ -135,7 +135,6 @@ void show_mem(void)
{ {
int i,free = 0,total = 0,reserved = 0; int i,free = 0,total = 0,reserved = 0;
int shared = 0, cached = 0; int shared = 0, cached = 0;
struct task_struct *p;
int highmem = 0; int highmem = 0;
printk("Mem-info:\n"); printk("Mem-info:\n");
...@@ -153,7 +152,7 @@ void show_mem(void) ...@@ -153,7 +152,7 @@ void show_mem(void)
else if (!page_count(mem_map+i)) else if (!page_count(mem_map+i))
free++; free++;
else else
shared += atomic_read(&mem_map[i].count) - 1; shared += page_count(mem_map+i) - 1;
} }
printk("%d pages of RAM\n",total); printk("%d pages of RAM\n",total);
printk("%d pages of HIGHMEM\n", highmem); printk("%d pages of HIGHMEM\n", highmem);
...@@ -163,49 +162,6 @@ void show_mem(void) ...@@ -163,49 +162,6 @@ void show_mem(void)
printk("%d pages swap cached\n",cached); printk("%d pages swap cached\n",cached);
printk("%d pages in page table cache\n",(int)pgtable_cache_size); printk("%d pages in page table cache\n",(int)pgtable_cache_size);
show_buffers(); show_buffers();
printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
"Ctx", "Ctx<<4", "Last Sys", "pc", "task");
#ifdef CONFIG_SMP
printk(" %3s", "CPU");
#endif /* CONFIG_SMP */
printk("\n");
for_each_task(p)
{
printk("%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx ",
p->comm,p->pid,
(p->mm)?p->mm->context:0,
(p->mm)?(p->mm->context<<4):0,
p->thread.last_syscall,
(p->thread.regs)?user_mode(p->thread.regs) ? 'u' : 'k' : '?',
(p->thread.regs)?p->thread.regs->nip:0,
(ulong)p);
{
int iscur = 0;
#ifdef CONFIG_SMP
printk("%3d ", p->processor);
if ( (p->processor != NO_PROC_ID) &&
(p == current_set[p->processor]) )
{
iscur = 1;
printk("current");
}
#else
if ( p == current )
{
iscur = 1;
printk("current");
}
if ( p == last_task_used_math )
{
if ( iscur )
printk(",");
printk("last math");
}
#endif /* CONFIG_SMP */
printk("\n");
}
}
} }
void si_meminfo(struct sysinfo *val) void si_meminfo(struct sysinfo *val)
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define _PPC_BITOPS_H #define _PPC_BITOPS_H
#include <linux/config.h> #include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/atomic.h> #include <asm/atomic.h>
...@@ -272,6 +273,79 @@ static __inline__ int ffs(int x) ...@@ -272,6 +273,79 @@ static __inline__ int ffs(int x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
/*
* Find the first bit set in a 140-bit bitmap.
* The first 100 bits are unlikely to be set.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
static __inline__ unsigned long find_next_bit(void *addr,
unsigned long size, unsigned long offset)
{
unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
unsigned int result = offset & ~31UL;
unsigned int tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if (offset) {
tmp = *p++;
tmp &= ~0UL << offset;
if (size < 32)
goto found_first;
if (tmp)
goto found_middle;
size -= 32;
result += 32;
}
while (size >= 32) {
if ((tmp = *p++) != 0)
goto found_middle;
result += 32;
size -= 32;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= ~0UL >> (32 - size);
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
*
* Returns the bit-number of the first set bit, not the number of the byte
* containing a bit.
*/
#define find_first_bit(addr, size) \
find_next_bit((addr), (size), 0)
/* /*
* This implementation of find_{first,next}_zero_bit was stolen from * This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h. * Linus' asm-alpha/bitops.h.
......
...@@ -10,19 +10,6 @@ ...@@ -10,19 +10,6 @@
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/mmu.h> #include <asm/mmu.h>
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
}
/* /*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the * (virtual segment identifiers) for each context. Although the
......
...@@ -28,10 +28,10 @@ struct cpuinfo_PPC { ...@@ -28,10 +28,10 @@ struct cpuinfo_PPC {
unsigned long pgtable_cache_sz; unsigned long pgtable_cache_sz;
}; };
extern struct cpuinfo_PPC cpu_data[NR_CPUS]; extern struct cpuinfo_PPC cpu_data[];
extern unsigned long cpu_online_map; extern unsigned long cpu_online_map;
extern unsigned long smp_proc_in_lock[NR_CPUS]; extern unsigned long smp_proc_in_lock[];
extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern volatile unsigned long cpu_callin_map[];
extern int smp_tb_synchronized; extern int smp_tb_synchronized;
extern void smp_store_cpu_info(int id); extern void smp_store_cpu_info(int id);
...@@ -50,7 +50,7 @@ extern void smp_local_timer_interrupt(struct pt_regs *); ...@@ -50,7 +50,7 @@ extern void smp_local_timer_interrupt(struct pt_regs *);
#define smp_processor_id() (current_thread_info()->cpu) #define smp_processor_id() (current_thread_info()->cpu)
extern int smp_hw_index[NR_CPUS]; extern int smp_hw_index[];
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
struct klock_info_struct { struct klock_info_struct {
......
...@@ -36,7 +36,7 @@ typedef struct { ...@@ -36,7 +36,7 @@ typedef struct {
#ifndef SPINLOCK_DEBUG #ifndef SPINLOCK_DEBUG
static inline void spin_lock(spinlock_t *lock) static inline void _raw_spin_lock(spinlock_t *lock)
{ {
unsigned long tmp; unsigned long tmp;
...@@ -59,24 +59,21 @@ static inline void spin_lock(spinlock_t *lock) ...@@ -59,24 +59,21 @@ static inline void spin_lock(spinlock_t *lock)
: "cr0", "memory"); : "cr0", "memory");
} }
static inline void spin_unlock(spinlock_t *lock) static inline void _raw_spin_unlock(spinlock_t *lock)
{ {
__asm__ __volatile__("eieio # spin_unlock": : :"memory"); __asm__ __volatile__("eieio # spin_unlock": : :"memory");
lock->lock = 0; lock->lock = 0;
} }
#define spin_trylock(lock) (!test_and_set_bit(0,(lock))) #define _raw_spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#else #else
extern void _spin_lock(spinlock_t *lock); extern void _raw_spin_lock(spinlock_t *lock);
extern void _spin_unlock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock);
extern int spin_trylock(spinlock_t *lock); extern int _raw_spin_trylock(spinlock_t *lock);
extern unsigned long __spin_trylock(volatile unsigned long *lock); extern unsigned long __spin_trylock(volatile unsigned long *lock);
#define spin_lock(lp) _spin_lock(lp)
#define spin_unlock(lp) _spin_unlock(lp)
#endif #endif
/* /*
...@@ -107,7 +104,7 @@ typedef struct { ...@@ -107,7 +104,7 @@ typedef struct {
#ifndef SPINLOCK_DEBUG #ifndef SPINLOCK_DEBUG
static __inline__ void read_lock(rwlock_t *rw) static __inline__ void _raw_read_lock(rwlock_t *rw)
{ {
unsigned int tmp; unsigned int tmp;
...@@ -130,7 +127,7 @@ static __inline__ void read_lock(rwlock_t *rw) ...@@ -130,7 +127,7 @@ static __inline__ void read_lock(rwlock_t *rw)
: "cr0", "memory"); : "cr0", "memory");
} }
static __inline__ void read_unlock(rwlock_t *rw) static __inline__ void _raw_read_unlock(rwlock_t *rw)
{ {
unsigned int tmp; unsigned int tmp;
...@@ -146,7 +143,7 @@ static __inline__ void read_unlock(rwlock_t *rw) ...@@ -146,7 +143,7 @@ static __inline__ void read_unlock(rwlock_t *rw)
: "cr0", "memory"); : "cr0", "memory");
} }
static __inline__ void write_lock(rwlock_t *rw) static __inline__ void _raw_write_lock(rwlock_t *rw)
{ {
unsigned int tmp; unsigned int tmp;
...@@ -169,7 +166,7 @@ static __inline__ void write_lock(rwlock_t *rw) ...@@ -169,7 +166,7 @@ static __inline__ void write_lock(rwlock_t *rw)
: "cr0", "memory"); : "cr0", "memory");
} }
static __inline__ void write_unlock(rwlock_t *rw) static __inline__ void _raw_write_unlock(rwlock_t *rw)
{ {
__asm__ __volatile__("eieio # write_unlock": : :"memory"); __asm__ __volatile__("eieio # write_unlock": : :"memory");
rw->lock = 0; rw->lock = 0;
...@@ -177,15 +174,10 @@ static __inline__ void write_unlock(rwlock_t *rw) ...@@ -177,15 +174,10 @@ static __inline__ void write_unlock(rwlock_t *rw)
#else #else
extern void _read_lock(rwlock_t *rw); extern void _raw_read_lock(rwlock_t *rw);
extern void _read_unlock(rwlock_t *rw); extern void _raw_read_unlock(rwlock_t *rw);
extern void _write_lock(rwlock_t *rw); extern void _raw_write_lock(rwlock_t *rw);
extern void _write_unlock(rwlock_t *rw); extern void _raw_write_unlock(rwlock_t *rw);
#define read_lock(rw) _read_lock(rw)
#define write_lock(rw) _write_lock(rw)
#define write_unlock(rw) _write_unlock(rw)
#define read_unlock(rw) _read_unlock(rw)
#endif #endif
......
...@@ -81,9 +81,7 @@ extern void note_scsi_host(struct device_node *, void *); ...@@ -81,9 +81,7 @@ extern void note_scsi_host(struct device_node *, void *);
struct task_struct; struct task_struct;
#define prepare_to_switch() do { } while(0) #define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last)) extern void switch_to(struct task_struct *, struct task_struct *);
extern void _switch_to(struct task_struct *, struct task_struct *,
struct task_struct **);
struct thread_struct; struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev, extern struct task_struct *_switch(struct thread_struct *prev,
......
...@@ -10,11 +10,12 @@ ...@@ -10,11 +10,12 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h> #include <asm/processor.h>
#ifndef __ASSEMBLY__
/* /*
* low level task data. * low level task data.
* If you change this, change the TI_* offsets below to match.
*/ */
struct thread_info { struct thread_info {
struct task_struct *task; /* main task structure */ struct task_struct *task; /* main task structure */
...@@ -51,9 +52,21 @@ static inline struct thread_info *current_thread_info(void) ...@@ -51,9 +52,21 @@ static inline struct thread_info *current_thread_info(void)
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task) #define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task) #define put_thread_info(ti) put_task_struct((ti)->task)
#define THREAD_SIZE (2*PAGE_SIZE)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
/*
* Size of kernel stack for each process.
*/
#define THREAD_SIZE 8192 /* 2 pages */
/*
* Offsets in thread_info structure, used in assembly code
*/
#define TI_TASK 0
#define TI_EXECDOMAIN 4
#define TI_FLAGS 8
#define TI_CPU 12
/* /*
* thread information flag bit numbers * thread information flag bit numbers
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment