Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
2472788a
Commit
2472788a
authored
Feb 19, 2002
by
Paul Mackerras
Browse files
Options
Browse Files
Download
Plain Diff
Merge
bk://ppc@ppc.bkbits.net/for-linus-ppc
into cargo.(none):/home/paulus/kernel/for-linus-ppc
parents
b45a640b
03aed178
Changes
25
Hide whitespace changes
Inline
Side-by-side
Showing
25 changed files
with
305 additions
and
431 deletions
+305
-431
arch/ppc/4xx_io/Config.in
arch/ppc/4xx_io/Config.in
+0
-16
arch/ppc/config.in
arch/ppc/config.in
+19
-10
arch/ppc/kernel/entry.S
arch/ppc/kernel/entry.S
+68
-0
arch/ppc/kernel/head.S
arch/ppc/kernel/head.S
+7
-74
arch/ppc/kernel/head_4xx.S
arch/ppc/kernel/head_4xx.S
+0
-83
arch/ppc/kernel/head_8xx.S
arch/ppc/kernel/head_8xx.S
+0
-62
arch/ppc/kernel/iSeries_head.S
arch/ppc/kernel/iSeries_head.S
+9
-15
arch/ppc/kernel/irq.c
arch/ppc/kernel/irq.c
+0
-9
arch/ppc/kernel/misc.S
arch/ppc/kernel/misc.S
+5
-2
arch/ppc/kernel/mk_defs.c
arch/ppc/kernel/mk_defs.c
+1
-7
arch/ppc/kernel/process.c
arch/ppc/kernel/process.c
+10
-8
arch/ppc/kernel/setup.c
arch/ppc/kernel/setup.c
+1
-8
arch/ppc/kernel/signal.c
arch/ppc/kernel/signal.c
+1
-0
arch/ppc/kernel/smp.c
arch/ppc/kernel/smp.c
+54
-24
arch/ppc/lib/locks.c
arch/ppc/lib/locks.c
+7
-7
arch/ppc/mm/hashtable.S
arch/ppc/mm/hashtable.S
+7
-6
arch/ppc/mm/init.c
arch/ppc/mm/init.c
+1
-45
arch/ppc/vmlinux.lds
arch/ppc/vmlinux.lds
+5
-1
include/asm-ppc/bitops.h
include/asm-ppc/bitops.h
+74
-0
include/asm-ppc/mmu_context.h
include/asm-ppc/mmu_context.h
+0
-19
include/asm-ppc/processor.h
include/asm-ppc/processor.h
+2
-4
include/asm-ppc/smp.h
include/asm-ppc/smp.h
+4
-4
include/asm-ppc/spinlock.h
include/asm-ppc/spinlock.h
+14
-22
include/asm-ppc/system.h
include/asm-ppc/system.h
+1
-3
include/asm-ppc/thread_info.h
include/asm-ppc/thread_info.h
+15
-2
No files found.
arch/ppc/4xx_io/Config.in
deleted
100644 → 0
View file @
b45a640b
#
# MPC4xx driver options
#
mainmenu_option next_comment
comment 'MPC4xx Driver Options'
if [ "$CONFIG_STB03xxx" = "y" ]; then
bool 'STB IR Keyboard' CONFIG_STB_KB
bool 'SICC Serial port' CONFIG_SERIAL_SICC
if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
define_bool CONFIG_UART1_DFLT_CONSOLE y
define_bool CONFIG_SERIAL_SICC_CONSOLE y
fi
fi
endmenu
arch/ppc/config.in
View file @
2472788a
...
...
@@ -294,6 +294,11 @@ if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then
fi
fi
if [ "$CONFIG_ALL_PPC" = "y" ]; then
bool 'Support for ISA-bus hardware' CONFIG_ISA
else
define_bool CONFIG_ISA n
fi
define_bool CONFIG_EISA n
define_bool CONFIG_SBUS n
...
...
@@ -322,12 +327,6 @@ else
fi
fi
if [ "$CONFIG_ALL_PPC" = "y" ]; then
bool 'Support for ISA-bus hardware' CONFIG_ISA
else
define_bool CONFIG_ISA n
fi
# only elf supported, a.out is not -- Cort
if [ "$CONFIG_PROC_FS" = "y" ]; then
define_bool CONFIG_KCORE_ELF y
...
...
@@ -588,8 +587,18 @@ if [ "$CONFIG_8260" = "y" ]; then
source arch/ppc/8260_io/Config.in
fi
if [ "$CONFIG_4xx" = "y" ]; then
source arch/ppc/4xx_io/Config.in
if [ "$CONFIG_4xx" = "y"]; then
mainmenu_option next_comment
comment 'IBM 4xx options'
if [ "$CONFIG_STB03xxx" = "y" ]; then
bool 'STB IR Keyboard' CONFIG_STB_KB
bool 'SICC Serial port' CONFIG_SERIAL_SICC
if [ "$CONFIG_SERIAL_SICC" = "y" -a "$CONFIG_UART0_TTYS1" = "y" ]; then
define_bool CONFIG_UART1_DFLT_CONSOLE y
define_bool CONFIG_SERIAL_SICC_CONSOLE y
fi
fi
endmenu
fi
source drivers/usb/Config.in
...
...
@@ -598,6 +607,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source lib/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
...
...
@@ -629,5 +640,3 @@ if [ "$CONFIG_MCPN765" = "y" -o "$CONFIG_SANDPOINT" = "y" \
bool 'Support for early boot texts over serial port' CONFIG_SERIAL_TEXT_DEBUG
fi
endmenu
source lib/Config.in
arch/ppc/kernel/entry.S
View file @
2472788a
...
...
@@ -41,6 +41,72 @@
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_head.S */
/*
*
This
code
finishes
saving
the
registers
to
the
exception
frame
*
and
jumps
to
the
appropriate
handler
for
the
exception
,
turning
*
on
address
translation
.
*/
.
globl
transfer_to_handler
transfer_to_handler
:
stw
r22
,
_NIP
(
r21
)
stw
r23
,
_MSR
(
r21
)
SAVE_4GPRS
(8,
r21
)
SAVE_8GPRS
(12,
r21
)
SAVE_8GPRS
(24,
r21
)
andi
.
r23
,
r23
,
MSR_PR
mfspr
r23
,
SPRG3
addi
r2
,
r23
,-
THREAD
/*
set
r2
to
current
*/
tovirt
(
r2
,
r2
)
beq
2
f
/*
if
from
user
,
fix
up
THREAD
.
regs
*/
addi
r24
,
r1
,
STACK_FRAME_OVERHEAD
stw
r24
,
PT_REGS
(
r23
)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
mfspr
r22
,
SPRN_VRSAVE
/*
if
G4
,
save
vrsave
register
value
*/
stw
r22
,
THREAD_VRSAVE
(
r23
)
END_FTR_SECTION_IFSET
(
CPU_FTR_ALTIVEC
)
#endif /* CONFIG_ALTIVEC */
b
3
f
2
:
/
*
if
from
kernel
,
check
for
stack
overflow
*/
lwz
r22
,
THREAD_INFO
-
THREAD
(
r23
)
cmplw
r1
,
r22
/*
if
r1
<=
current
->
thread_info
*/
ble
-
stack_ovf
/*
then
the
kernel
stack
overflowed
*/
3
:
mflr
r23
andi
.
r24
,
r23
,
0x3f00
/*
get
vector
offset
*/
stw
r24
,
TRAP
(
r21
)
li
r22
,
0
stw
r22
,
RESULT
(
r21
)
mtspr
SPRG2
,
r22
/*
r1
is
now
kernel
sp
*/
lwz
r24
,
0
(
r23
)
/*
virtual
address
of
handler
*/
lwz
r23
,
4
(
r23
)
/*
where
to
go
when
done
*/
FIX_SRR1
(
r20
,
r22
)
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
mtlr
r23
SYNC
RFI
/*
jump
to
handler
,
enable
MMU
*/
/*
*
On
kernel
stack
overflow
,
load
up
an
initial
stack
pointer
*
and
call
StackOverflow
(
regs
),
which
should
not
return
.
*/
stack_ovf
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lis
r1
,
init_thread_union
@
ha
addi
r1
,
r1
,
init_thread_union
@
l
addi
r1
,
r1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
lis
r24
,
StackOverflow
@
ha
addi
r24
,
r24
,
StackOverflow
@
l
li
r20
,
MSR_KERNEL
FIX_SRR1
(
r20
,
r22
)
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
SYNC
RFI
#endif /* CONFIG_PPC_ISERIES */
#ifdef SHOW_SYSCALLS_TASK
.
data
show_syscalls_task
:
...
...
@@ -277,7 +343,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
.
globl
ret_from_fork
ret_from_fork
:
#ifdef CONFIG_SMP
bl
schedule_tail
#endif
rlwinm
r3
,
r1
,
0
,
0
,
18
lwz
r3
,
TI_FLAGS
(
r3
)
andi
.
r0
,
r3
,
_TIF_SYSCALL_TRACE
...
...
arch/ppc/kernel/head.S
View file @
2472788a
...
...
@@ -734,69 +734,6 @@ InstructionSegment:
b
InstructionSegmentCont
#endif /* CONFIG_PPC64BRIDGE */
/*
*
This
code
finishes
saving
the
registers
to
the
exception
frame
*
and
jumps
to
the
appropriate
handler
for
the
exception
,
turning
*
on
address
translation
.
*/
.
globl
transfer_to_handler
transfer_to_handler
:
stw
r22
,
_NIP
(
r21
)
stw
r23
,
_MSR
(
r21
)
SAVE_4GPRS
(8,
r21
)
SAVE_8GPRS
(12,
r21
)
SAVE_8GPRS
(24,
r21
)
andi
.
r23
,
r23
,
MSR_PR
mfspr
r23
,
SPRG3
/*
if
from
user
,
fix
up
THREAD
.
regs
*/
beq
2
f
addi
r24
,
r1
,
STACK_FRAME_OVERHEAD
stw
r24
,
PT_REGS
(
r23
)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
mfspr
r22
,
SPRN_VRSAVE
/*
if
G4
,
save
vrsave
register
value
*/
stw
r22
,
THREAD_VRSAVE
(
r23
)
END_FTR_SECTION_IFSET
(
CPU_FTR_ALTIVEC
)
#endif /* CONFIG_ALTIVEC */
2
:
addi
r2
,
r23
,-
THREAD
/*
set
r2
to
current
*/
tovirt
(
r2
,
r2
)
mflr
r23
andi
.
r24
,
r23
,
0x3f00
/*
get
vector
offset
*/
stw
r24
,
TRAP
(
r21
)
li
r22
,
0
stw
r22
,
RESULT
(
r21
)
mtspr
SPRG2
,
r22
/*
r1
is
now
kernel
sp
*/
addi
r24
,
r2
,
TASK_STRUCT_SIZE
/*
check
for
kernel
stack
overflow
*/
cmplw
0
,
r1
,
r2
cmplw
1
,
r1
,
r24
crand
1
,
1
,
4
bgt
-
stack_ovf
/*
if
r2
<
r1
<
r2
+
TASK_STRUCT_SIZE
*/
lwz
r24
,
0
(
r23
)
/*
virtual
address
of
handler
*/
lwz
r23
,
4
(
r23
)
/*
where
to
go
when
done
*/
FIX_SRR1
(
r20
,
r22
)
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
mtlr
r23
SYNC
RFI
/*
jump
to
handler
,
enable
MMU
*/
/*
*
On
kernel
stack
overflow
,
load
up
an
initial
stack
pointer
*
and
call
StackOverflow
(
regs
),
which
should
not
return
.
*/
stack_ovf
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lis
r1
,
init_thread_union
@
ha
addi
r1
,
r1
,
init_thread_union
@
l
addi
r1
,
r1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
lis
r24
,
StackOverflow
@
ha
addi
r24
,
r24
,
StackOverflow
@
l
li
r20
,
MSR_KERNEL
FIX_SRR1
(
r20
,
r22
)
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
SYNC
RFI
/*
*
This
task
wants
to
use
the
FPU
now
.
*
On
UP
,
disable
FP
for
the
task
which
had
the
FPU
previously
,
...
...
@@ -1221,15 +1158,15 @@ __secondary_start:
bl
identify_cpu
bl
call_setup_cpu
/*
Call
setup_cpu
for
this
CPU
*/
/
*
get
current
*/
lis
r
2
,
current_set
@
h
ori
r2
,
r2
,
current_set
@
l
tophys
(
r2
,
r2
)
slwi
r24
,
r24
,
2
/*
get
current_set
[
cpu
#]
*/
lwz
x
r2
,
r2
,
r24
/
*
get
current
_thread_info
and
current
*/
lis
r
1
,
secondary_ti
@
ha
tophys
(
r1
,
r1
)
lwz
r1
,
secondary_ti
@
l
(
r1
)
tophys
(
r2
,
r1
)
lwz
r2
,
TI_TASK
(
r2
)
/
*
stack
*/
addi
r1
,
r
2
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
addi
r1
,
r
1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
li
r0
,
0
tophys
(
r3
,
r1
)
stw
r0
,
0
(
r3
)
...
...
@@ -1727,10 +1664,6 @@ m8260_gorom:
.
data
.
globl
sdata
sdata
:
.
globl
init_thread_union
init_thread_union
:
.
space
8192
.
globl
empty_zero_page
empty_zero_page
:
.
space
4096
...
...
arch/ppc/kernel/head_4xx.S
View file @
2472788a
...
...
@@ -826,87 +826,6 @@ finish_tlb_load:
PPC405_ERR77_SYNC
rfi
/*
Should
sync
shadow
TLBs
*/
/*
This
code
finishes
saving
the
registers
to
the
exception
frame
*
and
jumps
to
the
appropriate
handler
for
the
exception
,
turning
*
on
address
translation
.
*/
_GLOBAL
(
transfer_to_handler
)
stw
r22
,
_NIP
(
r21
)
/*
Save
the
faulting
IP
on
the
stack
*/
stw
r23
,
_MSR
(
r21
)
/*
Save
the
exception
MSR
on
stack
*/
SAVE_4GPRS
(8,
r21
)
/*
Save
r8
through
r11
on
the
stack
*/
SAVE_8GPRS
(12,
r21
)
/*
Save
r12
through
r19
on
the
stack
*/
SAVE_8GPRS
(24,
r21
)
/*
Save
r24
through
r31
on
the
stack
*/
andi
.
r23
,
r23
,
MSR_PR
/*
Is
this
from
user
space
?
*/
mfspr
r23
,
SPRN_SPRG3
/*
If
from
user
,
fix
up
THREAD
.
regs
*/
beq
2
f
/*
No
,
it
is
from
the
kernel
; branch. */
addi
r24
,
r1
,
STACK_FRAME_OVERHEAD
stw
r24
,
PT_REGS
(
r23
)
2
:
addi
r2
,
r23
,-
THREAD
/*
Set
r2
to
current
thread
*/
tovirt
(
r2
,
r2
)
mflr
r23
andi
.
r24
,
r23
,
0x3f00
/*
Get
vector
offset
*/
stw
r24
,
TRAP
(
r21
)
li
r22
,
RESULT
/
*
No
need
to
put
an
erratum
#
77
workaround
here
because
interrupts
are
currently
disabled
*/
stwcx
.
r22
,
r22
,
r21
/*
Clear
the
reservation
*/
li
r22
,
0
stw
r22
,
RESULT
(
r21
)
mtspr
SPRN_SPRG2
,
r22
/*
r1
is
now
the
kernel
stack
pointer
*/
addi
r24
,
r2
,
TASK_STRUCT_SIZE
/*
Check
for
kernel
stack
overflow
*/
cmplw
cr0
,
r1
,
r2
cmplw
cr1
,
r1
,
r24
crand
cr1
,
cr1
,
cr4
bgt
-
stack_ovf
/*
If
r2
<
r1
<
r2
+
TASK_STRUCT_SIZE
*/
lwz
r24
,
0
(
r23
)
/*
Virtual
address
of
the
handler
*/
lwz
r23
,
4
(
r23
)
/*
Handler
return
pointer
*/
cmpwi
cr0
,
r7
,
STND_EXC
/*
What
type
of
exception
is
this
?
*/
bne
3
f
/*
It
is
a
critical
exception
...
*/
/
*
Standard
exception
jump
path
*/
/
*
We
have
to
recover
r7
from
the
register
save
stack
.
*
It
was
used
to
indicate
standard
/
critical
exception
.
In
*
the
case
of
a
standard
exception
that
is
the
system
call
*
trap
,
it
may
have
originally
contained
one
of
the
syscall
*
parameters
and
we
have
to
get
it
back
now
.
*/
lwz
r7
,
GPR7
(
r21
)
mtspr
SPRN_SRR0
,
r24
/*
Set
up
the
instruction
pointer
*/
mtspr
SPRN_SRR1
,
r20
/*
Set
up
the
machine
state
register
*/
mtlr
r23
/*
Set
up
the
return
pointer
*/
SYNC
/
*
We
shouldn
't need a 405 erratum #77 workaround here, because we'
re
not
*
actually
returning
to
the
interrupted
instruction
yet
.
*/
rfi
/
*
Critical
exception
jump
path
*/
3
:
mtspr
SPRN_SRR2
,
r24
/*
Set
up
the
instruction
pointer
*/
mtspr
SPRN_SRR3
,
r20
/*
Set
up
the
machine
state
register
*/
mtlr
r23
/*
Set
up
the
return
pointer
*/
SYNC
rfci
/*
On
kernel
stack
overlow
,
load
up
an
initial
stack
pointer
and
call
*
StackOverflow
(
regs
),
which
should
NOT
return
.
*/
stack_ovf
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lis
r1
,
init_thread_union
@
ha
addi
r1
,
r1
,
init_thread_union
@
l
addi
r1
,
r1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
lis
r24
,
StackOverflow
@
ha
addi
r24
,
r24
,
StackOverflow
@
l
li
r20
,
MSR_KERNEL
mtspr
SPRN_SRR0
,
r24
mtspr
SPRN_SRR1
,
r20
SYNC
rfi
/*
extern
void
giveup_altivec
(
struct
task_struct
*
prev
)
*
*
The
PowerPC
4
xx
family
of
processors
do
not
have
AltiVec
capabilities
,
so
...
...
@@ -1082,8 +1001,6 @@ _GLOBAL(set_context)
*/
.
data
_GLOBAL
(
sdata
)
_GLOBAL
(
init_thread_union
)
.
space
8192
_GLOBAL
(
empty_zero_page
)
.
space
4096
_GLOBAL
(
swapper_pg_dir
)
...
...
arch/ppc/kernel/head_8xx.S
View file @
2472788a
...
...
@@ -637,63 +637,6 @@ DataTLBError:
.
=
0x2000
/*
*
This
code
finishes
saving
the
registers
to
the
exception
frame
*
and
jumps
to
the
appropriate
handler
for
the
exception
,
turning
*
on
address
translation
.
*/
.
globl
transfer_to_handler
transfer_to_handler
:
stw
r22
,
_NIP
(
r21
)
lis
r22
,
MSR_POW
@
h
andc
r23
,
r23
,
r22
stw
r23
,
_MSR
(
r21
)
SAVE_4GPRS
(8,
r21
)
SAVE_8GPRS
(12,
r21
)
SAVE_8GPRS
(24,
r21
)
andi
.
r23
,
r23
,
MSR_PR
mfspr
r23
,
SPRG3
/*
if
from
user
,
fix
up
THREAD
.
regs
*/
beq
2
f
addi
r24
,
r1
,
STACK_FRAME_OVERHEAD
stw
r24
,
PT_REGS
(
r23
)
2
:
addi
r2
,
r23
,-
THREAD
/*
set
r2
to
current
*/
tovirt
(
r2
,
r2
)
mflr
r23
andi
.
r24
,
r23
,
0x3f00
/*
get
vector
offset
*/
stw
r24
,
TRAP
(
r21
)
li
r22
,
0
stw
r22
,
RESULT
(
r21
)
mtspr
SPRG2
,
r22
/*
r1
is
now
kernel
sp
*/
addi
r24
,
r2
,
TASK_STRUCT_SIZE
/*
check
for
kernel
stack
overflow
*/
cmplw
0
,
r1
,
r2
cmplw
1
,
r1
,
r24
crand
1
,
1
,
4
bgt
-
stack_ovf
/*
if
r2
<
r1
<
r2
+
TASK_STRUCT_SIZE
*/
lwz
r24
,
0
(
r23
)
/*
virtual
address
of
handler
*/
lwz
r23
,
4
(
r23
)
/*
where
to
go
when
done
*/
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
mtlr
r23
SYNC
rfi
/*
jump
to
handler
,
enable
MMU
*/
/*
*
On
kernel
stack
overflow
,
load
up
an
initial
stack
pointer
*
and
call
StackOverflow
(
regs
),
which
should
not
return
.
*/
stack_ovf
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lis
r1
,
init_thread_union
@
ha
addi
r1
,
r1
,
init_thread_union
@
l
addi
r1
,
r1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
lis
r24
,
StackOverflow
@
ha
addi
r24
,
r24
,
StackOverflow
@
l
li
r20
,
MSR_KERNEL
mtspr
SRR0
,
r24
mtspr
SRR1
,
r20
SYNC
rfi
.
globl
giveup_fpu
giveup_fpu
:
blr
...
...
@@ -707,7 +650,6 @@ _GLOBAL(__setup_cpu_8xx)
*
This
is
where
the
main
kernel
code
starts
.
*/
start_here
:
/
*
ptr
to
current
*/
lis
r2
,
init_task
@
h
ori
r2
,
r2
,
init_task
@
l
...
...
@@ -971,10 +913,6 @@ set_dec_cpu6:
.
data
.
globl
sdata
sdata
:
.
globl
init_thread_union
init_thread_union
:
.
space
8192
.
globl
empty_zero_page
empty_zero_page
:
.
space
4096
...
...
arch/ppc/kernel/iSeries_head.S
View file @
2472788a
...
...
@@ -531,13 +531,17 @@ transfer_to_handler:
SAVE_GPR
(31,
r1
)
andi
.
r23
,
r23
,
MSR_PR
mfspr
r23
,
SPRG3
/*
if
from
user
,
fix
up
THREAD
.
regs
*/
beq
2
f
mfspr
r23
,
SPRG3
addi
r2
,
r23
,-
THREAD
/*
set
r2
to
current
*/
beq
2
f
/*
if
from
user
,
fix
up
THREAD
.
regs
*/
addi
r24
,
r1
,
STACK_FRAME_OVERHEAD
stw
r24
,
PT_REGS
(
r23
)
2
:
addi
r2
,
r23
,-
THREAD
/*
set
r2
to
current
*/
li
r22
,
RESULT
stwcx
.
r22
,
r22
,
r1
/*
to
clear
the
reservation
*/
b
3
f
2
:
/
*
if
from
kernel
,
check
for
stack
overflow
*/
lwz
r22
,
THREAD_INFO
(
r2
)
cmplw
r1
,
r22
/*
if
r1
<=
current
->
thread_info
*/
ble
-
stack_ovf
/*
then
the
kernel
stack
overflowed
*/
3
:
li
r22
,
0
stw
r22
,
RESULT
(
r1
)
mfspr
r23
,
SPRG1
/*
Get
Paca
address
*/
...
...
@@ -545,11 +549,6 @@ transfer_to_handler:
mflr
r23
andi
.
r24
,
r23
,
0x3f00
/*
get
vector
offset
*/
stw
r24
,
TRAP
(
r1
)
addi
r24
,
r2
,
TASK_STRUCT_SIZE
/*
check
for
kernel
stack
overflow
*/
cmplw
0
,
r1
,
r2
cmplw
1
,
r1
,
r24
crand
1
,
1
,
4
bgt
-
stack_ovf
/*
if
r2
<
r1
<
r2
+
TASK_STRUCT_SIZE
*/
lwz
r24
,
0
(
r23
)
/*
virtual
address
of
handler
*/
lwz
r23
,
4
(
r23
)
/*
where
to
go
when
done
*/
li
r20
,
MSR_KERNEL
...
...
@@ -1496,11 +1495,6 @@ _GLOBAL(abort)
.
data
.
globl
sdata
sdata
:
.
globl
init_thread_union
init_thread_union
:
.
space
8192
.
globl
empty_zero_page
empty_zero_page
:
.
space
4096
...
...
arch/ppc/kernel/irq.c
View file @
2472788a
...
...
@@ -586,8 +586,6 @@ atomic_t global_bh_count;
static
void
show
(
char
*
str
)
{
int
i
;
unsigned
long
*
stack
;
int
cpu
=
smp_processor_id
();
printk
(
"
\n
%s, CPU %d:
\n
"
,
str
,
cpu
);
...
...
@@ -598,13 +596,6 @@ static void show(char * str)
atomic_read
(
&
global_bh_count
),
local_bh_count
(
0
),
local_bh_count
(
1
));
stack
=
(
unsigned
long
*
)
&
str
;
for
(
i
=
40
;
i
;
i
--
)
{
unsigned
long
x
=
*++
stack
;
if
(
x
>
(
unsigned
long
)
&
init_task_union
&&
x
<
(
unsigned
long
)
&
vsprintf
)
{
printk
(
"<[%08lx]> "
,
x
);
}
}
}
static
inline
void
wait_on_bh
(
void
)
...
...
arch/ppc/kernel/misc.S
View file @
2472788a
...
...
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include "ppc_defs.h"
.
text
...
...
@@ -375,7 +376,8 @@ _GLOBAL(_tlbia)
SYNC
lis
r9
,
hash_table_lock
@
h
ori
r9
,
r9
,
hash_table_lock
@
l
lwz
r8
,
CPU
(
r2
)
rlwinm
r8
,
r1
,
0
,
0
,
18
lwz
r8
,
TI_CPU
(
r8
)
oris
r8
,
r8
,
10
10
:
lwarx
r7
,
0
,
r9
cmpi
0
,
r7
,
0
...
...
@@ -420,7 +422,8 @@ _GLOBAL(_tlbie)
SYNC
lis
r9
,
hash_table_lock
@
h
ori
r9
,
r9
,
hash_table_lock
@
l
lwz
r8
,
CPU
(
r2
)
rlwinm
r8
,
r1
,
0
,
0
,
18
lwz
r8
,
TI_CPU
(
r8
)
oris
r8
,
r8
,
11
10
:
lwarx
r7
,
0
,
r9
cmpi
0
,
r7
,
0
...
...
arch/ppc/kernel/mk_defs.c
View file @
2472788a
...
...
@@ -42,19 +42,13 @@
int
main
(
void
)
{
DEFINE
(
THREAD_SIZE
,
THREAD_SIZE
);
DEFINE
(
TI_CPU
,
offsetof
(
struct
thread_info
,
cpu
));
DEFINE
(
TI_FLAGS
,
offsetof
(
struct
thread_info
,
flags
));
DEFINE
(
STATE
,
offsetof
(
struct
task_struct
,
state
));
DEFINE
(
THREAD
,
offsetof
(
struct
task_struct
,
thread
));
DEFINE
(
THREAD_INFO
,
offsetof
(
struct
task_struct
,
thread_info
));
DEFINE
(
MM
,
offsetof
(
struct
task_struct
,
mm
));
DEFINE
(
ACTIVE_MM
,
offsetof
(
struct
task_struct
,
active_mm
));
DEFINE
(
TASK_STRUCT_SIZE
,
sizeof
(
struct
task_struct
));
DEFINE
(
KSP
,
offsetof
(
struct
thread_struct
,
ksp
));
DEFINE
(
PGDIR
,
offsetof
(
struct
thread_struct
,
pgdir
));
DEFINE
(
LAST_SYSCALL
,
offsetof
(
struct
thread_struct
,
last_syscall
));
DEFINE
(
PT_REGS
,
offsetof
(
struct
thread_struct
,
regs
));
DEFINE
(
TASK_FLAGS
,
offsetof
(
struct
task_struct
,
flags
));
DEFINE
(
THREAD_FPEXC_MODE
,
offsetof
(
struct
thread_struct
,
fpexc_mode
));
DEFINE
(
THREAD_FPR0
,
offsetof
(
struct
thread_struct
,
fpr
[
0
]));
DEFINE
(
THREAD_FPSCR
,
offsetof
(
struct
thread_struct
,
fpscr
));
...
...
arch/ppc/kernel/process.c
View file @
2472788a
...
...
@@ -59,6 +59,12 @@ static struct files_struct init_files = INIT_FILES;
static
struct
signal_struct
init_signals
=
INIT_SIGNALS
;
struct
mm_struct
init_mm
=
INIT_MM
(
init_mm
);
/* this is 8kB-aligned so we can get to the thread_info struct
at the base of it from the stack pointer with 1 integer instruction. */
union
thread_union
init_thread_union
__attribute__
((
__section__
(
".data.init_task"
)))
=
{
INIT_THREAD_INFO
(
init_task
)
};
/* initial task structure */
struct
task_struct
init_task
=
INIT_TASK
(
init_task
);
...
...
@@ -191,9 +197,7 @@ dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
return
1
;
}
void
_switch_to
(
struct
task_struct
*
prev
,
struct
task_struct
*
new
,
struct
task_struct
**
last
)
void
switch_to
(
struct
task_struct
*
prev
,
struct
task_struct
*
new
)
{
struct
thread_struct
*
new_thread
,
*
old_thread
;
unsigned
long
s
;
...
...
@@ -215,7 +219,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
* every switch, just a save.
* -- Cort
*/
if
(
prev
->
thread
.
regs
&&
(
prev
->
thread
.
regs
->
msr
&
MSR_FP
)
)
if
(
prev
->
thread
.
regs
&&
(
prev
->
thread
.
regs
->
msr
&
MSR_FP
)
)
giveup_fpu
(
prev
);
#ifdef CONFIG_ALTIVEC
/*
...
...
@@ -234,8 +238,6 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
#endif
/* CONFIG_ALTIVEC */
#endif
/* CONFIG_SMP */
current_set
[
smp_processor_id
()]
=
new
;
/* Avoid the trap. On smp this this never happens since
* we don't set last_task_used_altivec -- Cort
*/
...
...
@@ -243,7 +245,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
new
->
thread
.
regs
->
msr
|=
MSR_VEC
;
new_thread
=
&
new
->
thread
;
old_thread
=
&
current
->
thread
;
*
last
=
_switch
(
old_thread
,
new_thread
);
_switch
(
old_thread
,
new_thread
);
__restore_flags
(
s
);
}
...
...
@@ -276,7 +278,7 @@ void show_regs(struct pt_regs * regs)
#endif
#ifdef CONFIG_SMP
printk
(
" CPU: %d"
,
current
->
processor
);
printk
(
" CPU: %d"
,
smp_processor_id
()
);
#endif
/* CONFIG_SMP */
printk
(
"
\n
"
);
...
...
arch/ppc/kernel/setup.c
View file @
2472788a
...
...
@@ -35,7 +35,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pmac_feature.h>
#include <asm/thread_info.h>
#if defined CONFIG_KGDB
#include <asm/kgdb.h>
...
...
@@ -163,7 +162,7 @@ int show_cpuinfo(struct seq_file *m, void *v)
return
0
;
pvr
=
cpu_data
[
i
].
pvr
;
lpj
=
cpu_data
[
i
].
loops_per_jiffy
;
seq_printf
(
m
,
"processor
\t
: %
lu
\n
"
,
i
);
seq_printf
(
m
,
"processor
\t
: %
d
\n
"
,
i
);
#else
pvr
=
mfspr
(
PVR
);
lpj
=
loops_per_jiffy
;
...
...
@@ -542,9 +541,6 @@ int __init ppc_init(void)
arch_initcall
(
ppc_init
);
/* Initial thread_info struct, copied into init_task_union */
struct
thread_info
init_thread_values
__initdata
=
INIT_THREAD_INFO
(
init_task
);
/* Warning, IO base is not yet inited */
void
__init
setup_arch
(
char
**
cmdline_p
)
{
...
...
@@ -553,9 +549,6 @@ void __init setup_arch(char **cmdline_p)
extern
char
*
klimit
;
extern
void
do_init_bootmem
(
void
);
/* initialize the thread_info for the init task */
init_thread_info
=
init_thread_values
;
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy
=
500000000
/
HZ
;
...
...
arch/ppc/kernel/signal.c
View file @
2472788a
...
...
@@ -30,6 +30,7 @@
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
...
...
arch/ppc/kernel/smp.c
View file @
2472788a
...
...
@@ -37,6 +37,7 @@
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/time.h>
#include <asm/thread_info.h>
int
smp_threads_ready
;
volatile
int
smp_commenced
;
...
...
@@ -49,11 +50,12 @@ atomic_t ipi_sent;
spinlock_t
kernel_flag
__cacheline_aligned_in_smp
=
SPIN_LOCK_UNLOCKED
;
unsigned
int
prof_multiplier
[
NR_CPUS
];
unsigned
int
prof_counter
[
NR_CPUS
];
cycles_t
cacheflush_time
;
unsigned
long
cache_decay_ticks
;
static
int
max_cpus
__initdata
=
NR_CPUS
;
unsigned
long
cpu_online_map
;
int
smp_hw_index
[
NR_CPUS
];
static
struct
smp_ops_t
*
smp_ops
;
struct
thread_info
*
secondary_ti
;
/* all cpu mappings are 1-1 -- Cort */
volatile
unsigned
long
cpu_callin_map
[
NR_CPUS
];
...
...
@@ -66,6 +68,8 @@ int start_secondary(void *);
extern
int
cpu_idle
(
void
*
unused
);
void
smp_call_function_interrupt
(
void
);
void
smp_message_pass
(
int
target
,
int
msg
,
unsigned
long
data
,
int
wait
);
static
int
__smp_call_function
(
void
(
*
func
)
(
void
*
info
),
void
*
info
,
int
wait
,
int
target
);
#ifdef CONFIG_PPC_ISERIES
extern
void
smp_iSeries_space_timers
(
unsigned
nr
);
...
...
@@ -108,7 +112,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
smp_call_function_interrupt
();
break
;
case
PPC_MSG_RESCHEDULE
:
current
->
work
.
need_resched
=
1
;
set_need_resched
()
;
break
;
case
PPC_MSG_INVALIDATE_TLB
:
_tlbia
();
...
...
@@ -192,8 +196,8 @@ static struct call_data_struct {
* in the system.
*/
int
smp_call_function
(
void
(
*
func
)
(
void
*
info
),
void
*
info
,
int
nonatomic
,
int
wait
)
int
smp_call_function
(
void
(
*
func
)
(
void
*
info
),
void
*
info
,
int
nonatomic
,
int
wait
)
/*
* [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
...
...
@@ -206,13 +210,24 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler, you may call it from a bottom half handler.
*/
{
if
(
smp_num_cpus
<=
1
)
return
0
;
return
__smp_call_function
(
func
,
info
,
wait
,
MSG_ALL_BUT_SELF
);
}
static
int
__smp_call_function
(
void
(
*
func
)
(
void
*
info
),
void
*
info
,
int
wait
,
int
target
)
{
struct
call_data_struct
data
;
int
ret
=
-
1
,
cpus
=
smp_num_cpus
-
1
;
int
ret
=
-
1
;
int
timeout
;
int
ncpus
=
1
;
if
(
!
cpus
)
return
0
;
if
(
target
==
MSG_ALL_BUT_SELF
)
ncpus
=
smp_num_cpus
-
1
;
else
if
(
target
==
MSG_ALL
)
ncpus
=
smp_num_cpus
;
data
.
func
=
func
;
data
.
info
=
info
;
...
...
@@ -224,11 +239,11 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_lock_bh
(
&
call_lock
);
call_data
=
&
data
;
/* Send a message to all other CPUs and wait for them to respond */
smp_message_pass
(
MSG_ALL_BUT_SELF
,
PPC_MSG_CALL_FUNCTION
,
0
,
0
);
smp_message_pass
(
target
,
PPC_MSG_CALL_FUNCTION
,
0
,
0
);
/* Wait for response */
timeout
=
1000000
;
while
(
atomic_read
(
&
data
.
started
)
!=
cpus
)
{
while
(
atomic_read
(
&
data
.
started
)
!=
n
cpus
)
{
if
(
--
timeout
==
0
)
{
printk
(
"smp_call_function on cpu %d: other cpus not responding (%d)
\n
"
,
smp_processor_id
(),
atomic_read
(
&
data
.
started
));
...
...
@@ -240,7 +255,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if
(
wait
)
{
timeout
=
1000000
;
while
(
atomic_read
(
&
data
.
finished
)
!=
cpus
)
{
while
(
atomic_read
(
&
data
.
finished
)
!=
n
cpus
)
{
if
(
--
timeout
==
0
)
{
printk
(
"smp_call_function on cpu %d: other cpus not finishing (%d/%d)
\n
"
,
smp_processor_id
(),
atomic_read
(
&
data
.
finished
),
atomic_read
(
&
data
.
started
));
...
...
@@ -276,9 +291,28 @@ void smp_call_function_interrupt(void)
atomic_inc
(
&
call_data
->
finished
);
}
/*
* Task migration callback.
*/
void
smp_task_migration_interrupt
(
void
*
new_task
)
{
task_t
*
p
;
p
=
new_task
;
sched_task_migrated
(
p
);
}
/*
* This function sends a 'task migration' IPI to another CPU.
* Must be called from syscall contexts, with interrupts *enabled*.
*/
void
smp_migrate_task
(
int
cpu
,
task_t
*
p
)
{
__smp_call_function
(
smp_task_migration_interrupt
,
p
,
0
,
cpu
);
}
void
__init
smp_boot_cpus
(
void
)
{
extern
struct
task_struct
*
current_set
[
NR_CPUS
];
int
i
,
cpu_nr
;
struct
task_struct
*
p
;
...
...
@@ -292,7 +326,6 @@ void __init smp_boot_cpus(void)
* cpu 0, the master -- Cort
*/
cpu_callin_map
[
0
]
=
1
;
current
->
cpu
=
0
;
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
prof_counter
[
i
]
=
1
;
...
...
@@ -300,10 +333,9 @@ void __init smp_boot_cpus(void)
}
/*
* XXX very rough, assumes 20 bus cycles to read a cache line,
* timebase increments every 4 bus cycles, 32kB L1 data cache.
* XXX very rough.
*/
cache
flush_time
=
5
*
1024
;
cache
_decay_ticks
=
HZ
/
100
;
smp_ops
=
ppc_md
.
smp_ops
;
if
(
smp_ops
==
NULL
)
{
...
...
@@ -311,7 +343,7 @@ void __init smp_boot_cpus(void)
return
;
}
/* Probe
arch
for CPUs */
/* Probe
platform
for CPUs */
cpu_nr
=
smp_ops
->
probe
();
/*
...
...
@@ -338,9 +370,8 @@ void __init smp_boot_cpus(void)
init_idle
(
p
,
i
);
unhash_process
(
p
);
p
->
cpu
=
i
;
p
->
cpus_allowed
=
1
<<
i
;
/* we schedule the first task manually */
current_set
[
i
]
=
p
;
secondary_ti
=
p
->
thread_info
;
p
->
thread_info
->
cpu
=
i
;
/*
* There was a cache flush loop here to flush the cache
...
...
@@ -357,11 +388,10 @@ void __init smp_boot_cpus(void)
* use this value that I found through experimentation.
* -- Cort
*/
for
(
c
=
1000
;
c
&&
!
cpu_callin_map
[
i
]
;
c
--
)
for
(
c
=
1000
;
c
&&
!
cpu_callin_map
[
i
];
c
--
)
udelay
(
100
);
if
(
cpu_callin_map
[
i
]
)
{
if
(
cpu_callin_map
[
i
])
{
char
buf
[
32
];
sprintf
(
buf
,
"found cpu %d"
,
i
);
if
(
ppc_md
.
progress
)
ppc_md
.
progress
(
buf
,
0x350
+
i
);
...
...
@@ -488,7 +518,7 @@ void __init smp_commence(void)
void
__init
smp_callin
(
void
)
{
int
cpu
=
current
->
processor
;
int
cpu
=
smp_processor_id
()
;
smp_store_cpu_info
(
cpu
);
set_dec
(
tb_ticks_per_jiffy
);
...
...
@@ -505,7 +535,7 @@ void __init smp_callin(void)
*/
cpu_online_map
|=
1UL
<<
smp_processor_id
();
while
(
!
smp_commenced
)
while
(
!
smp_commenced
)
barrier
();
/* see smp_commence for more info */
...
...
arch/ppc/lib/locks.c
View file @
2472788a
...
...
@@ -48,7 +48,7 @@ static unsigned long __spin_trylock(volatile unsigned long *lock)
return
ret
;
}
void
_spin_lock
(
spinlock_t
*
lock
)
void
_
raw_
spin_lock
(
spinlock_t
*
lock
)
{
int
cpu
=
smp_processor_id
();
unsigned
int
stuck
=
INIT_STUCK
;
...
...
@@ -69,7 +69,7 @@ void _spin_lock(spinlock_t *lock)
lock
->
owner_cpu
=
cpu
;
}
int
spin_trylock
(
spinlock_t
*
lock
)
int
_raw_
spin_trylock
(
spinlock_t
*
lock
)
{
if
(
__spin_trylock
(
&
lock
->
lock
))
return
0
;
...
...
@@ -78,7 +78,7 @@ int spin_trylock(spinlock_t *lock)
return
1
;
}
void
_spin_unlock
(
spinlock_t
*
lp
)
void
_
raw_
spin_unlock
(
spinlock_t
*
lp
)
{
if
(
!
lp
->
lock
)
printk
(
"_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d
\n
"
,
...
...
@@ -99,7 +99,7 @@ void _spin_unlock(spinlock_t *lp)
* with the high bit (sign) being the "write" bit.
* -- Cort
*/
void
_read_lock
(
rwlock_t
*
rw
)
void
_r
aw_r
ead_lock
(
rwlock_t
*
rw
)
{
unsigned
long
stuck
=
INIT_STUCK
;
int
cpu
=
smp_processor_id
();
...
...
@@ -126,7 +126,7 @@ void _read_lock(rwlock_t *rw)
wmb
();
}
void
_read_unlock
(
rwlock_t
*
rw
)
void
_r
aw_r
ead_unlock
(
rwlock_t
*
rw
)
{
if
(
rw
->
lock
==
0
)
printk
(
"_read_unlock(): %s/%d (nip %08lX) lock %lx
\n
"
,
...
...
@@ -136,7 +136,7 @@ void _read_unlock(rwlock_t *rw)
atomic_dec
((
atomic_t
*
)
&
(
rw
)
->
lock
);
}
void
_write_lock
(
rwlock_t
*
rw
)
void
_
raw_
write_lock
(
rwlock_t
*
rw
)
{
unsigned
long
stuck
=
INIT_STUCK
;
int
cpu
=
smp_processor_id
();
...
...
@@ -176,7 +176,7 @@ void _write_lock(rwlock_t *rw)
wmb
();
}
void
_write_unlock
(
rwlock_t
*
rw
)
void
_
raw_
write_unlock
(
rwlock_t
*
rw
)
{
if
(
!
(
rw
->
lock
&
(
1
<<
31
))
)
printk
(
"_write_lock(): %s/%d (nip %08lX) lock %lx
\n
"
,
...
...
arch/ppc/mm/hashtable.S
View file @
2472788a
...
...
@@ -32,6 +32,7 @@
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <kernel/ppc_defs.h>
#ifdef CONFIG_SMP
...
...
@@ -63,9 +64,7 @@ hash_page:
#ifdef CONFIG_SMP
addis
r2
,
r7
,
hash_table_lock
@
h
ori
r2
,
r2
,
hash_table_lock
@
l
mfspr
r5
,
SPRG3
lwz
r0
,
CPU
-
THREAD
(
r5
)
oris
r0
,
r0
,
0x0fff
lis
r0
,
0x0fff
b
10
f
11
:
lwz
r6
,
0
(
r2
)
cmpwi
0
,
r6
,
0
...
...
@@ -215,8 +214,9 @@ _GLOBAL(add_hash_page)
#ifdef CONFIG_SMP
lis
r9
,
hash_table_lock
@
h
ori
r9
,
r9
,
hash_table_lock
@
l
lwz
r8
,
CPU
(
r2
)
oris
r8
,
r8
,
10
rlwinm
r8
,
r1
,
0
,
0
,
18
lwz
r8
,
TI_CPU
(
r8
)
oris
r8
,
r8
,
12
10
:
lwarx
r7
,
0
,
r9
cmpi
0
,
r7
,
0
bne
-
11
f
...
...
@@ -511,7 +511,8 @@ _GLOBAL(flush_hash_page)
#ifdef CONFIG_SMP
lis
r9
,
hash_table_lock
@
h
ori
r9
,
r9
,
hash_table_lock
@
l
lwz
r8
,
CPU
(
r2
)
rlwinm
r8
,
r1
,
0
,
0
,
18
lwz
r8
,
TI_CPU
(
r8
)
oris
r8
,
r8
,
9
10
:
lwarx
r7
,
0
,
r9
cmpi
0
,
r7
,
0
...
...
arch/ppc/mm/init.c
View file @
2472788a
...
...
@@ -135,7 +135,6 @@ void show_mem(void)
{
int
i
,
free
=
0
,
total
=
0
,
reserved
=
0
;
int
shared
=
0
,
cached
=
0
;
struct
task_struct
*
p
;
int
highmem
=
0
;
printk
(
"Mem-info:
\n
"
);
...
...
@@ -153,7 +152,7 @@ void show_mem(void)
else
if
(
!
page_count
(
mem_map
+
i
))
free
++
;
else
shared
+=
atomic_read
(
&
mem_map
[
i
].
count
)
-
1
;
shared
+=
page_count
(
mem_map
+
i
)
-
1
;
}
printk
(
"%d pages of RAM
\n
"
,
total
);
printk
(
"%d pages of HIGHMEM
\n
"
,
highmem
);
...
...
@@ -163,49 +162,6 @@ void show_mem(void)
printk
(
"%d pages swap cached
\n
"
,
cached
);
printk
(
"%d pages in page table cache
\n
"
,(
int
)
pgtable_cache_size
);
show_buffers
();
printk
(
"%-8s %3s %8s %8s %8s %9s %8s"
,
"Process"
,
"Pid"
,
"Ctx"
,
"Ctx<<4"
,
"Last Sys"
,
"pc"
,
"task"
);
#ifdef CONFIG_SMP
printk
(
" %3s"
,
"CPU"
);
#endif
/* CONFIG_SMP */
printk
(
"
\n
"
);
for_each_task
(
p
)
{
printk
(
"%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx "
,
p
->
comm
,
p
->
pid
,
(
p
->
mm
)
?
p
->
mm
->
context
:
0
,
(
p
->
mm
)
?
(
p
->
mm
->
context
<<
4
)
:
0
,
p
->
thread
.
last_syscall
,
(
p
->
thread
.
regs
)
?
user_mode
(
p
->
thread
.
regs
)
?
'u'
:
'k'
:
'?'
,
(
p
->
thread
.
regs
)
?
p
->
thread
.
regs
->
nip
:
0
,
(
ulong
)
p
);
{
int
iscur
=
0
;
#ifdef CONFIG_SMP
printk
(
"%3d "
,
p
->
processor
);
if
(
(
p
->
processor
!=
NO_PROC_ID
)
&&
(
p
==
current_set
[
p
->
processor
])
)
{
iscur
=
1
;
printk
(
"current"
);
}
#else
if
(
p
==
current
)
{
iscur
=
1
;
printk
(
"current"
);
}
if
(
p
==
last_task_used_math
)
{
if
(
iscur
)
printk
(
","
);
printk
(
"last math"
);
}
#endif
/* CONFIG_SMP */
printk
(
"
\n
"
);
}
}
}
void
si_meminfo
(
struct
sysinfo
*
val
)
...
...
arch/ppc/vmlinux.lds
View file @
2472788a
...
...
@@ -49,8 +49,9 @@ SECTIONS
.fini : { *(.fini) } =0
.ctors : { *(.ctors) }
.dtors : { *(.dtors) }
/* Read-write section, merged into data segment: */
. =
(. + 0x0FFF) & 0xFFFFF000
;
. =
ALIGN(4096)
;
.data :
{
*(.data)
...
...
@@ -80,6 +81,9 @@ SECTIONS
. = ALIGN(32);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
. = ALIGN(8192);
.data.init_task : { *(.data.init_task) }
. = ALIGN(4096);
__init_begin = .;
.text.init : { *(.text.init) }
...
...
include/asm-ppc/bitops.h
View file @
2472788a
...
...
@@ -10,6 +10,7 @@
#define _PPC_BITOPS_H
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
...
...
@@ -272,6 +273,79 @@ static __inline__ int ffs(int x)
#endif
/* __KERNEL__ */
/*
 * Find the first bit set in a 140-bit bitmap.
 * The first 100 bits are unlikely to be set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	unsigned int word;

	/*
	 * Scan the three low words with branch-prediction hints,
	 * since the first 100 bits are expected to be clear.
	 */
	for (word = 0; word < 3; word++)
		if (unlikely(b[word]))
			return __ffs(b[word]) + 32 * word;
	if (b[3])
		return __ffs(b[3]) + 96;
	/* Bitmap is 140 bits, so a set bit must exist in b[4] by now. */
	return __ffs(b[4]) + 128;
}
/**
* find_next_bit - find the next set bit in a memory region
* @addr: The address to base the search on
* @offset: The bitnumber to start searching at
* @size: The maximum size to search
*/
static
__inline__
unsigned
long
find_next_bit
(
void
*
addr
,
unsigned
long
size
,
unsigned
long
offset
)
{
unsigned
int
*
p
=
((
unsigned
int
*
)
addr
)
+
(
offset
>>
5
);
unsigned
int
result
=
offset
&
~
31UL
;
unsigned
int
tmp
;
if
(
offset
>=
size
)
return
size
;
size
-=
result
;
offset
&=
31UL
;
if
(
offset
)
{
tmp
=
*
p
++
;
tmp
&=
~
0UL
<<
offset
;
if
(
size
<
32
)
goto
found_first
;
if
(
tmp
)
goto
found_middle
;
size
-=
32
;
result
+=
32
;
}
while
(
size
>=
32
)
{
if
((
tmp
=
*
p
++
)
!=
0
)
goto
found_middle
;
result
+=
32
;
size
-=
32
;
}
if
(
!
size
)
return
result
;
tmp
=
*
p
;
found_first:
tmp
&=
~
0UL
>>
(
32
-
size
);
if
(
tmp
==
0UL
)
/* Are any bits set? */
return
result
+
size
;
/* Nope. */
found_middle:
return
result
+
__ffs
(
tmp
);
}
/**
* find_first_bit - find the first set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
*
* Returns the bit-number of the first set bit, not the number of the byte
* containing a bit.
*/
#define find_first_bit(addr, size) \
find_next_bit((addr), (size), 0)
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
...
...
include/asm-ppc/mmu_context.h
View file @
2472788a
...
...
@@ -10,25 +10,6 @@
#include <asm/bitops.h>
#include <asm/mmu.h>
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
# error update this function.
#endif
/*
 * Scheduler priority-bitmap scan (six 32-bit words); the low words
 * are unlikely to have bits set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	unsigned int i;

	/* First four words are the rarely-used part of the bitmap. */
	for (i = 0; i < 4; i++)
		if (unlikely(b[i]))
			return __ffs(b[i]) + 32 * i;
	if (b[4])
		return __ffs(b[4]) + 128;
	/* Fall through to the final word. */
	return __ffs(b[5]) + 160;
}
/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
...
...
include/asm-ppc/processor.h
View file @
2472788a
...
...
@@ -714,10 +714,8 @@ struct thread_struct {
/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 */
static inline unsigned long thread_saved_pc(struct thread_struct *t)
{
	/* No saved register set means no meaningful PC to report. */
	if (!t->regs)
		return 0;
	return t->regs->nip;
}
#define thread_saved_pc(tsk) \
((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
...
...
include/asm-ppc/smp.h
View file @
2472788a
...
...
@@ -28,10 +28,10 @@ struct cpuinfo_PPC {
unsigned
long
pgtable_cache_sz
;
};
extern
struct
cpuinfo_PPC
cpu_data
[
NR_CPUS
];
extern
struct
cpuinfo_PPC
cpu_data
[];
extern
unsigned
long
cpu_online_map
;
extern
unsigned
long
smp_proc_in_lock
[
NR_CPUS
];
extern
volatile
unsigned
long
cpu_callin_map
[
NR_CPUS
];
extern
unsigned
long
smp_proc_in_lock
[];
extern
volatile
unsigned
long
cpu_callin_map
[];
extern
int
smp_tb_synchronized
;
extern
void
smp_store_cpu_info
(
int
id
);
...
...
@@ -50,7 +50,7 @@ extern void smp_local_timer_interrupt(struct pt_regs *);
#define smp_processor_id() (current_thread_info()->cpu)
extern
int
smp_hw_index
[
NR_CPUS
];
extern
int
smp_hw_index
[];
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
struct
klock_info_struct
{
...
...
include/asm-ppc/spinlock.h
View file @
2472788a
...
...
@@ -36,7 +36,7 @@ typedef struct {
#ifndef SPINLOCK_DEBUG
static
inline
void
spin_lock
(
spinlock_t
*
lock
)
static
inline
void
_raw_
spin_lock
(
spinlock_t
*
lock
)
{
unsigned
long
tmp
;
...
...
@@ -59,24 +59,21 @@ static inline void spin_lock(spinlock_t *lock)
:
"cr0"
,
"memory"
);
}
static
inline
void
spin_unlock
(
spinlock_t
*
lock
)
static
inline
void
_raw_
spin_unlock
(
spinlock_t
*
lock
)
{
__asm__
__volatile__
(
"eieio # spin_unlock"
:
:
:
"memory"
);
lock
->
lock
=
0
;
}
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define
_raw_
spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#else
extern
void
_spin_lock
(
spinlock_t
*
lock
);
extern
void
_spin_unlock
(
spinlock_t
*
lock
);
extern
int
spin_trylock
(
spinlock_t
*
lock
);
extern
void
_
raw_
spin_lock
(
spinlock_t
*
lock
);
extern
void
_
raw_
spin_unlock
(
spinlock_t
*
lock
);
extern
int
_raw_
spin_trylock
(
spinlock_t
*
lock
);
extern
unsigned
long
__spin_trylock
(
volatile
unsigned
long
*
lock
);
#define spin_lock(lp) _spin_lock(lp)
#define spin_unlock(lp) _spin_unlock(lp)
#endif
/*
...
...
@@ -107,7 +104,7 @@ typedef struct {
#ifndef SPINLOCK_DEBUG
static
__inline__
void
read_lock
(
rwlock_t
*
rw
)
static
__inline__
void
_raw_
read_lock
(
rwlock_t
*
rw
)
{
unsigned
int
tmp
;
...
...
@@ -130,7 +127,7 @@ static __inline__ void read_lock(rwlock_t *rw)
:
"cr0"
,
"memory"
);
}
static
__inline__
void
read_unlock
(
rwlock_t
*
rw
)
static
__inline__
void
_raw_
read_unlock
(
rwlock_t
*
rw
)
{
unsigned
int
tmp
;
...
...
@@ -146,7 +143,7 @@ static __inline__ void read_unlock(rwlock_t *rw)
:
"cr0"
,
"memory"
);
}
static
__inline__
void
write_lock
(
rwlock_t
*
rw
)
static
__inline__
void
_raw_
write_lock
(
rwlock_t
*
rw
)
{
unsigned
int
tmp
;
...
...
@@ -169,7 +166,7 @@ static __inline__ void write_lock(rwlock_t *rw)
:
"cr0"
,
"memory"
);
}
static
__inline__
void
write_unlock
(
rwlock_t
*
rw
)
static
__inline__
void
_raw_
write_unlock
(
rwlock_t
*
rw
)
{
__asm__
__volatile__
(
"eieio # write_unlock"
:
:
:
"memory"
);
rw
->
lock
=
0
;
...
...
@@ -177,15 +174,10 @@ static __inline__ void write_unlock(rwlock_t *rw)
#else
extern
void
_read_lock
(
rwlock_t
*
rw
);
extern
void
_read_unlock
(
rwlock_t
*
rw
);
extern
void
_write_lock
(
rwlock_t
*
rw
);
extern
void
_write_unlock
(
rwlock_t
*
rw
);
#define read_lock(rw) _read_lock(rw)
#define write_lock(rw) _write_lock(rw)
#define write_unlock(rw) _write_unlock(rw)
#define read_unlock(rw) _read_unlock(rw)
extern
void
_raw_read_lock
(
rwlock_t
*
rw
);
extern
void
_raw_read_unlock
(
rwlock_t
*
rw
);
extern
void
_raw_write_lock
(
rwlock_t
*
rw
);
extern
void
_raw_write_unlock
(
rwlock_t
*
rw
);
#endif
...
...
include/asm-ppc/system.h
View file @
2472788a
...
...
@@ -81,9 +81,7 @@ extern void note_scsi_host(struct device_node *, void *);
struct
task_struct
;
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern
void
_switch_to
(
struct
task_struct
*
,
struct
task_struct
*
,
struct
task_struct
**
);
extern
void
switch_to
(
struct
task_struct
*
,
struct
task_struct
*
);
struct
thread_struct
;
extern
struct
task_struct
*
_switch
(
struct
thread_struct
*
prev
,
...
...
include/asm-ppc/thread_info.h
View file @
2472788a
...
...
@@ -10,11 +10,12 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#ifndef __ASSEMBLY__
/*
* low level task data.
* If you change this, change the TI_* offsets below to match.
*/
struct
thread_info
{
struct
task_struct
*
task
;
/* main task structure */
...
...
@@ -51,9 +52,21 @@ static inline struct thread_info *current_thread_info(void)
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#define THREAD_SIZE (2*PAGE_SIZE)
#endif
/* __ASSEMBLY__ */
/*
* Size of kernel stack for each process.
*/
#define THREAD_SIZE 8192
/* 2 pages */
/*
* Offsets in thread_info structure, used in assembly code
*/
#define TI_TASK 0
#define TI_EXECDOMAIN 4
#define TI_FLAGS 8
#define TI_CPU 12
/*
* thread information flag bit numbers
*/
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment