nexedi / linux · Commits

Commit 0f4d7d65, authored Jul 28, 2002 by Paul Mackerras

    Merge samba.org:/home/paulus/kernel/linux-2.5
    into samba.org:/home/paulus/kernel/for-linus-ppc

Parents: e9cdd788, 9a692aef

Showing 37 changed files with 370 additions and 601 deletions (+370, -601)
Changed files:

arch/ppc/boot/common/misc-common.c   (+8, -8)
arch/ppc/boot/prep/Makefile          (+1, -1)
arch/ppc/boot/prep/misc.c            (+4, -4)
arch/ppc/boot/simple/Makefile        (+2, -2)
arch/ppc/boot/simple/misc-spruce.c   (+1, -1)
arch/ppc/config.in                   (+4, -0)
arch/ppc/kernel/Makefile             (+1, -0)
arch/ppc/kernel/irq.c                (+13, -284)
arch/ppc/kernel/ppc_htab.c           (+0, -41)
arch/ppc/kernel/ppc_ksyms.c          (+0, -6)
arch/ppc/kernel/prep_nvram.c         (+7, -2)   (moved from arch/ppc/platforms/)
arch/ppc/kernel/process.c            (+1, -2)
arch/ppc/kernel/temp.c               (+7, -7)
arch/ppc/kernel/time.c               (+13, -16)
arch/ppc/mm/fault.c                  (+6, -4)
arch/ppc/mm/mmu_decl.h               (+3, -1)
arch/ppc/mm/pgtable.c                (+7, -24)
arch/ppc/mm/ppc_mmu.c                (+7, -1)
arch/ppc/mm/tlb.c                    (+35, -35)
arch/ppc/platforms/Makefile          (+2, -2)
arch/ppc/platforms/cpc700.h          (+2, -2)
arch/ppc/platforms/cpc700_pic.c      (+2, -2)
arch/ppc/platforms/iSeries_pic.c     (+4, -9)
arch/ppc/platforms/iSeries_time.c    (+3, -7)
arch/ppc/platforms/lopec_setup.c     (+30, -0)
arch/ppc/platforms/pplus_setup.c     (+20, -3)
arch/ppc/platforms/spruce_setup.c    (+2, -2)
include/asm-ppc/hardirq.h            (+64, -70)
include/asm-ppc/hw_irq.h             (+9, -11)
include/asm-ppc/pgtable.h            (+17, -0)
include/asm-ppc/rwsem.h              (+15, -1)
include/asm-ppc/smplock.h            (+1, -1)
include/asm-ppc/softirq.h            (+11, -15)
include/asm-ppc/system.h             (+0, -21)
include/asm-ppc/thread_info.h        (+10, -10)
include/asm-ppc/tlb.h                (+58, -1)
include/asm-ppc/tlbflush.h           (+0, -5)
arch/ppc/boot/common/misc-common.c

@@ -75,7 +75,7 @@ static int _cvt(unsigned long val, char *buf, long radix, char *digits);
 void _vprintk(void (*putc)(const char), const char *fmt0, va_list ap);
 unsigned char *ISA_io = NULL;
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
 extern unsigned long com_port;
 extern int serial_tstc(unsigned long com_port);

@@ -96,7 +96,7 @@ void exit(void)
 int tstc(void)
 {
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
    if (keyb_present)
        return (CRT_tstc() || serial_tstc(com_port));
    else

@@ -109,10 +109,10 @@ int tstc(void)
 int getc(void)
 {
    while (1) {
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
        if (serial_tstc(com_port))
            return (serial_getc(com_port));
-#endif /* CONFIG_SERIAL_CONSOLE */
+#endif /* serial console */
        if (keyb_present)
            if (CRT_tstc())
                return (CRT_getc());

@@ -124,11 +124,11 @@ putc(const char c)
 {
    int x, y;
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
    serial_putc(com_port, c);
    if (c == '\n')
        serial_putc(com_port, '\r');
-#endif /* CONFIG_SERIAL_CONSOLE */
+#endif /* serial console */
    x = orig_x;
    y = orig_y;

@@ -171,10 +171,10 @@ void puts(const char *s)
    y = orig_y;
    while ((c = *s++) != '\0') {
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
        serial_putc(com_port, c);
        if (c == '\n')
            serial_putc(com_port, '\r');
-#endif /* CONFIG_SERIAL_CONSOLE */
+#endif /* serial console */
        if (c == '\n') {
            x = 0;
arch/ppc/boot/prep/Makefile

@@ -27,7 +27,7 @@ boot-y := head.o ../simple/legacy.o misc.o of1275.o \
 OBJCOPY_ARGS = -O elf32-powerpc
 LIBS = ../lib/zlib.a
-boot-$(CONFIG_SERIAL_CONSOLE) += ../common/ns16550.o
+boot-$(CONFIG_SERIAL_8250_CONSOLE) += ../common/ns16550.o
 boot-$(CONFIG_VGA_CONSOLE) += vreset.o kbd.o
 EXTRA_TARGETS := $(boot-y)
arch/ppc/boot/prep/misc.c

@@ -56,9 +56,9 @@ unsigned long orig_MSR;
 char *zimage_start;
 int zimage_size;
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_8250_CONSOLE)
 unsigned long com_port;
-#endif /* CONFIG_SERIAL_CONSOLE */
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
 #ifdef CONFIG_VGA_CONSOLE
 char *vidmem = (char *)0xC00B8000;
 int lines = 25, cols = 80;

@@ -135,9 +135,9 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum,
    unsigned int pci_viddid, pci_did, tulip_pci_base, tulip_base;

    serial_fixups();
-#if defined(CONFIG_SERIAL_CONSOLE)
+#if defined(CONFIG_SERIAL_8250_CONSOLE)
    com_port = serial_init(0, NULL);
-#endif /* CONFIG_SERIAL_CONSOLE */
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
 #if defined(CONFIG_VGA_CONSOLE)
    vga_init((unsigned char *)0xC0000000);
 #endif /* CONFIG_VGA_CONSOLE */
arch/ppc/boot/simple/Makefile

@@ -137,8 +137,8 @@ ifeq ($(CONFIG_SERIAL_CONSOLE),y)
 boot-$(CONFIG_8xx)              += m8xx_tty.o
 boot-$(CONFIG_8260)             += m8260_tty.o
 boot-$(CONFIG_GT64260_CONSOLE)  += gt64260_tty.o
-boot-$(CONFIG_SERIAL)           += ../common/ns16550.o
 endif
+boot-$(CONFIG_SERIAL_8250_CONSOLE) += ../common/ns16550.o
 EXTRA_TARGETS := $(boot-y)
 LIBS := ../lib/zlib.a
arch/ppc/boot/simple/misc-spruce.c

@@ -189,7 +189,7 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
    unsigned char header_type;
    unsigned int bar0;

-#ifdef CONFIG_SERIAL_CONSOLE
+#ifdef CONFIG_SERIAL_8250_CONSOLE
    /* Initialize the serial console port */
    com_port = serial_init(0, NULL);
 #endif
arch/ppc/config.in

@@ -365,6 +365,10 @@ if [ "$CONFIG_ALL_PPC" = "y" ]; then
   bool 'Support for RTAS (RunTime Abstraction Services) in /proc' CONFIG_PPC_RTAS
   bool 'Support for PReP Residual Data' CONFIG_PREP_RESIDUAL
   dep_bool '  Support for reading of PReP Residual Data in /proc' CONFIG_PROC_PREPRESIDUAL $CONFIG_PREP_RESIDUAL
+  define_bool CONFIG_PPCBUG_NVRAM y
 fi
+if [ "$CONFIG_PPLUS" = "y" -o "$CONFIG_LOPEC" = "y" ]; then
+  bool 'Enable reading PPCBUG NVRAM during boot' CONFIG_PPCBUG_NVRAM
+fi
 bool 'Default bootloader kernel arguments' CONFIG_CMDLINE_BOOL
arch/ppc/kernel/Makefile

@@ -35,6 +35,7 @@ obj-$(CONFIG_PCI) += pci.o
 ifneq ($(CONFIG_PPC_ISERIES),y)
 obj-$(CONFIG_PCI)              += pci-dma.o
 endif
+obj-$(CONFIG_PPCBUG_NVRAM)     += prep_nvram.o
 obj-$(CONFIG_KGDB)             += ppc-stub.o
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_TAU)              += temp.o
arch/ppc/kernel/irq.c

@@ -178,15 +178,6 @@ setup_irq(unsigned int irq, struct irqaction * new)
    return 0;
 }

-#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
-/* Name change so we can catch standard drivers that potentially mess up
- * the internal interrupt controller on 8xx and 8260.  Just bear with me,
- * I don't like this either and I am searching a better solution.  For
- * now, this is what I need. -- Dan
- */
-#define request_irq request_8xxirq
-#endif
-
 void free_irq(unsigned int irq, void* dev_id)
 {
    irq_desc_t *desc;

@@ -212,11 +203,7 @@ void free_irq(unsigned int irq, void* dev_id)
        }
        spin_unlock_irqrestore(&desc->lock, flags);
-#ifdef CONFIG_SMP
-       /* Wait to make sure it's not being used on another CPU */
-       while (desc->status & IRQ_INPROGRESS)
-           barrier();
-#endif
+       synchronize_irq(irq);
        irq_kfree(action);
        return;
    }

@@ -290,7 +277,7 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
  * This function may be called from IRQ context.
  */
 void disable_irq_nosync(unsigned int irq)
 {
    irq_desc_t *desc = irq_desc + irq;
    unsigned long flags;

@@ -320,12 +307,7 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
 void disable_irq(unsigned int irq)
 {
    disable_irq_nosync(irq);
-   if (!local_irq_count(smp_processor_id())) {
-       do {
-           barrier();
-       } while (irq_desc[irq].status & IRQ_INPROGRESS);
-   }
+   synchronize_irq(irq);
 }

 /**

@@ -525,11 +507,10 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
 }

 #ifndef CONFIG_PPC_ISERIES     /* iSeries version is in iSeries_pic.c */
-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
 {
    int cpu = smp_processor_id();
    int irq, first = 1;
-   hardirq_enter(cpu);
+   irq_enter();

 /*
  * Every platform is required to implement ppc_md.get_irq.

@@ -546,11 +527,7 @@ int do_IRQ(struct pt_regs *regs)
    if (irq != -2 && first)
        /* That's not SMP safe ... but who cares ? */
        ppc_spurious_interrupts++;
-   hardirq_exit(cpu);
-   if (softirq_pending(cpu))
-       do_softirq();
-   return 1; /* lets ret_from_int know we can do checks */
+   irq_exit();
 }

 #endif /* CONFIG_PPC_ISERIES */

@@ -582,262 +559,14 @@ void __init init_IRQ(void)
 }

 #ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile long global_irq_lock; /* pendantic :long for set_bit--RR*/
-atomic_t global_bh_count;
-
-static void show(char * str)
-{
-   int cpu = smp_processor_id();
-
-   printk("\n%s, CPU %d:\n", str, cpu);
-   printk("irq: [%d %d]\n", local_irq_count(0), local_irq_count(1));
-   printk("bh: %d [%d %d]\n", atomic_read(&global_bh_count),
-          local_bh_count(0), local_bh_count(1));
-}
-
-static inline void wait_on_bh(void)
-{
-   int count = MAXCOUNT;
-   do {
-       if (!--count) {
-           show("wait_on_bh");
-           count = ~0;
-       }
-       /* nothing .. wait for the other bh's to go away */
-   } while (atomic_read(&global_bh_count) != 0);
-}
-
-static inline void wait_on_irq(int cpu)
-{
-   int count = MAXCOUNT;
-
-   for (;;) {
-       /*
-        * Wait until all interrupts are gone. Wait
-        * for bottom half handlers unless we're
-        * already executing in one..
-        */
-       if (!irqs_running())
-           if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
-               break;
-
-       /* Duh, we have to loop. Release the lock to avoid deadlocks */
-       clear_bit(0, &global_irq_lock);
-
-       for (;;) {
-           if (!--count) {
-               show("wait_on_irq");
-               count = ~0;
-           }
-           local_irq_enable();
-           /*
-            * We have to allow irqs to arrive between local_irq_enable
-            * and local_irq_disable.
-            * Some cpus apparently won't cause the interrupt
-            * for several instructions. We hope that isync will
-            * catch this --Troy
-            */
-           __asm__ __volatile__ ("isync");
-           local_irq_disable();
-           if (irqs_running())
-               continue;
-           if (global_irq_lock)
-               continue;
-           if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
-               continue;
-           if (!test_and_set_bit(0, &global_irq_lock))
-               break;
-       }
-   }
-}
-
-/*
- * This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void synchronize_bh(void)
-{
-   if (atomic_read(&global_bh_count) && !in_interrupt())
-       wait_on_bh();
-}
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void synchronize_irq(void)
-{
-   if (irqs_running()) {
-       /* Stupid approach */
-       cli();
-       sti();
-   }
-}
-
-static inline void get_irqlock(int cpu)
-{
-   unsigned int loops = MAXCOUNT;
-
-   if (test_and_set_bit(0, &global_irq_lock)) {
-       /* do we already hold the lock? */
-       if ((unsigned char) cpu == global_irq_holder)
-           return;
-       /* Uhhuh.. Somebody else got it. Wait.. */
-       do {
-           do {
-               if (loops-- == 0) {
-                   printk("get_irqlock(%d) waiting, global_irq_holder=%d\n",
-                          cpu, global_irq_holder);
-#ifdef CONFIG_XMON
-                   xmon(0);
-#endif
-               }
-           } while (test_bit(0, &global_irq_lock));
-       } while (test_and_set_bit(0, &global_irq_lock));
-   }
-   /*
-    * We also need to make sure that nobody else is running
-    * in an interrupt context.
-    */
-   wait_on_irq(cpu);
-
-   /*
-    * Ok, finally..
-    */
-   global_irq_holder = cpu;
-}
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void __global_cli(void)
-{
-   unsigned long flags;
-
-   local_save_flags(flags);
-   if (flags & (1 << 15)) {
-       int cpu = smp_processor_id();
-       local_irq_disable();
-       if (!local_irq_count(cpu))
-           get_irqlock(cpu);
-   }
-}
-
-void __global_sti(void)
-{
-   int cpu = smp_processor_id();
-
-   if (!local_irq_count(cpu))
-       release_irqlock(cpu);
-   local_irq_enable();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long __global_save_flags(void)
-{
-   int retval;
-   int local_enabled;
-   unsigned long flags;
-
-   local_save_flags(flags);
-   local_enabled = (flags >> 15) & 1;
-   /* default to local */
-   retval = 2 + local_enabled;
-
-   /* check for global flags if we're not in an interrupt */
-   if (!local_irq_count(smp_processor_id())) {
-       if (local_enabled)
-           retval = 1;
-       if (global_irq_holder == (unsigned char) smp_processor_id())
-           retval = 0;
-   }
-   return retval;
-}
-
-int tb(long vals[], int max_size)
-{
-   register unsigned long *orig_sp __asm__ ("r1");
-   register unsigned long lr __asm__ ("r3");
-   unsigned long *sp;
-   int i;
-
-   asm volatile ("mflr 3");
-   vals[0] = lr;
-   sp = (unsigned long *) *orig_sp;
-   sp = (unsigned long *) *sp;
-   for (i = 1; i < max_size; i++) {
-       if (sp == 0) {
-           break;
-       }
-       vals[i] = *(sp + 1);
-       sp = (unsigned long *) *sp;
-   }
-   return i;
-}
-
-void __global_restore_flags(unsigned long flags)
-{
-   switch (flags) {
-   case 0:
-       __global_cli();
-       break;
-   case 1:
-       __global_sti();
-       break;
-   case 2:
-       local_irq_disable();
-       break;
-   case 3:
-       local_irq_enable();
-       break;
-   default: {
-       unsigned long trace[5];
-       int count;
-       int i;
-
-       printk("global_restore_flags: %08lx (%08lx)\n",
-              flags, (&flags)[-1]);
-       count = tb(trace, 5);
-       printk("tb:");
-       for (i = 0; i < count; i++) {
-           printk(" %8.8lx", trace[i]);
-       }
-       printk("\n");
-   }
-   }
-}
+void synchronize_irq(unsigned int irq)
+{
+   /* is there anything to synchronize with? */
+   if (!irq_desc[irq].action)
+       return;
+
+   while (irq_desc[irq].status & IRQ_INPROGRESS)
+       barrier();
+}
 #endif /* CONFIG_SMP */
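For context on the interface change above: synchronize_irq() used to take no argument and spin on the global interrupt bookkeeping; it now takes an IRQ number and waits only until that line's handler has finished on every CPU. A minimal, hypothetical driver-teardown sketch of how the new call is meant to be used (struct mydev, mydev_stop_hw() and MYDEV_IRQ are invented for illustration; they are not part of this commit):

    #include <linux/interrupt.h>

    static void mydev_shutdown(struct mydev *dev)
    {
        mydev_stop_hw(dev);          /* tell the device to stop raising interrupts */
        synchronize_irq(MYDEV_IRQ);  /* wait until no CPU is still inside our handler */
        free_irq(MYDEV_IRQ, dev);    /* free_irq() now also synchronizes internally */
    }

On uniprocessor builds the new hardirq.h (further down in this commit) turns synchronize_irq() into a plain barrier(), so the same driver code works on UP and SMP.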
arch/ppc/kernel/ppc_htab.c

@@ -381,47 +381,6 @@ static ssize_t ppc_htab_write(struct file * file, const char * buffer,
    }
    }
    return count;
-#if 0 /* resizing htab is a bit difficult right now -- Cort */
-   unsigned long size;
-   extern void reset_SDR1(void);
-
-   /* only know how to set size right now */
-   if ( strncmp( buffer, "size ", 5) )
-       return -EINVAL;
-
-   size = simple_strtoul( &buffer[5], NULL, 10 );
-
-   /* only allow to shrink */
-   if ( size >= Hash_size>>10 )
-       return -EINVAL;
-
-   /* minimum size of htab */
-   if ( size < 64 )
-       return -EINVAL;
-
-   /* make sure it's a multiple of 64k */
-   if ( size % 64 )
-       return -EINVAL;
-
-   printk("Hash table resize to %luk\n", size);
-   /*
-    * We need to rehash all kernel entries for the new htab size.
-    * Kernel only since we do a flush_tlb_all().  Since it's kernel
-    * we only need to bother with vsids 0-15.  To avoid problems of
-    * clobbering un-rehashed values we put the htab at a new spot
-    * and put everything there.
-    * -- Cort
-    */
-   Hash_size = size<<10;
-   Hash_mask = (Hash_size >> 6) - 1;
-   _SDR1 = __pa(Hash) | (Hash_mask >> 10);
-   flush_tlb_all();
-
-   reset_SDR1();
-#endif
-   return count;
 #else /* CONFIG_PPC_STD_MMU */
    return 0;
arch/ppc/kernel/ppc_ksyms.c

@@ -207,12 +207,6 @@ EXPORT_SYMBOL(last_task_used_altivec);
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(global_irq_lock);
-EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
 #ifdef CONFIG_DEBUG_SPINLOCK
 EXPORT_SYMBOL(_raw_spin_lock);
 EXPORT_SYMBOL(_raw_spin_unlock);
arch/ppc/platforms/prep_nvram.c → arch/ppc/kernel/prep_nvram.c

@@ -2,10 +2,15 @@
  * BK Id: %F% %I% %G% %U% %#%
  */
 /*
- * arch/ppc/platforms/prep_nvram.c
+ * arch/ppc/kernel/prep_nvram.c
  *
  * Copyright (C) 1998  Corey Minyard
  *
+ * This reads the NvRAM on PReP compliant machines (generally from IBM or
+ * Motorola).  Motorola kept the format of NvRAM in their ROM, PPCBUG, the
+ * same, long after they had stopped producing PReP compliant machines.  So
+ * this code is useful in those cases as well.
+ *
  */
 #include <linux/init.h>
 #include <linux/delay.h>
arch/ppc/kernel/process.c

@@ -202,8 +202,7 @@ void __switch_to(struct task_struct *prev, struct task_struct *new)
    struct thread_struct *new_thread, *old_thread;
    unsigned long s;

-   local_save_flags(s);
-   local_irq_disable();
+   local_irq_save(s);
 #if CHECK_STACK
    check_stack(prev);
    check_stack(new);
arch/ppc/kernel/temp.c

@@ -124,27 +124,27 @@ void TAUupdate(int cpu)
 void TAUException(struct pt_regs * regs)
 {
-   unsigned long cpu = smp_processor_id();
+   int cpu = smp_processor_id();

-   hardirq_enter(cpu);
+   irq_enter();
    tau[cpu].interrupts++;

    TAUupdate(cpu);

-   hardirq_exit(cpu);
-   return;
+   irq_exit();
 }
 #endif /* CONFIG_TAU_INT */

 static void tau_timeout(void * info)
 {
-   unsigned long cpu = smp_processor_id();
+   int cpu;
    unsigned long flags;
    int size;
    int shrink;

    /* disabling interrupts *should* be okay */
-   save_flags(flags);
-   cli();
+   local_irq_save(flags);
+   cpu = smp_processor_id();
 #ifndef CONFIG_TAU_INT
    TAUupdate(cpu);

@@ -186,7 +186,7 @@ static void tau_timeout(void * info)
    */
    mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);

-   restore_flags(flags);
+   local_irq_restore(flags);
 }

 static void tau_timeout_smp(unsigned long unused)
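The temp.c changes are one instance of the general 2.5-era conversion away from save_flags()/cli()/restore_flags(), which had SMP-global semantics. The replacement idiom is purely local to the current CPU; a sketch with a placeholder critical section:

    unsigned long flags;

    local_irq_save(flags);      /* disable interrupts on this CPU, remember prior state */
    /* ... touch the per-CPU thermal-assist (TAU) state ... */
    local_irq_restore(flags);   /* restore exactly the interrupt state that was saved */

Note also that smp_processor_id() is now read after interrupts are disabled, so the value cannot go stale if the task were preempted and migrated in between.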
arch/ppc/kernel/time.c

@@ -75,7 +75,7 @@ u64 jiffies_64;
 unsigned long disarm_decr[NR_CPUS];

-extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
+extern struct timezone sys_tz;

 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;

@@ -151,7 +151,7 @@ static inline void ppc_do_profile (unsigned long nip)
  * with interrupts disabled.
  * We set it up to overflow again in 1/HZ seconds.
  */
-int timer_interrupt(struct pt_regs * regs)
+void timer_interrupt(struct pt_regs * regs)
 {
    int next_dec;
    unsigned long cpu = smp_processor_id();

@@ -161,7 +161,7 @@ int timer_interrupt(struct pt_regs * regs)
    if (atomic_read(&ppc_n_lost_interrupts) != 0)
        do_IRQ(regs);

-   hardirq_enter(cpu);
+   irq_enter();

    while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
        jiffy_stamp += tb_ticks_per_jiffy;

@@ -214,12 +214,7 @@ int timer_interrupt(struct pt_regs * regs)
    if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
        ppc_md.heartbeat();

-   hardirq_exit(cpu);
-   if (softirq_pending(cpu))
-       do_softirq();
-   return 1; /* lets ret_from_int know we can do checks */
+   irq_exit();
 }

 #endif /* CONFIG_PPC_ISERIES */

@@ -358,14 +353,11 @@ void __init time_init(void)
    /* Not exact, but the timer interrupt takes care of this */
    set_dec(tb_ticks_per_jiffy);

-   /* If platform provided a timezone (pmac), we correct the time
-    * using do_sys_settimeofday() which in turn calls warp_clock()
-    */
+   /* If platform provided a timezone (pmac), we correct the time */
    if (time_offset) {
-       struct timezone tz;
-       tz.tz_minuteswest = -time_offset / 60;
-       tz.tz_dsttime = 0;
-       do_sys_settimeofday(NULL, &tz);
+       sys_tz.tz_minuteswest = -time_offset / 60;
+       sys_tz.tz_dsttime = 0;
+       xtime.tv_sec -= time_offset;
    }
 }

@@ -373,6 +365,11 @@ void __init time_init(void)
 #define STARTOFTIME 1970
 #define SECDAY      86400L
 #define SECYR       (SECDAY * 365)
+/*
+ * Note: this is wrong for 2100, but our signed 32-bit time_t will
+ * have overflowed long before that, so who cares.  -- paulus
+ */
 #define leapyear(year)      ((year) % 4 == 0)
 #define days_in_year(a)     (leapyear(a) ? 366 : 365)
 #define days_in_month(a)    (month_days[(a) - 1])
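The new comment above leapyear() argues that the simplified "divisible by four" rule is safe because a signed 32-bit time_t overflows long before the year-2100 exception matters. A quick stand-alone check of that claim (ordinary userspace C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        long max_time_t = 0x7fffffffL;   /* largest signed 32-bit time_t, seconds since 1970 */
        long avg_year   = 31556952L;     /* average Gregorian year in seconds */

        printf("32-bit time_t runs out around the year %ld\n",
               1970 + max_time_t / avg_year);          /* prints ~2038 */
        printf("so leapyear(2100), which the macro would get wrong, is never evaluated\n");
        return 0;
    }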
arch/ppc/mm/fault.c

@@ -189,16 +189,18 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
    */
 survive:
    switch (handle_mm_fault(mm, vma, address, is_write)) {
-   case 1:
+   case VM_FAULT_MINOR:
        current->min_flt++;
        break;
-   case 2:
+   case VM_FAULT_MAJOR:
        current->maj_flt++;
        break;
-   case 0:
+   case VM_FAULT_SIGBUS:
        goto do_sigbus;
-   default:
+   case VM_FAULT_OOM:
        goto out_of_memory;
+   default:
+       BUG();
    }

    up_read(&mm->mmap_sem);
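The switch now uses the symbolic return values of handle_mm_fault() rather than bare integers, and any unexpected value hits BUG() instead of silently falling into the out-of-memory path. Restating the new switch with the meaning of each case spelled out (the comments are explanatory annotations, mirroring what the old numeric cases did):

    switch (handle_mm_fault(mm, vma, address, is_write)) {
    case VM_FAULT_MINOR:    /* was literal 1: fault satisfied without I/O */
        current->min_flt++;
        break;
    case VM_FAULT_MAJOR:    /* was literal 2: fault required I/O (major fault) */
        current->maj_flt++;
        break;
    case VM_FAULT_SIGBUS:   /* was literal 0: deliver SIGBUS */
        goto do_sigbus;
    case VM_FAULT_OOM:      /* previously hidden under default: */
        goto out_of_memory;
    default:
        BUG();              /* any other value is now treated as a kernel bug */
    }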
arch/ppc/mm/mmu_decl.h

@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>

 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
            unsigned int size, int flags);

@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)  _tlbie(va)
 #define MMU_init_hw()          do { } while(0)
+#define mmu_mapin_ram()        (0UL)

 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)  _tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()        (0UL)

 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);

 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors.  We need tlbie/tlbsync here
arch/ppc/mm/pgtable.c

@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
    unsigned long v, p, s, f;

-#ifdef HAVE_BATS
-   if (!__map_without_bats)
-       bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-   v = KERNELBASE;
-   p = PPC_MEMSTART;
-   for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-       /* On the MPC8xx, we want the page shared so we
-        * don't get ASID compares on kernel space.
-        */
-       f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-       /* Allows stub to set breakpoints everywhere */
-       f |= _PAGE_WRENABLE;
-#else  /* !CONFIG_KGDB && !CONFIG_XMON */
-       if ((char *) v < _stext || (char *) v >= etext)
-           f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
-       else
-           /* On the powerpc (not all), no user access
-              forces R/W kernel access */
-           f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+   s = mmu_mapin_ram();
+   v = KERNELBASE + s;
+   p = PPC_MEMSTART + s;
+   for (; s < total_lowmem; s += PAGE_SIZE) {
+       if ((char *) v >= _stext && (char *) v < etext)
+           f = _PAGE_RAM_TEXT;
+       else
+           f = _PAGE_RAM;
        map_page(v, p, f);
        v += PAGE_SIZE;
        p += PAGE_SIZE;
arch/ppc/mm/ppc_mmu.c

@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
    return 0;
 }

-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
    unsigned long tot, bl, done;
    unsigned long max_size = (256<<20);
    unsigned long align;

+   if (__map_without_bats)
+       return 0;
+
    /* Set up BAT2 and if necessary BAT3 to cover RAM. */

    /* Make sure we don't map a block larger than the

@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
            break;
        setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_KERNEL);
        done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
    }
+
+   return done;
 }

 /*
arch/ppc/mm/tlb.c

@@ -31,13 +31,42 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>

 #include "mmu_decl.h"

+/*
+ * Called when unmapping pages to flush entries from the TLB/hash table.
+ */
+void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+   unsigned long ptephys;
+
+   if (Hash != 0) {
+       ptephys = __pa(ptep) & PAGE_MASK;
+       flush_hash_pages(mm->context, addr, ptephys, 1);
+   }
+}
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+void tlb_flush(mmu_gather_t *tlb)
+{
+   if (Hash == 0) {
+       /*
+        * 603 needs to flush the whole TLB here since
+        * it doesn't use a hash table.
+        */
+       _tlbia();
+   }
+}
+
 /*
  * TLB flushing:
  *
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages

@@ -91,29 +120,6 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
    }
 }

-/*
- * Flush all tlb/hash table entries (except perhaps for those
- * mapping RAM starting at PAGE_OFFSET, since they never change).
- */
-void
-flush_tlb_all(void)
-{
-   /*
-    * Just flush the kernel part of the address space, that's
-    * all that the current callers of this require.
-    * Eventually I hope to persuade the powers that be that
-    * we can and should dispense with flush_tlb_all().
-    *  -- paulus.
-    *
-    * In fact this should never get called now that we
-    * have flush_tlb_kernel_range.  -- paulus
-    */
-   printk(KERN_ERR "flush_tlb_all called from %p\n",
-          __builtin_return_address(0));
-   flush_range(&init_mm, TASK_SIZE, ~0UL);
-   FINISH_FLUSH;
-}
-
 /*
  * Flush kernel TLB entries in the given range
  */

@@ -124,24 +130,19 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 }

 /*
- * Flush all the (user) entries for the address space described
- * by mm.  We can't rely on mm->mmap describing all the entries
- * that might be in the hash table.
+ * Flush all the (user) entries for the address space described by mm.
  */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-   struct vm_area_struct *mp;
-
    if (Hash == 0) {
        _tlbia();
        return;
    }

-   for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
-       flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+   if (mm->map_count) {
+       struct vm_area_struct *mp;
+       for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+           flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+   } else {
+       flush_range(mm, 0, TASK_SIZE);
+   }
    FINISH_FLUSH;
 }

@@ -161,7 +162,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
    FINISH_FLUSH;
 }

-
 /*
  * For each address in the range, find the pte for the address
  * and check _PAGE_HASHPTE bit; if it is set, find and destroy
arch/ppc/platforms/Makefile

@@ -32,7 +32,7 @@ endif
 obj-$(CONFIG_ALL_PPC)      += pmac_pic.o pmac_setup.o pmac_time.o \
                    pmac_feature.o pmac_pci.o chrp_setup.o \
                    chrp_time.o chrp_pci.o prep_pci.o \
-                   prep_time.o prep_nvram.o prep_setup.o
+                   prep_time.o prep_setup.o
 ifeq ($(CONFIG_ALL_PPC),y)
 obj-$(CONFIG_NVRAM)        += pmac_nvram.o
 endif

@@ -50,7 +50,7 @@ obj-$(CONFIG_MENF1) += menf1_setup.o menf1_pci.o
 obj-$(CONFIG_MVME5100)     += mvme5100_setup.o mvme5100_pci.o
 obj-$(CONFIG_PCORE)        += pcore_setup.o pcore_pci.o
 obj-$(CONFIG_POWERPMC250)  += powerpmc250.o
-obj-$(CONFIG_PPLUS)        += pplus_pci.o pplus_setup.o prep_nvram.o
+obj-$(CONFIG_PPLUS)        += pplus_pci.o pplus_setup.o
 obj-$(CONFIG_PRPMC750)     += prpmc750_setup.o prpmc750_pci.o
 obj-$(CONFIG_PRPMC800)     += prpmc800_setup.o prpmc800_pci.o
 obj-$(CONFIG_SANDPOINT)    += sandpoint_setup.o sandpoint_pci.o
arch/ppc/platforms/cpc700.h

@@ -6,7 +6,7 @@
  * Author: Mark A. Greer
  *         mgreer@mvista.com
  *
- * Copyright 2000 MontaVista Software Inc.
+ * Copyright 2000-2002 MontaVista Software Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the

@@ -101,7 +101,7 @@
  * UIC Exports...
  */
 extern struct hw_interrupt_type cpc700_pic;
-extern unsigned int cpc700_irq_assigns[27][2];
+extern unsigned int cpc700_irq_assigns[32][2];

 extern void __init cpc700_init_IRQ(void);
 extern int cpc700_get_irq(struct pt_regs *);
arch/ppc/platforms/cpc700_pic.c

@@ -8,7 +8,7 @@
  *         mporter@mvista.com
  *         jpeters@mvista.com
  *
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001-2002 MontaVista Software Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the

@@ -168,7 +168,7 @@ cpc700_init_IRQ(void)
        cpc700_pic_init_irq(i);
    }

-   for (i = 20; i < 27; i++) {
+   for (i = 20; i < 32; i++) {
        irq_desc[i].handler = &cpc700_pic;
        cpc700_pic_init_irq(i);
    }
arch/ppc/platforms/iSeries_pic.c

@@ -42,7 +42,7 @@ extern void __no_lpq_restore_flags(unsigned long flags);
 extern void iSeries_smp_message_recv(struct pt_regs *);

 unsigned lpEvent_count = 0;

-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
 {
    int cpu = smp_processor_id();
    unsigned long flags;

@@ -52,7 +52,7 @@ int do_IRQ(struct pt_regs *regs)
    if (is_soft_enabled())
        BUG();

-   hardirq_enter(cpu);
+   irq_enter();

    paca = (struct Paca *)mfspr(SPRG1);

@@ -65,21 +65,16 @@ int do_IRQ(struct pt_regs *regs)
    lpq = paca->lpQueuePtr;
    if (lpq) {
-       local_save_flags(flags);
-       local_irq_disable();
+       local_irq_save(flags);
        lpEvent_count += ItLpQueue_process(lpq, regs);
        local_irq_restore(flags);
    }

-   hardirq_exit(cpu);
+   irq_exit();

    if (paca->xLpPacaPtr->xDecrInt) {
        paca->xLpPacaPtr->xDecrInt = 0;
        /* Signal a fake decrementer interrupt */
        timer_interrupt(regs);
    }
-
-   if (softirq_pending(cpu))
-       do_softirq();
-
-   return 1; /* lets ret_from_int know we can do checks */
 }
arch/ppc/platforms/iSeries_time.c

@@ -102,7 +102,7 @@ int timerRetEnabled = 0;
 int timerRetDisabled = 0;

 extern unsigned long iSeries_dec_value;

-int timer_interrupt(struct pt_regs * regs)
+void timer_interrupt(struct pt_regs * regs)
 {
    long next_dec;
    struct Paca * paca;

@@ -117,7 +117,7 @@ int timer_interrupt(struct pt_regs * regs)
    else
        timerRetDisabled++;

-   hardirq_enter(cpu);
+   irq_enter();

    if (!user_mode(regs))
        ppc_do_profile(instruction_pointer(regs));

@@ -149,9 +149,5 @@ int timer_interrupt(struct pt_regs * regs)
    paca->xLpPacaPtr->xDecrInt = 0;
    set_dec((unsigned)next_dec);

-   hardirq_exit(cpu);
-
-   if (softirq_pending(cpu))
-       do_softirq();
-
-   return 1;
+   irq_exit();
 }
arch/ppc/platforms/lopec_setup.c

@@ -33,8 +33,15 @@
 #include <asm/bootinfo.h>
 #include <asm/mpc10x.h>
 #include <asm/hw_irq.h>
+#include <asm/prep_nvram.h>
+#include <asm/keyboard.h>

+extern char saved_command_line[];
 extern void lopec_find_bridges(void);
+extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
+       char raw_mode);
+extern char pckbd_unexpected_up(unsigned char keycode);
+extern unsigned char pckbd_sysrq_xlate[128];

 /*
  * Define all of the IRQ senses and polarities.  Taken from the

@@ -333,6 +340,21 @@ lopec_setup_arch(void)
 #ifdef CONFIG_DUMMY_CONSOLE
    conswitchp = &dummy_con;
 #endif
+#ifdef CONFIG_PPCBUG_NVRAM
+   /* Read in NVRAM data */
+   init_prep_nvram();
+
+   /* if no bootargs, look in NVRAM */
+   if (cmd_line[0] == '\0') {
+       char *bootargs;
+       bootargs = prep_nvram_get_var("bootargs");
+       if (bootargs != NULL) {
+           strcpy(cmd_line, bootargs);
+           /* again.. */
+           strcpy(saved_command_line, cmd_line);
+       }
+   }
+#endif
 }

 void __init

@@ -362,6 +384,14 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
    ppc_md.find_end_of_memory = lopec_find_end_of_memory;
    ppc_md.setup_io_mappings = lopec_map_io;

+#ifdef CONFIG_VT
+   ppc_md.kbd_translate = pckbd_translate;
+   ppc_md.kbd_unexpected_up = pckbd_unexpected_up;
+#ifdef CONFIG_MAGIC_SYSRQ
+   ppc_md.ppc_kbd_sysrq_xlate = pckbd_sysrq_xlate;
+#endif /* CONFIG_MAGIC_SYSRQ */
+#endif /* CONFIG_VT */
+
    ppc_md.time_init = todc_time_init;
    ppc_md.set_rtc_time = todc_set_rtc_time;
    ppc_md.get_rtc_time = todc_get_rtc_time;
arch/ppc/platforms/pplus_setup.c

@@ -9,7 +9,7 @@
  *         Cort Dougan, Johnnie Peters, Matt Porter, and
  *         Troy Benjegerdes.
  *
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001-2002 MontaVista Software Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the

@@ -76,8 +76,9 @@
 TODC_ALLOC();

+extern char saved_command_line[];
 extern void pplus_setup_hose(void);
 extern void pplus_set_VIA_IDE_native(void);

 extern unsigned long loops_per_jiffy;

@@ -131,7 +132,8 @@ pplus_setup_arch(void)
    ROOT_DEV = Root_SDA2;
 #endif

-   printk("PowerPlus port (C) 2001 MontaVista Software, Inc. (source@mvista.com)\n");
+   printk(KERN_INFO "Motorola PowerPlus Platform\n");
+   printk(KERN_INFO "Port by MontaVista Software, Inc. (source@mvista.com)\n");

    if (ppc_md.progress)
        ppc_md.progress("pplus_setup_arch: raven_init\n", 0);

@@ -144,6 +146,21 @@ pplus_setup_arch(void)
    conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
 #endif
+#ifdef CONFIG_PPCBUG_NVRAM
+   /* Read in NVRAM data */
+   init_prep_nvram();
+
+   /* if no bootargs, look in NVRAM */
+   if (cmd_line[0] == '\0') {
+       char *bootargs;
+       bootargs = prep_nvram_get_var("bootargs");
+       if (bootargs != NULL) {
+           strcpy(cmd_line, bootargs);
+           /* again.. */
+           strcpy(saved_command_line, cmd_line);
+       }
+   }
+#endif

    if (ppc_md.progress)
        ppc_md.progress("pplus_setup_arch: exit\n", 0);
arch/ppc/platforms/spruce_setup.c

@@ -6,7 +6,7 @@
  * Authors: Johnnie Peters <jpeters@mvista.com>
  *          Matt Porter <mporter@mvista.com>
  *
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001-2002 MontaVista Software Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the

@@ -70,7 +70,7 @@ extern char cmd_line[];
  *
  * First entry is the sensitivity (level/edge), second is the polarity.
  */
-unsigned int cpc700_irq_assigns[27][2] = {
+unsigned int cpc700_irq_assigns[32][2] = {
    { 1, 1 },   /* IRQ  0: ECC Correctable Error - rising edge */
    { 1, 1 },   /* IRQ  1: PCI Write Mem Range    - rising edge */
    { 0, 1 },   /* IRQ  2: PCI Write Command Reg  - active high */
include/asm-ppc/hardirq.h

@@ -15,8 +15,6 @@
  */
 typedef struct {
    unsigned long __softirq_pending;    /* set_bit is used on this */
-   unsigned int __local_irq_count;
-   unsigned int __local_bh_count;
    unsigned int __syscall_count;
    struct task_struct * __ksoftirqd_task;
    unsigned int __last_jiffy_stamp;

@@ -25,91 +23,87 @@ typedef struct {
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */

 #define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
-/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
- */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-   (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-
-#ifndef CONFIG_SMP
-
-#define hardirq_trylock(cpu)   (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)   do { } while (0)
-
-#define hardirq_enter(cpu) do { preempt_disable(); local_irq_count(cpu)++; } while (0)
-#define hardirq_exit(cpu)  do { local_irq_count(cpu)--; preempt_enable(); } while (0)
-
-#define synchronize_irq()  do { } while (0)
-#define release_irqlock(cpu)   do { } while (0)
-
-#else /* CONFIG_SMP */
-
-#include <asm/atomic.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock;
-
-static inline int irqs_running (void)
-{
-   int i;
-
-   for (i = 0; i < NR_CPUS; i++)
-       if (local_irq_count(i))
-           return 1;
-   return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
-   /* if we didn't own the irq lock, just ignore.. */
-   if (global_irq_holder == (unsigned char) cpu) {
-       global_irq_holder = NO_PROC_ID;
-       clear_bit(0, &global_irq_lock);
-   }
-}
-
-static inline void hardirq_enter(int cpu)
-{
-   unsigned int loops = 10000000;
-
-   preempt_disable();
-   ++local_irq_count(cpu);
-   while (test_bit(0, &global_irq_lock)) {
-       if (cpu == global_irq_holder) {
-           printk("uh oh, interrupt while we hold global irq lock! (CPU %d)\n", cpu);
-#ifdef CONFIG_XMON
-           xmon(0);
-#endif
-           break;
-       }
-       if (loops-- == 0) {
-           printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
-#ifdef CONFIG_XMON
-           xmon(0);
-#endif
-       }
-   }
-}
-
-static inline void hardirq_exit(int cpu)
-{
-   --local_irq_count(cpu);
-   preempt_enable();
-}
-
-static inline int hardirq_trylock(int cpu)
-{
-   return !test_bit(0, &global_irq_lock);
-}
-
-#define hardirq_endlock(cpu)   do { } while (0)
-
-extern void synchronize_irq(void);
-
-#endif /* CONFIG_SMP */
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * HARDIRQ_MASK: 0x0000ff00
+ * SOFTIRQ_MASK: 0x00ff0000
+ */
+
+#define PREEMPT_BITS   8
+#define SOFTIRQ_BITS   8
+#define HARDIRQ_BITS   8
+
+#define PREEMPT_SHIFT  0
+#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x)  ((1UL << (x))-1)
+
+#define PREEMPT_MASK   (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK   (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK   (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()    (preempt_count() & HARDIRQ_MASK)
+#define softirq_count()    (preempt_count() & SOFTIRQ_MASK)
+#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()       (hardirq_count())
+#define in_softirq()       (softirq_count())
+#define in_interrupt()     (irq_count())
+
+#define hardirq_trylock()  (!in_interrupt())
+#define hardirq_endlock()  do { } while (0)
+
+#define irq_enter()        (preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit()                         \
+do {                                   \
+   preempt_count() -= IRQ_EXIT_OFFSET;             \
+   if (!in_interrupt() && softirq_pending(smp_processor_id()))    \
+       do_softirq();                       \
+   preempt_enable_no_resched();                    \
+} while (0)
+
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq)  barrier()
+#else
+extern void synchronize_irq(unsigned int irq);
+#endif /* CONFIG_SMP */

 extern void show_stack(unsigned long *sp);

 #endif /* __ASM_HARDIRQ_H */
 #endif /* __KERNEL__ */
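The new header folds the old per-CPU __local_irq_count/__local_bh_count fields into the single preempt_count word, using the bit layout described in its comment. Here is a stand-alone illustration of that packing (plain userspace C; the shift/mask constants are copied from the header above, everything else is made up for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define PREEMPT_BITS  8
    #define SOFTIRQ_BITS  8
    #define HARDIRQ_BITS  8

    #define PREEMPT_SHIFT 0
    #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define MASK(bits)    ((1UL << (bits)) - 1)

    int main(void)
    {
        /* a pretend preempt_count(): one hardirq in progress, nested inside a
         * softirq, with preemption disabled once */
        uint32_t count = (1 << HARDIRQ_SHIFT) | (1 << SOFTIRQ_SHIFT) | 1;

        printf("preempt depth: %lu\n", (unsigned long)(count & MASK(PREEMPT_BITS)));
        printf("softirq count: %lu\n", (unsigned long)((count >> SOFTIRQ_SHIFT) & MASK(SOFTIRQ_BITS)));
        printf("hardirq count: %lu\n", (unsigned long)((count >> HARDIRQ_SHIFT) & MASK(HARDIRQ_BITS)));
        printf("in_interrupt:  %s\n", (count & ~MASK(PREEMPT_BITS)) ? "yes" : "no");
        return 0;
    }

irq_enter()/irq_exit() simply add and subtract HARDIRQ_OFFSET from this word, which is why the callers in irq.c, time.c and temp.c no longer need to pass a CPU number.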
include/asm-ppc/hw_irq.h

@@ -8,21 +8,18 @@
 #ifndef _PPC_HW_IRQ_H
 #define _PPC_HW_IRQ_H

 extern unsigned long timer_interrupt_intercept;
 extern unsigned long do_IRQ_intercept;
-extern int timer_interrupt(struct pt_regs *);
+extern void timer_interrupt(struct pt_regs *);
 extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);

 #define INLINE_IRQS

-#ifdef INLINE_IRQS
-
 #define mfmsr()        ({unsigned int rval; \
            asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #define mtmsr(v)   asm volatile("mtmsr %0" : : "r" (v))
-#define local_save_flags(flags)    ((flags) = mfmsr())
-#define local_irq_restore(flags)   mtmsr(flags)
+
+#define irqs_disabled()    ((mfmsr() & MSR_EE) == 0)
+
+#ifdef INLINE_IRQS

 static inline void local_irq_disable(void)
 {

@@ -40,7 +37,7 @@ static inline void local_irq_enable(void)
    mtmsr(msr | MSR_EE);
 }

-static inline void __do_save_and_cli(unsigned long *flags)
+static inline void local_irq_save_ptr(unsigned long *flags)
 {
    unsigned long msr;
    msr = mfmsr();

@@ -49,7 +46,9 @@ static inline void __do_save_and_cli(unsigned long *flags)
    __asm__ __volatile__("": : :"memory");
 }

-#define local_irq_save(flags)  __do_save_and_cli(&flags)
+#define local_save_flags(flags)    ((flags) = mfmsr())
+#define local_irq_save(flags)  local_irq_save_ptr(&flags)
+#define local_irq_restore(flags)   mtmsr(flags)

 #else

@@ -57,9 +56,8 @@ extern void local_irq_enable(void);
 extern void local_irq_disable(void);
 extern void local_irq_restore(unsigned long);
 extern void local_save_flags_ptr(unsigned long *);
-extern unsigned long local_irq_enable_end, local_irq_disable_end, local_irq_restore_end, local_save_flags_ptr_end;

-#define local_save_flags(flags) local_save_flags_ptr((unsigned long *)&flags)
+#define local_save_flags(flags) local_save_flags_ptr(&flags)
 #define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})

 #endif
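The inline variants above implement interrupt masking purely by toggling the external-interrupt enable bit MSR[EE] via mfmsr/mtmsr. Those are privileged instructions, so the following is only a rough userspace model with a faked MSR, to show what local_irq_save()/local_irq_restore() reduce to on this architecture (everything here is illustrative, not kernel code):

    #include <stdio.h>

    #define MSR_EE 0x8000UL                     /* external-interrupt enable bit */

    static unsigned long fake_msr = MSR_EE;     /* stand-in for the real MSR */

    static unsigned long mfmsr(void)            { return fake_msr; }
    static void          mtmsr(unsigned long v) { fake_msr = v; }

    static void local_irq_save_ptr(unsigned long *flags)
    {
        unsigned long msr = mfmsr();
        *flags = msr;                           /* remember the old state */
        mtmsr(msr & ~MSR_EE);                   /* clear EE: interrupts off */
    }

    #define local_irq_save(flags)    local_irq_save_ptr(&(flags))
    #define local_irq_restore(flags) mtmsr(flags)

    int main(void)
    {
        unsigned long flags;

        local_irq_save(flags);
        printf("in critical section, EE=%d\n", (fake_msr & MSR_EE) != 0);
        local_irq_restore(flags);
        printf("restored, EE=%d\n", (fake_msr & MSR_EE) != 0);
        return 0;
    }

The new irqs_disabled() macro is the same MSR[EE] test expressed as a predicate.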
include/asm-ppc/pgtable.h

@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL   _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO   _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 #define _PAGE_RAM  _PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT _PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT (_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif

 #define PAGE_NONE  __pgprot(_PAGE_BASE)
 #define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
include/asm-ppc/rwsem.h

 /*
- * BK Id: SCCS/s.rwsem.h 1.6 05/17/01 18:14:25 cort
+ * BK Id: %F% %I% %G% %U% %#%
  */
 /*
  * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff

@@ -55,6 +55,7 @@ struct rw_semaphore {
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

 static inline void init_rwsem(struct rw_semaphore *sem)
 {

@@ -124,6 +125,19 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
    atomic_add(delta, (atomic_t *)(&sem->count));
 }

+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+   int tmp;
+
+   smp_wmb();
+   tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+   if (tmp < 0)
+       rwsem_downgrade_wake(sem);
+}
+
 /*
  * implement exchange and add functionality
  */
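__downgrade_write() is the architecture half of the rwsem downgrade operation: it atomically drops the writer's waiting bias while keeping a reader reference, and wakes queued readers if the resulting count shows waiters. The intended calling pattern, sketched with invented helper names and assuming the generic downgrade_write() wrapper routes here:

    down_write(&state_sem);
    update_shared_state();          /* exclusive phase: writers and readers excluded */
    downgrade_write(&state_sem);    /* become a reader without ever releasing the lock */
    consume_shared_state();         /* other readers may now proceed concurrently */
    up_read(&state_sem);

The point of downgrading rather than up_write()+down_read() is that no writer can sneak in between the two phases.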
include/asm-ppc/smplock.h

@@ -18,7 +18,7 @@ extern spinlock_t kernel_flag;
 #ifdef CONFIG_SMP
 #define kernel_locked()        spin_is_locked(&kernel_flag)
 #elif defined(CONFIG_PREEMPT)
-#define kernel_locked()        preempt_get_count()
+#define kernel_locked()        preempt_count()
 #endif

 /*
include/asm-ppc/softirq.h

@@ -5,34 +5,30 @@
 #ifndef __ASM_SOFTIRQ_H
 #define __ASM_SOFTIRQ_H

-#include <asm/atomic.h>
+#include <linux/preempt.h>
 #include <asm/hardirq.h>

 #define local_bh_disable()         \
 do {                       \
-   preempt_disable();          \
-   local_bh_count(smp_processor_id())++;   \
+   preempt_count() += SOFTIRQ_OFFSET;  \
    barrier();              \
 } while (0)

 #define __local_bh_enable()            \
 do {                       \
    barrier();              \
-   local_bh_count(smp_processor_id())--;   \
-   preempt_enable();           \
+   preempt_count() -= SOFTIRQ_OFFSET;  \
 } while (0)

 #define local_bh_enable()                  \
 do {                               \
-   barrier();                      \
-   if (!--local_bh_count(smp_processor_id())       \
-       && softirq_pending(smp_processor_id())) {       \
-       do_softirq();                   \
-   }                           \
-   preempt_enable();                   \
+   __local_bh_enable();                    \
+   if (unlikely(!in_interrupt()                \
+            && softirq_pending(smp_processor_id())))   \
+       do_softirq();                   \
+   if (preempt_count() == 0)               \
+       preempt_check_resched();            \
 } while (0)

-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
 #endif /* __ASM_SOFTIRQ_H */
 #endif /* __KERNEL__ */
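local_bh_disable()/local_bh_enable() now work by moving preempt_count() in units of SOFTIRQ_OFFSET instead of bumping a separate per-CPU counter; the enable path also runs any pending softirqs and re-checks preemption. The usual pairing, as a sketch with a hypothetical piece of protected data:

    local_bh_disable();   /* preempt_count() += SOFTIRQ_OFFSET: keeps tasklets/timers off this CPU */
    /* ... update data shared with a tasklet or timer handler ... */
    local_bh_enable();    /* drop the offset, run pending softirqs, then maybe reschedule */

in_softirq() itself moved to hardirq.h, where it is derived from the same packed counter.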
include/asm-ppc/system.h

@@ -96,27 +96,6 @@ extern unsigned int rtas_data;
 struct pt_regs;
 extern void dump_regs(struct pt_regs *);

-#ifndef CONFIG_SMP
-
-#define cli()  local_irq_disable()
-#define sti()  local_irq_enable()
-#define save_flags(flags)  local_save_flags(flags)
-#define restore_flags(flags)   local_irq_restore(flags)
-#define save_and_cli(flags)    local_irq_save(flags)
-
-#else /* CONFIG_SMP */
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
-#endif /* !CONFIG_SMP */
-
 static __inline__ unsigned long
 xchg_u32(volatile void *p, unsigned long val)
 {
include/asm-ppc/thread_info.h

@@ -22,25 +22,25 @@ struct thread_info {
    struct exec_domain *exec_domain;    /* execution domain */
    unsigned long   flags;          /* low level flags */
    int     cpu;            /* cpu we're on */
-   int     preempt_count;      /* not used at present */
-   int     softirq_count;
-   int     hardirq_count;
+   int     preempt_count;
 };

-/*
- * macros/functions for gaining access to the thread information structure
- */
 #define INIT_THREAD_INFO(tsk)          \
 {                      \
-   task:       &tsk,           \
-   exec_domain:    &default_exec_domain,   \
-   flags:      0,          \
-   cpu:        0,          \
+   .task =     &tsk,           \
+   .exec_domain =  &default_exec_domain,   \
+   .flags =    0,          \
+   .cpu =      0,          \
+   .preempt_count = 1          \
 }

 #define init_thread_info   (init_thread_union.thread_info)
 #define init_stack     (init_thread_union.stack)

+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
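Two independent things change here: the separate softirq/hardirq counters disappear (they now live inside preempt_count, per hardirq.h), and the initializer syntax is modernized. The latter is purely syntactic: GNU-style "field:" initializers give way to C99 designated initializers, plus the new starting value .preempt_count = 1. A plain C illustration of the two spellings:

    struct point { int x, y; };

    struct point a = { x: 1, y: 2 };      /* old GNU extension syntax */
    struct point b = { .x = 1, .y = 2 };  /* C99 designated initializers, as the macro now uses */

Starting preempt_count at 1 means a freshly created thread is non-preemptible until the scheduler explicitly enables preemption for it.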
include/asm-ppc/tlb.h

 /*
  * BK Id: SCCS/s.tlb.h 1.5 05/17/01 18:14:26 cort
  * TLB shootdown specifics for PPC
  *
  * Copyright (C) 2002 Paul Mackerras, IBM Corp.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 #ifndef _PPC_TLB_H
 #define _PPC_TLB_H

 #include <linux/config.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/mmu.h>

 #ifdef CONFIG_PPC_STD_MMU
 /* Classic PPC with hash-table based MMU... */

 struct free_pte_ctx;
 extern void tlb_flush(struct free_pte_ctx *tlb);

 /* Get the generic bits... */
 #include <asm-generic/tlb.h>

 /* Nothing needed here in fact... */
 #define tlb_start_vma(tlb, vma)    do { } while (0)
 #define tlb_end_vma(tlb, vma)  do { } while (0)

 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                  unsigned long address);

 static inline void tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
                    unsigned long address)
 {
    if (pte_val(*ptep) & _PAGE_HASHPTE)
        flush_hash_entry(tlb->mm, ptep, address);
 }

 #else
 /* Embedded PPC with software-loaded TLB, very simple... */

 struct flush_tlb_arch { };

 #define tlb_init_arch(tlb, full_flush) do { } while (0)
 #define tlb_finish_arch(tlb)       do { } while (0)
 #define tlb_start_vma(tlb, vma)        do { } while (0)
 #define tlb_end_vma(tlb, vma)      do { } while (0)
 #define tlb_remove_tlb_entry(tlb, pte, address)    do { } while (0)
 #define tlb_flush(tlb)         flush_tlb_mm((tlb)->mm)

 /* Get the generic bits... */
 #include <asm-generic/tlb.h>

 #endif /* CONFIG_PPC_STD_MMU */
 #endif /* __PPC_TLB_H */
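This header plugs PPC into the generic mmu_gather ("TLB shootdown") machinery: on hash-table MMUs each PTE removal flushes the matching hash entry, and the final tlb_flush() (defined in arch/ppc/mm/tlb.c above) performs whatever full flush is still needed. Roughly how the generic code drives these hooks during an unmap, as a simplified sketch rather than the real zap code path:

    mmu_gather_t *tlb = tlb_gather_mmu(mm, 0);   /* 0: not a full-mm teardown */

    tlb_start_vma(tlb, vma);                     /* no-op on PPC */
    tlb_remove_tlb_entry(tlb, ptep, addr);       /* hash PTE flushed here if _PAGE_HASHPTE is set */
    tlb_remove_page(tlb, page);                  /* page queued for freeing by the generic code */
    tlb_end_vma(tlb, vma);                       /* no-op on PPC */

    tlb_finish_mmu(tlb, start, end);             /* ends up calling the arch tlb_flush(tlb) */

The software-TLB (embedded) branch simply defers everything to flush_tlb_mm(), since there is no hash table to keep in sync.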
include/asm-ppc/tlbflush.h

@@ -22,8 +22,6 @@ extern void _tlbia(void);
 #if defined(CONFIG_4xx)

-static inline void flush_tlb_all(void)
-   { _tlbia(); }
 static inline void flush_tlb_mm(struct mm_struct *mm)
    { _tlbia(); }
 static inline void flush_tlb_page(struct vm_area_struct *vma,

@@ -40,8 +38,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #elif defined(CONFIG_8xx)
 #define __tlbia()  asm volatile ("tlbia; sync" : : : "memory")

-static inline void flush_tlb_all(void)
-   { __tlbia(); }
 static inline void flush_tlb_mm(struct mm_struct *mm)
    { __tlbia(); }
 static inline void flush_tlb_page(struct vm_area_struct *vma,

@@ -58,7 +54,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #else  /* 6xx, 7xx, 7xxx cpus */

 struct mm_struct;
 struct vm_area_struct;
-extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,