Commit aebb0040
Authored Mar 17, 2003 by David S. Miller
    Merge nuts.ninka.net:/home/davem/src/BK/linus-2.5
    into nuts.ninka.net:/home/davem/src/BK/sparc-2.5

Parents: b486a581 3e6619ff
Showing 9 changed files with 472 additions and 405 deletions (+472 -405):
arch/sparc64/kernel/irq.c              +1    -112
arch/sparc64/kernel/smp.c              +38   -199
arch/sparc64/kernel/sparc64_ksyms.c    +5    -9
arch/sparc64/kernel/time.c             +411  -76
arch/sparc64/kernel/traps.c            +3    -2
arch/sparc64/mm/ultra.S                +3    -3
include/asm-sparc64/irq.h              +0    -2
include/asm-sparc64/spitfire.h         +0    -2
include/asm-sparc64/timer.h            +11   -0

arch/sparc64/kernel/irq.c
@@ -719,12 +719,8 @@ void handler_irq(int irq, struct pt_regs *regs)
 	 */
 	{
 		unsigned long clr_mask = 1 << irq;
-		unsigned long tick_mask;
+		unsigned long tick_mask = tick_ops->softint_mask;

-		if (SPARC64_USE_STICK)
-			tick_mask = (1UL << 16);
-		else
-			tick_mask = (1UL << 0);
 		if ((irq == 14) && (get_softint() & tick_mask)) {
 			irq = 0;
 			clr_mask = tick_mask;
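The hunk above turns a per-interrupt branch on SPARC64_USE_STICK into plain data: each chip's ops table (added in time.c below) carries the softint bit its timer raises. A standalone C sketch of that dispatch, with the two mask values taken from the patch (my illustration, not patch code):

    #include <stdio.h>

    #define TICK_SOFTINT_MASK   (1UL << 0)      /* pre-STICK chips */
    #define STICK_SOFTINT_MASK  (1UL << 16)     /* STICK-capable chips */

    static int is_timer_softint(unsigned long softint, unsigned long tick_mask)
    {
            /* new style: tick_mask comes from tick_ops->softint_mask */
            return (softint & tick_mask) != 0;
    }

    int main(void)
    {
            unsigned long pending = 1UL << 16;  /* pretend softint bit 16 is set */

            printf("%d\n", is_timer_softint(pending, STICK_SOFTINT_MASK)); /* 1 */
            printf("%d\n", is_timer_softint(pending, TICK_SOFTINT_MASK));  /* 0 */
            return 0;
    }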
@@ -946,113 +942,6 @@ int probe_irq_off(unsigned long mask)
 	return 0;
 }

-/* This is gets the master TICK_INT timer going. */
-void sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *),
-			 unsigned long *clock)
-{
-	unsigned long pstate;
-	extern unsigned long timer_tick_offset;
-	int node, err;
-#ifdef CONFIG_SMP
-	extern void smp_tick_init(void);
-#endif
-
-	if (!SPARC64_USE_STICK) {
-		node = linux_cpus[0].prom_node;
-		*clock = prom_getint(node, "clock-frequency");
-	} else {
-		node = prom_root_node;
-		*clock = prom_getint(node, "stick-frequency");
-	}
-	timer_tick_offset = *clock / HZ;
-#ifdef CONFIG_SMP
-	smp_tick_init();
-#endif
-
-	/* Register IRQ handler. */
-	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
-			  "timer", NULL);
-
-	if (err) {
-		prom_printf("Serious problem, cannot register TICK_INT\n");
-		prom_halt();
-	}
-
-	/* Guarentee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	/* Set things up so user can access tick register for profiling
-	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
-	 * read back of %tick after writing it.
-	 */
-	__asm__ __volatile__("	sethi	%%hi(0x80000000), %%g1\n"
-			     "	ba,pt	%%xcc, 1f\n"
-			     "	 sllx	%%g1, 32, %%g1\n"
-			     "	.align	64\n"
-			     "1:	rd	%%tick, %%g2\n"
-			     "	add	%%g2, 6, %%g2\n"
-			     "	andn	%%g2, %%g1, %%g2\n"
-			     "	wrpr	%%g2, 0, %%tick\n"
-			     "	rdpr	%%tick, %%g0"
-			     : /* no outputs */
-			     : /* no inputs */
-			     : "g1", "g2");
-
-	/* Workaround for Spitfire Errata (#54 I think??), I discovered
-	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
-	 * number 103640.
-	 *
-	 * On Blackbird writes to %tick_cmpr can fail, the
-	 * workaround seems to be to execute the wr instruction
-	 * at the start of an I-cache line, and perform a dummy
-	 * read back from %tick_cmpr right after writing to it. -DaveM
-	 */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("	rd	%%tick, %%g1\n"
-				     "	ba,pt	%%xcc, 1f\n"
-				     "	 add	%%g1, %0, %%g1\n"
-				     "	.align	64\n"
-				     "1:	wr	%%g1, 0x0, %%tick_cmpr\n"
-				     "	rd	%%tick_cmpr, %%g0"
-				     : /* no outputs */
-				     : "r" (timer_tick_offset)
-				     : "g1");
-	} else {
-		/* Let the user get at STICK too. */
-		__asm__ __volatile__("	sethi	%%hi(0x80000000), %%g1\n"
-				     "	sllx	%%g1, 32, %%g1\n"
-				     "	rd	%%asr24, %%g2\n"
-				     "	andn	%%g2, %%g1, %%g2\n"
-				     "	wr	%%g2, 0, %%asr24"
-				     : /* no outputs */
-				     : /* no inputs */
-				     : "g1", "g2");
-		__asm__ __volatile__("	rd	%%asr24, %%g1\n"
-				     "	add	%%g1, %0, %%g1\n"
-				     "	wr	%%g1, 0x0, %%asr25"
-				     : /* no outputs */
-				     : "r" (timer_tick_offset)
-				     : "g1");
-	}
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-
-	local_irq_enable();
-}
-
 #ifdef CONFIG_SMP
 static int retarget_one_irq(struct irqaction *p, int goal_cpu)
 {

arch/sparc64/kernel/smp.c
@@ -115,7 +115,6 @@ extern void cpu_probe(void);
 void __init smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
-	unsigned long pstate;
 	extern int bigkernel;
 	extern unsigned long kern_locked_tte_data;
@@ -133,50 +132,6 @@ void __init smp_callin(void)
 	cpu_probe();

-	/* Guarentee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	/* Set things up so user can access tick register for profiling
-	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
-	 * read back of %tick after writing it.
-	 */
-	__asm__ __volatile__("sethi %%hi(0x80000000), %%g1\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " sllx %%g1, 32, %%g1\n\t"
-			     ".align 64\n"
-			     "1: rd %%tick, %%g2\n\t"
-			     "add %%g2, 6, %%g2\n\t"
-			     "andn %%g2, %%g1, %%g2\n\t"
-			     "wrpr %%g2, 0, %%tick\n\t"
-			     "rdpr %%tick, %%g0"
-			     : /* no outputs */
-			     : /* no inputs */
-			     : "g1", "g2");
-
-	if (SPARC64_USE_STICK) {
-		/* Let the user get at STICK too. */
-		__asm__ __volatile__("sethi %%hi(0x80000000), %%g1\n\t"
-				     "sllx %%g1, 32, %%g1\n\t"
-				     "rd %%asr24, %%g2\n\t"
-				     "andn %%g2, %%g1, %%g2\n\t"
-				     "wr %%g2, 0, %%asr24"
-				     : /* no outputs */
-				     : /* no inputs */
-				     : "g1", "g2");
-	}
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-
 	smp_setup_percpu_timer();

 	local_irq_enable();
@@ -211,7 +166,7 @@ void cpu_panic(void)
 static unsigned long current_tick_offset;

-/* This stick register synchronization scheme is taken entirely from
+/* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
  * The only change I've made is to rework it so that the master
@@ -227,16 +182,7 @@ static unsigned long current_tick_offset;
 static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
 static unsigned long go[SLAVE + 1];

-#define DEBUG_STICK_SYNC	0
-
-static inline unsigned long get_stick (void)
-{
-	unsigned long val;
-
-	__asm__ __volatile__("rd %%asr24, %0" : "=r" (val));
-	return val;
-}
+#define DEBUG_TICK_SYNC	0

 static inline long get_delta (long *rt, long *master)
 {
@@ -245,14 +191,14 @@ static inline long get_delta (long *rt, long *master)
 	unsigned long i;

 	for (i = 0; i < NUM_ITERS; i++) {
-		t0 = get_stick();
+		t0 = tick_ops->get_tick();
 		go[MASTER] = 1;
 		membar("#StoreLoad");
 		while (!(tm = go[SLAVE]))
 			membar("#LoadLoad");
 		go[SLAVE] = 0;
 		membar("#StoreStore");
-		t1 = get_stick();
+		t1 = tick_ops->get_tick();

 		if (t1 - t0 < best_t1 - best_t0)
 			best_t0 = t0, best_t1 = t1, best_tm = tm;
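For reference, the estimate these get_delta() iterations feed into is the midpoint of the shortest observed round trip minus the master timestamp sampled inside it. A worked sketch of that arithmetic with made-up cycle counts (my illustration, not patch code):

    #include <stdio.h>

    int main(void)
    {
            long best_t0 = 1000;    /* slave tick just before the handshake */
            long best_t1 = 1008;    /* slave tick just after: 8-cycle round trip */
            long best_tm = 1504;    /* master tick, sampled inside that window */

            long tcenter = best_t0 / 2 + best_t1 / 2;   /* midpoint = 1004 */
            long delta   = tcenter - best_tm;           /* slave - master = -500 */

            /* accurate to about half the round trip, i.e. +/- 4 cycles here */
            printf("delta = %ld\n", delta);
            return 0;
    }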
@@ -268,32 +214,11 @@ static inline long get_delta (long *rt, long *master)
 	return tcenter - best_tm;
 }

-static void adjust_stick(long adj)
-{
-	unsigned long tmp, pstate;
-
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " wrpr %0, %4, %%pstate\n\t"
-			     ".align 16\n\t"
-			     "1:nop\n\t"
-			     "rd %%asr24, %1\n\t"
-			     "add %1, %2, %1\n\t"
-			     "wr %1, 0x0, %%asr24\n\t"
-			     "add %1, %3, %1\n\t"
-			     "wr %1, 0x0, %%asr25\n\t"
-			     "wrpr %0, 0x0, %%pstate"
-			     : "=&r" (pstate), "=&r" (tmp)
-			     : "r" (adj), "r" (current_tick_offset),
-			       "i" (PSTATE_IE));
-}
-
-void smp_synchronize_stick_client(void)
+void smp_synchronize_tick_client(void)
 {
 	long i, delta, adj, adjust_latency = 0, done = 0;
 	unsigned long flags, rt, master_time_stamp, bound;
-#if DEBUG_STICK_SYNC
+#if DEBUG_TICK_SYNC
 	struct {
 		long rt;	/* roundtrip time */
 		long master;	/* master's timestamp */
@@ -323,9 +248,9 @@ void smp_synchronize_stick_client(void)
 			} else
 				adj = -delta;

-			adjust_stick(adj);
+			tick_ops->add_tick(adj, current_tick_offset);
 		}
-#if DEBUG_STICK_SYNC
+#if DEBUG_TICK_SYNC
 		t[i].rt = rt;
 		t[i].master = master_time_stamp;
 		t[i].diff = delta;
@@ -335,25 +260,25 @@ void smp_synchronize_stick_client(void)
 	}

 	local_irq_restore(flags);

-#if DEBUG_STICK_SYNC
+#if DEBUG_TICK_SYNC
 	for (i = 0; i < NUM_ROUNDS; i++)
 		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
 		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 #endif

-	printk(KERN_INFO "CPU %d: synchronized STICK with master CPU (last diff %ld cycles,"
+	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
 	       "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
 }

-static void smp_start_sync_stick_client(int cpu);
+static void smp_start_sync_tick_client(int cpu);

-static void smp_synchronize_one_stick(int cpu)
+static void smp_synchronize_one_tick(int cpu)
 {
 	unsigned long flags, i;

 	go[MASTER] = 0;

-	smp_start_sync_stick_client(cpu);
+	smp_start_sync_tick_client(cpu);

 	/* wait for client to be ready */
 	while (!go[MASTER])
@@ -370,7 +295,7 @@ static void smp_synchronize_one_stick(int cpu)
 			membar("#LoadLoad");
 		go[MASTER] = 0;
 		membar("#StoreStore");
-		go[SLAVE] = get_stick();
+		go[SLAVE] = tick_ops->get_tick();
 		membar("#StoreLoad");
 	}
 }
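The two sides above communicate only through the go[] array: the slave raises go[MASTER] to request a timestamp, the master answers by writing its tick value into go[SLAVE], and the membar()s keep the timestamp reads ordered around the flag traffic. A rough user-space analogue, with C11 atomics and a pthread standing in for the second CPU (my sketch, not patch code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    enum { MASTER = 0, SLAVE = 1 };
    static _Atomic unsigned long go[SLAVE + 1];
    static _Atomic unsigned long fake_tick;         /* stand-in for %tick */

    static unsigned long get_tick(void) { return ++fake_tick; }

    static void *master_side(void *arg)
    {
            (void)arg;
            while (!atomic_load(&go[MASTER]))       /* wait for slave request */
                    ;
            atomic_store(&go[MASTER], 0);
            atomic_store(&go[SLAVE], get_tick());   /* publish master timestamp */
            return NULL;
    }

    int main(void)
    {
            pthread_t m;
            pthread_create(&m, NULL, master_side, NULL);

            /* slave side: one round of the get_delta() loop */
            unsigned long t0 = get_tick();
            atomic_store(&go[MASTER], 1);           /* request a timestamp */
            unsigned long tm;
            while ((tm = atomic_load(&go[SLAVE])) == 0)
                    ;                               /* spin until master answers */
            atomic_store(&go[SLAVE], 0);
            unsigned long t1 = get_tick();

            pthread_join(m, NULL);
            printf("t0=%lu tm=%lu t1=%lu delta=%ld\n",
                   t0, tm, t1, (long)(t0 / 2 + t1 / 2) - (long)tm);
            return 0;
    }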
@@ -638,11 +563,11 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 	/* NOTE: Caller runs local copy on master. */
 }

-extern unsigned long xcall_sync_stick;
+extern unsigned long xcall_sync_tick;

-static void smp_start_sync_stick_client(int cpu)
+static void smp_start_sync_tick_client(int cpu)
 {
-	smp_cross_call_masked(&xcall_sync_stick,
+	smp_cross_call_masked(&xcall_sync_tick,
 			      0, 0, 0,
 			      (1UL << cpu));
 }
@@ -1118,12 +1043,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
 	 * Check for level 14 softint.
 	 */
 	{
-		unsigned long tick_mask;
-
-		if (SPARC64_USE_STICK)
-			tick_mask = (1UL << 16);
-		else
-			tick_mask = (1UL << 0);
+		unsigned long tick_mask = tick_ops->softint_mask;

 		if (!(get_softint() & tick_mask)) {
 			extern void handler_irq(int, struct pt_regs *);
@@ -1159,41 +1079,8 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
 			     : "=r" (pstate)
 			     : "i" (PSTATE_IE));

-	/* Workaround for Spitfire Errata (#54 I think??), I discovered
-	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
-	 * number 103640.
-	 *
-	 * On Blackbird writes to %tick_cmpr can fail, the
-	 * workaround seems to be to execute the wr instruction
-	 * at the start of an I-cache line, and perform a dummy
-	 * read back from %tick_cmpr right after writing to it. -DaveM
-	 *
-	 * Just to be anal we add a workaround for Spitfire
-	 * Errata 50 by preventing pipeline bypasses on the
-	 * final read of the %tick register into a compare
-	 * instruction.  The Errata 50 description states
-	 * that %tick is not prone to this bug, but I am not
-	 * taking any chances.
-	 */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
-				     "ba,pt %%xcc, 1f\n\t"
-				     " add %0, %2, %0\n\t"
-				     ".align 64\n"
-				     "1: wr %0, 0x0, %%tick_cmpr\n\t"
-				     "rd %%tick_cmpr, %%g0\n\t"
-				     "rd %%tick, %1\n\t"
-				     "mov %1, %1"
-				     : "=&r" (compare), "=r" (tick)
-				     : "r" (current_tick_offset));
-	} else {
-		__asm__ __volatile__("rd %%asr25, %0\n\t"
-				     "add %0, %2, %0\n\t"
-				     "wr %0, 0x0, %%asr25\n\t"
-				     "rd %%asr24, %1\n\t"
-				     : "=&r" (compare), "=r" (tick)
-				     : "r" (current_tick_offset));
-	}
+	compare = tick_ops->add_compare(current_tick_offset);
+	tick = tick_ops->get_tick();

 	/* Restore PSTATE_IE. */
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -1217,35 +1104,7 @@ static void __init smp_setup_percpu_timer(void)
 			     : "=r" (pstate)
 			     : "i" (PSTATE_IE));

-	/* Workaround for Spitfire Errata (#54 I think??), I discovered
-	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
-	 * number 103640.
-	 *
-	 * On Blackbird writes to %tick_cmpr can fail, the
-	 * workaround seems to be to execute the wr instruction
-	 * at the start of an I-cache line, and perform a dummy
-	 * read back from %tick_cmpr right after writing to it. -DaveM
-	 */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("rd %%tick, %%g1\n\t"
-				     "ba,pt %%xcc, 1f\n\t"
-				     " add %%g1, %0, %%g1\n\t"
-				     ".align 64\n"
-				     "1: wr %%g1, 0x0, %%tick_cmpr\n\t"
-				     "rd %%tick_cmpr, %%g0"
-				     : /* no outputs */
-				     : "r" (current_tick_offset)
-				     : "g1");
-	} else {
-		__asm__ __volatile__("rd %%asr24, %%g1\n\t"
-				     "add %%g1, %0, %%g1\n\t"
-				     "wr %%g1, 0x0, %%asr25"
-				     : /* no outputs */
-				     : "r" (current_tick_offset)
-				     : "g1");
-	}
+	tick_ops->init_tick(current_tick_offset);

 	/* Restore PSTATE_IE. */
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -1314,44 +1173,23 @@ static void __init smp_tune_scheduling(void)
 	     p += (64 / sizeof(unsigned long)))
 		*((volatile unsigned long *)p);

 	/* Now the real measurement. */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
-				     " rd %%tick, %0\n\t"
-				     ".align 64\n"
-				     "1:\tldx [%2 + 0x000], %%g1\n\t"
-				     "ldx [%2 + 0x040], %%g2\n\t"
-				     "ldx [%2 + 0x080], %%g3\n\t"
-				     "ldx [%2 + 0x0c0], %%g5\n\t"
-				     "add %2, 0x100, %2\n\t"
-				     "cmp %2, %4\n\t"
-				     "bne,pt %%xcc, 1b\n\t"
-				     " nop\n\t"
-				     "rd %%tick, %1\n\t"
-				     : "=&r" (tick1), "=&r" (tick2),
-				       "=&r" (flush_base)
-				     : "2" (flush_base),
-				       "r" (flush_base + ecache_size)
-				     : "g1", "g2", "g3", "g5");
-	} else {
-		__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
-				     " rd %%asr24, %0\n\t"
-				     ".align 64\n"
-				     "1:\tldx [%2 + 0x000], %%g1\n\t"
-				     "ldx [%2 + 0x040], %%g2\n\t"
-				     "ldx [%2 + 0x080], %%g3\n\t"
-				     "ldx [%2 + 0x0c0], %%g5\n\t"
-				     "add %2, 0x100, %2\n\t"
-				     "cmp %2, %4\n\t"
-				     "bne,pt %%xcc, 1b\n\t"
-				     " nop\n\t"
-				     "rd %%asr24, %1\n\t"
-				     : "=&r" (tick1), "=&r" (tick2),
-				       "=&r" (flush_base)
-				     : "2" (flush_base),
-				       "r" (flush_base + ecache_size)
-				     : "g1", "g2", "g3", "g5");
-	}
+	tick1 = tick_ops->get_tick();
+	__asm__ __volatile__("1:\n\t"
+			     "ldx [%0 + 0x000], %%g1\n\t"
+			     "ldx [%0 + 0x040], %%g2\n\t"
+			     "ldx [%0 + 0x080], %%g3\n\t"
+			     "ldx [%0 + 0x0c0], %%g5\n\t"
+			     "add %0, 0x100, %0\n\t"
+			     "cmp %0, %2\n\t"
+			     "bne,pt %%xcc, 1b\n\t"
+			     " nop"
+			     : "=&r" (flush_base)
+			     : "0" (flush_base),
+			       "r" (flush_base + ecache_size)
+			     : "g1", "g2", "g3", "g5");
+	tick2 = tick_ops->get_tick();

 	local_irq_restore(flags);
@@ -1370,6 +1208,8 @@ static void __init smp_tune_scheduling(void)
 report:
 	/* Convert ticks/sticks to jiffies. */
 	cache_decay_ticks = cacheflush_time / timer_tick_offset;
+	if (cache_decay_ticks < 1)
+		cache_decay_ticks = 1;

 	printk("Using heuristic of %ld cycles, %ld ticks.\n",
 	       cacheflush_time, cache_decay_ticks);
@@ -1438,8 +1278,7 @@ int __devinit __cpu_up(unsigned int cpu)
 		if (!test_bit(cpu, &cpu_online_map)) {
 			ret = -ENODEV;
 		} else {
-			if (SPARC64_USE_STICK)
-				smp_synchronize_one_stick(cpu);
+			smp_synchronize_one_tick(cpu);
 		}
 	}
 	return ret;

arch/sparc64/kernel/sparc64_ksyms.c
@@ -128,20 +128,13 @@ EXPORT_SYMBOL(__write_unlock);
 #endif

 /* Hard IRQ locking */
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
 #endif

 #if defined(CONFIG_MCOUNT)
 extern void mcount(void);
 EXPORT_SYMBOL_NOVERS(mcount);
 #endif

-/* Uniprocessor clock frequency */
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(up_clock_tick);
-#endif
-
 /* Per-CPU information table */
 EXPORT_SYMBOL(cpu_data);
@@ -162,10 +155,13 @@ EXPORT_SYMBOL(_do_write_lock);
 EXPORT_SYMBOL(_do_write_unlock);
 #endif

-#ifdef CONFIG_SMP
 EXPORT_SYMBOL(smp_call_function);
-#endif
 #endif /* CONFIG_SMP */

+/* Uniprocessor clock frequency */
+#ifndef CONFIG_SMP
+extern unsigned long up_clock_tick;
+EXPORT_SYMBOL(up_clock_tick);
+#endif
+
 /* semaphores */

arch/sparc64/kernel/time.c
@@ -37,6 +37,7 @@
 #include <asm/ebus.h>
 #include <asm/isa.h>
 #include <asm/starfire.h>
+#include <asm/smp.h>

 spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
 spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
@@ -54,6 +55,336 @@ static unsigned long mstk48t59_regs = 0UL;
 static int set_rtc_mmss(unsigned long);

+struct sparc64_tick_ops *tick_ops;
+
+static void tick_disable_protection(void)
+{
+	/* Set things up so user can access tick register for profiling
+	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
+	 * read back of %tick after writing it.
+	 */
+	__asm__ __volatile__("	sethi	%%hi(0x80000000), %%g1\n"
+			     "	ba,pt	%%xcc, 1f\n"
+			     "	 sllx	%%g1, 32, %%g1\n"
+			     "	.align	64\n"
+			     "1:	rd	%%tick, %%g2\n"
+			     "	add	%%g2, 6, %%g2\n"
+			     "	andn	%%g2, %%g1, %%g2\n"
+			     "	wrpr	%%g2, 0, %%tick\n"
+			     "	rdpr	%%tick, %%g0"
+			     : /* no outputs */
+			     : /* no inputs */
+			     : "g1", "g2");
+}
+
+static void tick_init_tick(unsigned long offset)
+{
+	tick_disable_protection();
+
+	__asm__ __volatile__("	rd	%%tick, %%g1\n"
+			     "	ba,pt	%%xcc, 1f\n"
+			     "	 add	%%g1, %0, %%g1\n"
+			     "	.align	64\n"
+			     "1:	wr	%%g1, 0x0, %%tick_cmpr\n"
+			     "	rd	%%tick_cmpr, %%g0"
+			     : /* no outputs */
+			     : "r" (offset)
+			     : "g1");
+}
+
+static unsigned long tick_get_tick(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd %%tick, %0\n\t"
+			     "mov %0, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+static unsigned long tick_get_compare(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+			     "mov %0, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+static unsigned long tick_add_compare(unsigned long adj)
+{
+	unsigned long new_compare;
+
+	/* Workaround for Spitfire Errata (#54 I think??), I discovered
+	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
+	 * number 103640.
+	 *
+	 * On Blackbird writes to %tick_cmpr can fail, the
+	 * workaround seems to be to execute the wr instruction
+	 * at the start of an I-cache line, and perform a dummy
+	 * read back from %tick_cmpr right after writing to it. -DaveM
+	 */
+	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+			     "ba,pt %%xcc, 1f\n\t"
+			     " add %0, %1, %0\n\t"
+			     ".align 64\n"
+			     "1:\n\t"
+			     "wr %0, 0, %%tick_cmpr\n\t"
+			     "rd %%tick_cmpr, %%g0"
+			     : "=&r" (new_compare)
+			     : "r" (adj));
+
+	return new_compare;
+}
+
+static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long new_tick, tmp;
+
+	/* Also need to handle Blackbird bug here too. */
+	__asm__ __volatile__("rd %%tick, %0\n\t"
+			     "add %0, %2, %0\n\t"
+			     "wrpr %0, 0, %%tick\n\t"
+			     "ba,pt %%xcc, 1f\n\t"
+			     " add %0, %3, %1\n\t"
+			     ".align 64\n"
+			     "1:\n\t"
+			     "wr %1, 0, %%tick_cmpr\n\t"
+			     "rd %%tick_cmpr, %%g0"
+			     : "=&r" (new_tick), "=&r" (tmp)
+			     : "r" (adj), "r" (offset));
+
+	return new_tick;
+}
+
+static struct sparc64_tick_ops tick_operations = {
+	.init_tick	= tick_init_tick,
+	.get_tick	= tick_get_tick,
+	.get_compare	= tick_get_compare,
+	.add_tick	= tick_add_tick,
+	.add_compare	= tick_add_compare,
+	.softint_mask	= 1UL << 0,
+};
+
+static void stick_init_tick(unsigned long offset)
+{
+	tick_disable_protection();
+
+	/* Let the user get at STICK too. */
+	__asm__ __volatile__("	sethi	%%hi(0x80000000), %%g1\n"
+			     "	sllx	%%g1, 32, %%g1\n"
+			     "	rd	%%asr24, %%g2\n"
+			     "	andn	%%g2, %%g1, %%g2\n"
+			     "	wr	%%g2, 0, %%asr24"
+			     : /* no outputs */
+			     : /* no inputs */
+			     : "g1", "g2");
+
+	__asm__ __volatile__("	rd	%%asr24, %%g1\n"
+			     "	add	%%g1, %0, %%g1\n"
+			     "	wr	%%g1, 0x0, %%asr25"
+			     : /* no outputs */
+			     : "r" (offset)
+			     : "g1");
+}
+
+static unsigned long stick_get_tick(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd %%asr24, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+static unsigned long stick_get_compare(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("rd %%asr25, %0"
+			     : "=r" (ret));
+
+	return ret;
+}
+
+static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long new_tick, tmp;
+
+	__asm__ __volatile__("rd %%asr24, %0\n\t"
+			     "add %0, %2, %0\n\t"
+			     "wr %0, 0, %%asr24\n\t"
+			     "add %0, %3, %1\n\t"
+			     "wr %1, 0, %%asr25"
+			     : "=&r" (new_tick), "=&r" (tmp)
+			     : "r" (adj), "r" (offset));
+
+	return new_tick;
+}
+
+static unsigned long stick_add_compare(unsigned long adj)
+{
+	unsigned long new_compare;
+
+	__asm__ __volatile__("rd %%asr25, %0\n\t"
+			     "add %0, %1, %0\n\t"
+			     "wr %0, 0, %%asr25"
+			     : "=&r" (new_compare)
+			     : "r" (adj));
+
+	return new_compare;
+}
+
+static struct sparc64_tick_ops stick_operations = {
+	.init_tick	= stick_init_tick,
+	.get_tick	= stick_get_tick,
+	.get_compare	= stick_get_compare,
+	.add_tick	= stick_add_tick,
+	.add_compare	= stick_add_compare,
+	.softint_mask	= 1UL << 16,
+};
+
+/* On Hummingbird the STICK/STICK_CMPR register is implemented
+ * in I/O space.  There are two 64-bit registers each, the
+ * first holds the low 32-bits of the value and the second holds
+ * the high 32-bits.
+ *
+ * Since STICK is constantly updating, we have to access it carefully.
+ *
+ * The sequence we use to read is:
+ * 1) read low
+ * 2) read high
+ * 3) read low again, if it rolled over increment high by 1
+ *
+ * Writing STICK safely is also tricky:
+ * 1) write low to zero
+ * 2) write high
+ * 3) write low
+ */
+#define HBIRD_STICKCMP_ADDR	0x1fe0000f060UL
+#define HBIRD_STICK_ADDR	0x1fe0000f070UL
+
+static unsigned long __hbird_read_stick(void)
+{
+	unsigned long ret, tmp1, tmp2, tmp3;
+	unsigned long addr = HBIRD_STICK_ADDR;
+
+	__asm__ __volatile__("ldxa [%1] %5, %2\n\t"
+			     "add %1, 0x8, %1\n\t"
+			     "ldxa [%1] %5, %3\n\t"
+			     "sub %1, 0x8, %1\n\t"
+			     "ldxa [%1] %5, %4\n\t"
+			     "cmp %4, %2\n\t"
+			     "bl,a,pn %%xcc, 1f\n\t"
+			     " add %3, 1, %3\n"
+			     "1:\n\t"
+			     "sllx %3, 32, %3\n\t"
+			     "or %3, %4, %0\n\t"
+			     : "=&r" (ret), "=&r" (addr),
+			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
+			     : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
+
+	return ret;
+}
+
+static unsigned long __hbird_read_compare(void)
+{
+	unsigned long low, high;
+	unsigned long addr = HBIRD_STICKCMP_ADDR;
+
+	__asm__ __volatile__("ldxa [%2] %3, %0\n\t"
+			     "add %2, 0x8, %2\n\t"
+			     "ldxa [%2] %3, %1"
+			     : "=&r" (low), "=&r" (high), "=&r" (addr)
+			     : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
+
+	return (high << 32UL) | low;
+}
+
+static void __hbird_write_stick(unsigned long val)
+{
+	unsigned long low = (val & 0xffffffffUL);
+	unsigned long high = (val >> 32UL);
+	unsigned long addr = HBIRD_STICK_ADDR;
+
+	__asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
+			     "add %0, 0x8, %0\n\t"
+			     "stxa %3, [%0] %4\n\t"
+			     "sub %0, 0x8, %0\n\t"
+			     "stxa %2, [%0] %4"
+			     : "=&r" (addr)
+			     : "0" (addr), "r" (low), "r" (high),
+			       "i" (ASI_PHYS_BYPASS_EC_E));
+}
+
+static void __hbird_write_compare(unsigned long val)
+{
+	unsigned long low = (val & 0xffffffffUL);
+	unsigned long high = (val >> 32UL);
+	unsigned long addr = HBIRD_STICKCMP_ADDR;
+
+	__asm__ __volatile__("stxa %2, [%0] %4\n\t"
+			     "add %0, 0x8, %0\n\t"
+			     "stxa %3, [%0] %4"
+			     : "=&r" (addr)
+			     : "0" (addr), "r" (low), "r" (high),
+			       "i" (ASI_PHYS_BYPASS_EC_E));
+}
+
+static void hbtick_init_tick(unsigned long offset)
+{
+	tick_disable_protection();
+
+	__hbird_write_compare(__hbird_read_stick() + offset);
+}
+
+static unsigned long hbtick_get_tick(void)
+{
+	return __hbird_read_stick();
+}
+
+static unsigned long hbtick_get_compare(void)
+{
+	return __hbird_read_compare();
+}
+
+static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+{
+	unsigned long val;
+
+	val = __hbird_read_stick() + adj;
+	__hbird_write_stick(val);
+
+	__hbird_write_compare(val + offset);
+
+	return val;
+}
+
+static unsigned long hbtick_add_compare(unsigned long adj)
+{
+	unsigned long val = __hbird_read_compare() + adj;
+
+	__hbird_write_compare(val);
+
+	return val;
+}
+
+static struct sparc64_tick_ops hbtick_operations = {
+	.init_tick	= hbtick_init_tick,
+	.get_tick	= hbtick_get_tick,
+	.get_compare	= hbtick_get_compare,
+	.add_tick	= hbtick_add_tick,
+	.add_compare	= hbtick_add_compare,
+	.softint_mask	= 1UL << 16,
+};
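The ldxa sequence in __hbird_read_stick() is easier to see in plain C. A sketch of the same rollover-safe read described in the comment above, where hb_read_low()/hb_read_high() are hypothetical stand-ins for the two 32-bit I/O reads (my illustration, not patch code):

    static unsigned long hb_read_low(void);     /* hypothetical accessor */
    static unsigned long hb_read_high(void);    /* hypothetical accessor */

    static unsigned long hbird_read_sketch(void)
    {
            unsigned long low  = hb_read_low();
            unsigned long high = hb_read_high();
            unsigned long low2 = hb_read_low();

            if (low2 < low)     /* low half wrapped while we read high */
                    high += 1;
            return (high << 32) | low2;
    }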
 /* timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
  *
@@ -62,7 +393,8 @@ static int set_rtc_mmss(unsigned long);
  */
 unsigned long timer_tick_offset;
 unsigned long timer_tick_compare;

-unsigned long timer_ticks_per_usec_quotient;
+static unsigned long timer_ticks_per_usec_quotient;

 #define TICK_SIZE (tick_nsec / 1000)
@@ -146,43 +478,8 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 			     : "=r" (pstate)
 			     : "i" (PSTATE_IE));

-	/* Workaround for Spitfire Errata (#54 I think??), I discovered
-	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
-	 * number 103640.
-	 *
-	 * On Blackbird writes to %tick_cmpr can fail, the
-	 * workaround seems to be to execute the wr instruction
-	 * at the start of an I-cache line, and perform a dummy
-	 * read back from %tick_cmpr right after writing to it. -DaveM
-	 *
-	 * Just to be anal we add a workaround for Spitfire
-	 * Errata 50 by preventing pipeline bypasses on the
-	 * final read of the %tick register into a compare
-	 * instruction.  The Errata 50 description states
-	 * that %tick is not prone to this bug, but I am not
-	 * taking any chances.
-	 */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("	rd	%%tick_cmpr, %0\n"
-				     "	ba,pt	%%xcc, 1f\n"
-				     "	 add	%0, %2, %0\n"
-				     "	.align	64\n"
-				     "1:	wr	%0, 0, %%tick_cmpr\n"
-				     "	rd	%%tick_cmpr, %%g0\n"
-				     "	rd	%%tick, %1\n"
-				     "	mov	%1, %1"
-				     : "=&r" (timer_tick_compare), "=r" (ticks)
-				     : "r" (timer_tick_offset));
-	} else {
-		__asm__ __volatile__("	rd	%%asr25, %0\n"
-				     "	add	%0, %2, %0\n"
-				     "	wr	%0, 0, %%asr25\n"
-				     "	rd	%%asr24, %1"
-				     : "=&r" (timer_tick_compare), "=r" (ticks)
-				     : "r" (timer_tick_offset));
-	}
+	timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
+	ticks = tick_ops->get_tick();

 	/* Restore PSTATE_IE. */
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -205,19 +502,7 @@ void timer_tick_interrupt(struct pt_regs *regs)
 	/*
 	 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
 	 */
-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("	rd	%%tick_cmpr, %0\n"
-				     "	add	%0, %1, %0"
-				     : "=&r" (timer_tick_compare)
-				     : "r" (timer_tick_offset));
-	} else {
-		__asm__ __volatile__("	rd	%%asr25, %0\n"
-				     "	add	%0, %1, %0"
-				     : "=&r" (timer_tick_compare)
-				     : "r" (timer_tick_offset));
-	}
+	timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;

 	timer_check_rtc();
@@ -620,40 +905,90 @@ void __init clock_probe(void)
 	local_irq_restore(flags);
 }

-void __init time_init(void)
+/* This is gets the master TICK_INT timer going. */
+static unsigned long sparc64_init_timers(void (*cfunc)(int, void *, struct pt_regs *))
 {
-	/* clock_probe() is now done at end of [se]bus_init on sparc64
-	 * so that sbus, fhc and ebus bus information is probed and
-	 * available.
-	 */
-	unsigned long clock;
+	unsigned long pstate, clock;
+	int node, err;
+#ifdef CONFIG_SMP
+	extern void smp_tick_init(void);
+#endif
+
+	if (tlb_type == spitfire) {
+		unsigned long ver, manuf, impl;
+
+		__asm__ __volatile__ ("rdpr %%ver, %0"
+				      : "=&r" (ver));
+		manuf = ((ver >> 48) & 0xffff);
+		impl = ((ver >> 32) & 0xffff);
+		if (manuf == 0x17 && impl == 0x13) {
+			/* Hummingbird, aka Ultra-IIe */
+			tick_ops = &hbtick_operations;
+			node = prom_root_node;
+			clock = prom_getint(node, "stick-frequency");
+		} else {
+			tick_ops = &tick_operations;
+			node = linux_cpus[0].prom_node;
+			clock = prom_getint(node, "clock-frequency");
+		}
+	} else {
+		tick_ops = &stick_operations;
+		node = prom_root_node;
+		clock = prom_getint(node, "stick-frequency");
+	}
+	timer_tick_offset = clock / HZ;
+#ifdef CONFIG_SMP
+	smp_tick_init();
+#endif
+
+	/* Register IRQ handler. */
+	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
+			  "timer", NULL);
+
+	if (err) {
+		prom_printf("Serious problem, cannot register TICK_INT\n");
+		prom_halt();
+	}
+
+	/* Guarentee that the following sequences execute
+	 * uninterrupted.
+	 */
+	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
+			     "wrpr %0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick(timer_tick_offset);
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+
+	local_irq_enable();

-	sparc64_init_timers(timer_interrupt, &clock);
-	timer_ticks_per_usec_quotient =
-		((1UL << 32) / (clock / 1000020));
+	return clock;
+}
+
+/* The quotient formula is taken from the IA64 port. */
+void __init time_init(void)
+{
+	unsigned long clock = sparc64_init_timers(timer_interrupt);
+
+	timer_ticks_per_usec_quotient =
+		(((1000000UL << 30) + (clock / 2)) / clock);
 }

 static __inline__ unsigned long do_gettimeoffset(void)
 {
-	unsigned long ticks;
+	unsigned long ticks = tick_ops->get_tick();

-	if (!SPARC64_USE_STICK) {
-		__asm__ __volatile__("	rd	%%tick, %%g1\n"
-				     "	add	%1, %%g1, %0\n"
-				     "	sub	%0, %2, %0\n"
-				     : "=r" (ticks)
-				     : "r" (timer_tick_offset),
-				       "r" (timer_tick_compare)
-				     : "g1", "g2");
-	} else {
-		__asm__ __volatile__("rd %%asr24, %%g1\n\t"
-				     "add %1, %%g1, %0\n\t"
-				     "sub %0, %2, %0\n\t"
-				     : "=&r" (ticks)
-				     : "r" (timer_tick_offset),
-				       "r" (timer_tick_compare)
-				     : "g1");
-	}
+	ticks += timer_tick_offset;
+	ticks -= timer_tick_compare;

-	return (ticks * timer_ticks_per_usec_quotient) >> 32UL;
+	return (ticks * timer_ticks_per_usec_quotient) >> 30UL;
 }

 void do_settimeofday(struct timeval *tv)
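The new time_init() computes usec-per-tick as a 2^30 fixed-point value rounded once, instead of the old (1UL << 32) / (clock / 1000020) form whose inner division truncates first. A worked example with a made-up 400 MHz clock (assumes a 64-bit unsigned long, as on sparc64; my illustration, not patch code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long clock = 400000000UL;  /* ticks per second */
            unsigned long q = ((1000000UL << 30) + (clock / 2)) / clock;
            unsigned long ticks = 400;          /* 400 ticks at 400 MHz = 1 usec */

            printf("q = %lu\n", q);                         /* 2684355 */
            printf("usec = %lu\n", (ticks * q) >> 30UL);    /* 1 */
            return 0;
    }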

arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <asm/chafsr.h>
 #include <asm/psrcompat.h>
 #include <asm/processor.h>
+#include <asm/timer.h>

 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
@@ -588,7 +589,7 @@ unsigned long __init cheetah_tune_scheduling(void)
 	flush_linesize = ecache_flush_linesize;
 	flush_size = ecache_flush_size >> 1;

-	__asm__ __volatile__("rd %%tick, %0" : "=r" (tick1));
+	tick1 = tick_ops->get_tick();

 	__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
 			     " bne,pt %%xcc, 1b\n\t"
@@ -597,7 +598,7 @@ unsigned long __init cheetah_tune_scheduling(void)
 			     : "0" (flush_size), "r" (flush_base),
 			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));

-	__asm__ __volatile__("rd %%tick, %0" : "=r" (tick2));
+	tick2 = tick_ops->get_tick();

 	raw = (tick2 - tick1);

arch/sparc64/mm/ultra.S
@@ -560,8 +560,8 @@ xcall_flush_tlb_kernel_range:
 	/* This runs in a very controlled environment, so we do
 	 * not need to worry about BH races etc.
 	 */
-	.globl		xcall_sync_stick
-xcall_sync_stick:
+	.globl		xcall_sync_tick
+xcall_sync_tick:
 	rdpr		%pstate, %g2
 	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
 	rdpr		%pil, %g2
@@ -569,7 +569,7 @@ xcall_sync_stick:
 	sethi		%hi(109f), %g7
 	b,pt		%xcc, etrap_irq
 109:	 or		%g7, %lo(109b), %g7
-	call		smp_synchronize_stick_client
+	call		smp_synchronize_tick_client
 	 nop
 	clr		%l6
 	b		rtrap_xcall

include/asm-sparc64/irq.h
@@ -117,8 +117,6 @@ static __inline__ char *__irq_itoa(unsigned int irq)
 extern void disable_irq(unsigned int);
 #define disable_irq_nosync disable_irq
 extern void enable_irq(unsigned int);
-extern void sparc64_init_timers(void (*lvl10_irq)(int, void *, struct pt_regs *),
-				unsigned long *);
 extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 extern unsigned int psycho_build_irq(void *psycho, int imap_off, int ino, int need_dma_sync);

include/asm-sparc64/spitfire.h
@@ -45,8 +45,6 @@ enum ultra_tlb_layout {
 extern enum ultra_tlb_layout tlb_type;

-#define SPARC64_USE_STICK	(tlb_type != spitfire)
-
 #define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

 #define L1DCACHE_SIZE		0x4000

include/asm-sparc64/timer.h
@@ -50,6 +50,17 @@ struct sun5_timer {
  */
 #define SUN5_HZ_TO_LIMIT(__hz)	(1000000/(__hz))

+struct sparc64_tick_ops {
+	void (*init_tick)(unsigned long);
+	unsigned long (*get_tick)(void);
+	unsigned long (*get_compare)(void);
+	unsigned long (*add_tick)(unsigned long, unsigned long);
+	unsigned long (*add_compare)(unsigned long);
+	unsigned long softint_mask;
+};
+
+extern struct sparc64_tick_ops *tick_ops;
+
 #ifdef CONFIG_SMP
 extern unsigned long timer_tick_offset;
 extern void timer_tick_interrupt(struct pt_regs *);
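This struct is the heart of the patch: one ops table per chip flavor (tick_operations, stick_operations, hbtick_operations in time.c), bound to tick_ops once at boot so all later timer code is chip-agnostic. A minimal standalone sketch of the same pattern (my illustration, not kernel code):

    #include <stdio.h>

    struct tick_ops_sketch {
            unsigned long (*get_tick)(void);
            unsigned long softint_mask;
    };

    static unsigned long counter;

    /* stand-ins for the privileged %tick / %asr24 reads */
    static unsigned long fake_tick_read(void)  { return ++counter; }
    static unsigned long fake_stick_read(void) { return counter += 2; }

    static struct tick_ops_sketch tick_flavor  = { fake_tick_read,  1UL << 0 };
    static struct tick_ops_sketch stick_flavor = { fake_stick_read, 1UL << 16 };

    static struct tick_ops_sketch *ops;     /* plays the role of tick_ops */

    int main(int argc, char **argv)
    {
            /* boot-time probe, like sparc64_init_timers() keying off tlb_type */
            ops = (argc > 1) ? &stick_flavor : &tick_flavor;

            /* every later caller dispatches through the table */
            printf("tick=%lu softint mask=0x%lx\n",
                   ops->get_tick(), ops->softint_mask);
            return 0;
    }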