Commit eb2d8d60, authored Oct 13, 2007 by David S. Miller
[SPARC64]: Access ivector_table[] using physical addresses.

Signed-off-by: David S. Miller <davem@davemloft.net>

Parent: a650d383
Showing 5 changed files with 55 additions and 45 deletions (+55, -45):

  arch/sparc64/kernel/entry.S       +6  -6
  arch/sparc64/kernel/irq.c         +34 -22
  arch/sparc64/kernel/sun4v_ivec.S  +7  -9
  arch/sparc64/kernel/traps.c       +2  -2
  include/asm-sparc64/cpudata.h     +6  -6
arch/sparc64/kernel/entry.S

@@ -429,16 +429,16 @@ do_ivec:
 	stxa	%g0, [%g0] ASI_INTR_RECEIVE
 	membar	#Sync
 
-	sethi	%hi(ivector_table), %g2
+	sethi	%hi(ivector_table_pa), %g2
+	ldx	[%g2 + %lo(ivector_table_pa)], %g2
 	sllx	%g3, 4, %g3
-	or	%g2, %lo(ivector_table), %g2
 	add	%g2, %g3, %g3
 
-	TRAP_LOAD_IRQ_WORK(%g6, %g1)
+	TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
 
-	ldx	[%g6], %g5		/* g5 = irq_work(cpu) */
-	stx	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
-	stx	%g3, [%g6]		/* irq_work(cpu) = bucket */
+	ldx	[%g6], %g5
+	stxa	%g5, [%g3] ASI_PHYS_USE_EC
+	stx	%g3, [%g6]
 	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
 	retry
 do_ivec_xcall:
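As an orientation note (not part of the patch): the rewritten do_ivec tail enqueues the interrupt's ino_bucket onto the per-cpu worklist entirely by physical address; the old virtual-address store into bucket->irq_chain becomes a stxa to __pa(bucket) via ASI_PHYS_USE_EC. Below is a minimal C model of that enqueue, with the physical store represented as an ordinary 64-bit store and the chain word assumed to sit at bucket offset 0x00. handler_irq() later snapshots and clears this list (see the irq.c hunks further down).

    #include <stdint.h>

    /* Model only: "physical addresses" are plain uint64_t values here, and
     * writing through them stands in for stxa ... ASI_PHYS_USE_EC. */
    static void do_ivec_enqueue(uint64_t *irq_worklist_pa, /* %g6: &trap_block[cpu].irq_worklist_pa */
                                uint64_t bucket_pa)        /* %g3: __pa(&ivector_table[ivec])       */
    {
        uint64_t *chain = (uint64_t *)(uintptr_t)bucket_pa;  /* chain word at offset 0x00 */

        *chain = *irq_worklist_pa;     /* ldx [%g6], %g5 ; stxa %g5, [%g3] ASI_PHYS_USE_EC */
        *irq_worklist_pa = bucket_pa;  /* stx %g3, [%g6] */
        /* followed by: wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint */
    }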
arch/sparc64/kernel/irq.c

@@ -51,15 +51,12 @@
  * To make processing these packets efficient and race free we use
  * an array of irq buckets below.  The interrupt vector handler in
  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- * The IVEC handler does not need to act atomically, the PIL dispatch
- * code uses CAS to get an atomic snapshot of the list and clear it
- * at the same time.
  *
  * If you make changes to ino_bucket, please update hand coded assembler
  * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
 struct ino_bucket {
-/*0x00*/unsigned long irq_chain;
+/*0x00*/unsigned long irq_chain_pa;
 
 /* Virtual interrupt number assigned to this INO. */
 /*0x08*/unsigned int virt_irq;
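A side note on the structure change: the hand-coded handlers rely on a fixed layout, with the chain word (now a physical address) at offset 0x00, virt_irq at 0x08, and a 16-byte entry stride, which is why both handlers scale the vector with sllx %g3, 4, %g3. A standalone compile-time sketch of those assumptions follows (a model struct, not the kernel's own definition):

    #include <stddef.h>
    #include <stdint.h>

    struct ino_bucket_model {
        uint64_t irq_chain_pa;   /* 0x00: __pa() of the next bucket on the worklist */
        uint32_t virt_irq;       /* 0x08: virtual irq assigned to this INO          */
        uint32_t pad;            /* pad the model out to the 16-byte stride         */
    };

    _Static_assert(offsetof(struct ino_bucket_model, irq_chain_pa) == 0x00,
                   "entry.S/sun4v_ivec.S store the chain word at offset 0");
    _Static_assert(offsetof(struct ino_bucket_model, virt_irq) == 0x08,
                   "handler_irq() reads virt_irq with lduwa at offset 8");
    _Static_assert(sizeof(struct ino_bucket_model) == 16,
                   "sllx %g3, 4, %g3 indexes 16-byte table entries");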
@@ -68,20 +65,14 @@ struct ino_bucket {
 
 #define NUM_IVECS	(IMAP_INR + 1)
 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
+unsigned long ivector_table_pa;
 
 #define __irq_ino(irq) \
         (((struct ino_bucket *)(irq)) - &ivector_table[0])
 #define __bucket(irq) ((struct ino_bucket *)(irq))
 #define __irq(bucket) ((unsigned long)(bucket))
 
-/* This has to be in the main kernel image, it cannot be
- * turned into per-cpu data.  The reason is that the main
- * kernel image is locked into the TLB and this structure
- * is accessed from the vectored interrupt trap handler.  If
- * access to this structure takes a TLB miss it could cause
- * the 5-level sparc v9 trap stack to overflow.
- */
-#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
+#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
 
 static struct {
 	unsigned long irq;
@@ -689,9 +680,8 @@ void ack_bad_irq(unsigned int virt_irq)
 
 void handler_irq(int irq, struct pt_regs *regs)
 {
-	struct ino_bucket *bucket;
+	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
-	unsigned long pstate;
 
 	clear_softint(1 << irq);
@@ -704,18 +694,30 @@ void handler_irq(int irq, struct pt_regs *regs)
 			     "ldx	[%2], %1\n\t"
 			     "stx	%%g0, [%2]\n\t"
 			     "wrpr	%0, 0x0, %%pstate\n\t"
-			     : "=&r" (pstate), "=&r" (bucket)
-			     : "r" (irq_work(smp_processor_id())),
+			     : "=&r" (pstate), "=&r" (bucket_pa)
+			     : "r" (irq_work_pa(smp_processor_id())),
 			       "i" (PSTATE_IE)
 			     : "memory");
 
-	while (bucket) {
-		struct ino_bucket *next = __bucket(bucket->irq_chain);
+	while (bucket_pa) {
+		unsigned long next_pa;
+		unsigned int virt_irq;
 
-		bucket->irq_chain = 0UL;
-		__do_IRQ(bucket->virt_irq);
+		__asm__ __volatile__("ldxa	[%2] %4, %0\n\t"
+				     "lduwa	[%3] %4, %1\n\t"
+				     "stxa	%%g0, [%2] %4"
+				     : "=&r" (next_pa), "=&r" (virt_irq)
+				     : "r" (bucket_pa +
+					    offsetof(struct ino_bucket,
+						     irq_chain_pa)),
+				       "r" (bucket_pa +
+					    offsetof(struct ino_bucket,
+						     virt_irq)),
+				       "i" (ASI_PHYS_USE_EC));
 
-		bucket = next;
+		__do_IRQ(virt_irq);
+
+		bucket_pa = next_pa;
 	}
 
 	irq_exit();
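For readers skimming the inline assembly above, the new consumer side works like this: with PSTATE_IE cleared, handler_irq() atomically takes the whole per-cpu worklist (load the head, store zero), then walks the chain using physical-address loads (ldxa/lduwa with ASI_PHYS_USE_EC), zeroing each bucket's chain word before dispatching its virt_irq. A plain-C rendering of that loop follows, with the ASI accesses modeled as ordinary memory accesses and dispatch_virt_irq() as a hypothetical stand-in for __do_IRQ():

    #include <stdint.h>

    struct ino_bucket_model {
        uint64_t irq_chain_pa;   /* 0x00 */
        uint32_t virt_irq;       /* 0x08 */
        uint32_t pad;
    };

    /* Hypothetical stand-in for __do_IRQ(); declared only so the sketch compiles. */
    extern void dispatch_virt_irq(unsigned int virt_irq);

    static void handler_irq_model(uint64_t *irq_worklist_pa)
    {
        /* The commit does this swap with interrupts disabled (wrpr ... %pstate). */
        uint64_t bucket_pa = *irq_worklist_pa;
        *irq_worklist_pa = 0;

        while (bucket_pa) {
            /* Model of the ASI_PHYS_USE_EC accesses on __pa(bucket). */
            struct ino_bucket_model *b = (struct ino_bucket_model *)(uintptr_t)bucket_pa;
            uint64_t next_pa = b->irq_chain_pa;   /* ldxa  [bucket + irq_chain_pa]      */
            unsigned int virt_irq = b->virt_irq;  /* lduwa [bucket + virt_irq]          */

            b->irq_chain_pa = 0;                  /* stxa  %g0, [bucket + irq_chain_pa] */
            dispatch_virt_irq(virt_irq);          /* __do_IRQ(virt_irq)                 */
            bucket_pa = next_pa;
        }
    }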
@@ -815,7 +817,7 @@ void init_irqwork_curcpu(void)
 {
 	int cpu = hard_smp_processor_id();
 
-	trap_block[cpu].irq_worklist = 0UL;
+	trap_block[cpu].irq_worklist_pa = 0UL;
 }
 
 /* Please be very careful with register_one_mondo() and
@@ -926,6 +928,14 @@ static struct irqaction timer_irq_action = {
 	.name = "timer",
 };
 
+/* XXX Belongs in a common location. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+	unsigned long val = (unsigned long) p;
+
+	return kern_base + (val - KERNBASE);
+}
+
 /* Only invoked on boot processor. */
 void __init init_IRQ(void)
 {
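kimage_addr_to_ra() converts a kernel-image virtual address into its real (physical) address: the image is linked at KERNBASE but loaded at the physical address kern_base, so ra = kern_base + (va - KERNBASE). A worked example with invented values (the real kern_base is discovered at boot, and KERNBASE is platform-defined):

    #include <stdint.h>

    /* Both constants below are made up purely for the arithmetic example. */
    #define EXAMPLE_KERNBASE   0x0000000000400000UL  /* link-time virtual base */
    #define EXAMPLE_KERN_BASE  0x000000000f400000UL  /* physical load address  */

    static uint64_t kimage_addr_to_ra_model(uint64_t va)
    {
        return EXAMPLE_KERN_BASE + (va - EXAMPLE_KERNBASE);
    }

    /* If &ivector_table[0] were the virtual address 0x612a80, the model gives
     * 0x0f400000 + (0x612a80 - 0x400000) = 0x0f612a80, which is the kind of
     * value init_IRQ() below stores into ivector_table_pa. */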
@@ -933,6 +943,8 @@ void __init init_IRQ(void)
 	kill_prom_timer();
 
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
+	ivector_table_pa = kimage_addr_to_ra(&ivector_table[0]);
+
 	if (tlb_type == hypervisor)
 		sun4v_init_mondo_queues();
arch/sparc64/kernel/sun4v_ivec.S

@@ -96,19 +96,17 @@ sun4v_dev_mondo:
 	stxa	%g2, [%g4] ASI_QUEUE
 	membar	#Sync
 
-	/* Get &__irq_work[smp_processor_id()] into %g1.  */
-	TRAP_LOAD_IRQ_WORK(%g1, %g4)
+	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
 
-	/* Get &ivector_table[IVEC] into %g4.  */
-	sethi	%hi(ivector_table), %g4
+	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
+	sethi	%hi(ivector_table_pa), %g4
+	ldx	[%g4 + %lo(ivector_table_pa)], %g4
 	sllx	%g3, 4, %g3
-	or	%g4, %lo(ivector_table), %g4
 	add	%g4, %g3, %g4
 
-	/* Insert ivector_table[] entry into __irq_work[] queue.  */
-	ldx	[%g1], %g2	/* g2 = irq_work(cpu) */
-	stx	%g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */
-	stx	%g4, [%g1]	/* irq_work(cpu) = bucket */
+	ldx	[%g1], %g2
+	stxa	%g2, [%g4] ASI_PHYS_USE_EC
+	stx	%g4, [%g1]
 
 	/* Signal the interrupt by setting (1 << pil) in %softint.  */
 	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
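Same pattern as in do_ivec: sun4v_dev_mondo now forms the bucket's physical address directly as ivector_table_pa plus the vector number scaled by the 16-byte entry size (the sllx %g3, 4 and add %g4, %g3, %g4 pair). The equivalent arithmetic in C, as a small illustration:

    #include <stdint.h>

    /* ivec arrives in %g3; ivector_table_pa is set up by init_IRQ() in irq.c. */
    static inline uint64_t ivec_bucket_pa(uint64_t ivector_table_pa, uint64_t ivec)
    {
        return ivector_table_pa + (ivec << 4);   /* 16 bytes per ino_bucket entry */
    }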
arch/sparc64/kernel/traps.c

@@ -2569,8 +2569,8 @@ void __init trap_init(void)
 	     offsetof(struct trap_per_cpu, tsb_huge)) ||
 	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
 	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
-	    (TRAP_PER_CPU_IRQ_WORKLIST !=
-	     offsetof(struct trap_per_cpu, irq_worklist)) ||
+	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
 	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
 	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
include/asm-sparc64/cpudata.h

@@ -75,7 +75,7 @@ struct trap_per_cpu {
 	unsigned long		tsb_huge_temp;
 
 	/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
-	unsigned long		irq_worklist;
+	unsigned long		irq_worklist_pa;
 	unsigned int		cpu_mondo_qmask;
 	unsigned int		dev_mondo_qmask;
 	unsigned int		resum_qmask;

@@ -127,7 +127,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
 #define TRAP_PER_CPU_TSB_HUGE		0xd0
 #define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST	0xe0
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
 #define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
 #define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
 #define TRAP_PER_CPU_RESUM_QMASK	0xf0

@@ -183,9 +183,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
 
 /* Clobbers TMP, loads DEST with current thread info pointer. */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)	\

@@ -222,9 +222,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
 
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)	\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)	\
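These constants and the TRAP_LOAD_IRQ_WORK_PA macro are how the assembler finds irq_worklist_pa inside struct trap_per_cpu; the trap_init() check in traps.c above guarantees that TRAP_PER_CPU_IRQ_WORKLIST_PA (0xe0) stays equal to offsetof(struct trap_per_cpu, irq_worklist_pa). A standalone sketch of that consistency check, using a reduced stand-in structure rather than the real trap_per_cpu:

    #include <stddef.h>
    #include <stdint.h>

    /* Reduced stand-in: only the start of dcache line 8 from the real layout. */
    struct trap_per_cpu_model {
        uint8_t  lines_0_through_7[0xe0];  /* everything before irq_worklist_pa  */
        uint64_t irq_worklist_pa;          /* 0xe0: per-cpu worklist head (a PA) */
        uint32_t cpu_mondo_qmask;          /* 0xe8 */
        uint32_t dev_mondo_qmask;          /* 0xec */
    };

    #define MODEL_TRAP_PER_CPU_IRQ_WORKLIST_PA  0xe0

    _Static_assert(offsetof(struct trap_per_cpu_model, irq_worklist_pa) ==
                   MODEL_TRAP_PER_CPU_IRQ_WORKLIST_PA,
                   "assembler offset must match the C structure");

    /* TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) then yields
     *     DEST = &trap_block[cpu] + 0xe0 == &trap_block[cpu].irq_worklist_pa,
     * the per-cpu worklist head used by do_ivec and sun4v_dev_mondo. */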