Kirill Smelkov / linux

Commit d172ad18, authored Jul 17, 2008 by David S. Miller

    sparc64: Convert to generic helpers for IPI function calls.

    Signed-off-by: David S. Miller <davem@davemloft.net>

parent 4fe3ebec
Showing 7 changed files with 33 additions and 73 deletions.
arch/sparc64/Kconfig                  +1   -0
arch/sparc64/kernel/smp.c             +17  -70
arch/sparc64/kernel/sparc64_ksyms.c   +0   -2
arch/sparc64/kernel/ttable.S          +6   -1
arch/sparc64/mm/ultra.S               +5   -0
include/asm-sparc/pil.h               +1   -0
include/asm-sparc/smp_64.h            +3   -0
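The conversion follows the same pattern as the other architectures moved to the generic IPI helpers in this series: the architecture keeps only the code that raises and acknowledges its cross-call interrupts, while kernel/smp.c takes over the call queues and the completion waiting that the removed call_data_struct machinery used to implement. As a caller-side sketch of the API the generic helpers expose (smp_call_function() and its three-argument signature match the code removed below; the example module around it is hypothetical):

#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs on every other online CPU; must be fast and non-blocking. */
static void flush_local_state(void *info)
{
        printk(KERN_INFO "cpu %d flushed\n", smp_processor_id());
}

static void example_broadcast(void)
{
        /*
         * smp_call_function() is now provided by kernel/smp.c (selected
         * via USE_GENERIC_SMP_HELPERS); it queues the request and kicks
         * the other CPUs through arch_send_call_function_ipi(), which
         * this commit implements with the existing xcall_call_function
         * cross call.
         */
        smp_call_function(flush_local_state, NULL, 1 /* wait */);
}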
arch/sparc64/Kconfig

@@ -16,6 +16,7 @@ config SPARC64
 	select HAVE_IDE
 	select HAVE_LMB
 	select HAVE_ARCH_KGDB
+	select USE_GENERIC_SMP_HELPERS if SMP

 config GENERIC_TIME
 	bool
arch/sparc64/kernel/smp.c

@@ -788,89 +788,36 @@ static void smp_start_sync_tick_client(int cpu)
 			      0, 0, 0, mask);
 }

-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 extern unsigned long xcall_call_function;

-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-					  int wait, cpumask_t mask)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.finished, 0);
-	data.wait = wait;
-
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		goto out_unlock;
-
-	call_data = &data;
-	mb();
-
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+}

-	/* Wait for response */
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+extern unsigned long xcall_call_function_single;

-out_unlock:
-	spin_unlock(&call_lock);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);

-	return 0;
+	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
 }

-int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
-	return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
-}
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-
 	clear_softint(1 << irq);
+	generic_smp_call_function_interrupt();
+}

-	if (call_data->wait) {
-		/* let initiator proceed only after completion */
-		func(info);
-		atomic_inc(&call_data->finished);
-	} else {
-		/* let initiator proceed after getting data */
-		atomic_inc(&call_data->finished);
-		func(info);
-	}
+void smp_call_function_single_client(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << irq);
+	generic_smp_call_function_single_interrupt();
 }

 static void tsb_sync(void *info)

@@ -890,7 +837,7 @@ static void tsb_sync(void *info)

 void smp_tsb_sync(struct mm_struct *mm)
 {
-	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
+	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
 }

 extern unsigned long xcall_flush_tlb_mm;
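Besides simplifying the broadcast path above, the commit gives sparc64 a dedicated single-CPU IPI handled by the new smp_call_function_single_client(). A hedged illustration of how a caller reaches it; smp_call_function_single() is the generic helper of this kernel series, and the probing code around it is made up:

#include <linux/jiffies.h>
#include <linux/smp.h>

/* Runs on the chosen CPU in hardirq context; keep it short. */
static void read_remote_tick(void *info)
{
        *(unsigned long *)info = jiffies;
}

static unsigned long probe_cpu(int cpu)
{
        unsigned long stamp = 0;

        /*
         * kernel/smp.c queues the request for @cpu and calls
         * arch_send_call_function_single_ipi(), which now raises the
         * PIL_SMP_CALL_FUNC_SNGL softint; ttable.S routes that to
         * smp_call_function_single_client() on the target CPU.
         */
        smp_call_function_single(cpu, read_remote_tick, &stamp, 1 /* wait */);
        return stamp;
}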
arch/sparc64/kernel/sparc64_ksyms.c

@@ -108,8 +108,6 @@ EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-
-EXPORT_SYMBOL(smp_call_function);
 #endif /* CONFIG_SMP */

 #ifdef CONFIG_MCOUNT
arch/sparc64/kernel/ttable.S

@@ -58,7 +58,12 @@ tl0_irq3:	BTRAP(0x43)
 tl0_irq4:	BTRAP(0x44)
 #endif
 tl0_irq5:	TRAP_IRQ(handler_irq, 5)
-tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+#ifdef CONFIG_SMP
+tl0_irq6:	TRAP_IRQ(smp_call_function_single_client, 6)
+#else
+tl0_irq6:	BTRAP(0x46)
+#endif
+tl0_irq7:	BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_IRQ(handler_irq, 15)
arch/sparc64/mm/ultra.S

@@ -688,6 +688,11 @@ xcall_call_function:
 	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
 	retry

+	.globl	xcall_call_function_single
+xcall_call_function_single:
+	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
+	retry
+
 	.globl	xcall_receive_signal
 xcall_receive_signal:
 	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
include/asm-sparc/pil.h

@@ -17,5 +17,6 @@
 #define PIL_SMP_CAPTURE		3
 #define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
+#define PIL_SMP_CALL_FUNC_SNGL	6

 #endif /* !(_SPARC64_PIL_H) */
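Taken together, the ultra.S, ttable.S and pil.h hunks hang the new IPI off soft-interrupt level 6. The macros below are purely illustrative (only PIL_SMP_CALL_FUNC_SNGL itself comes from this commit) and just spell out the numbering relationship those three files rely on:

/*
 * Illustrative macros, not kernel code; only PIL_SMP_CALL_FUNC_SNGL itself
 * is defined by this commit (include/asm-sparc/pil.h).
 */
#define PIL_SMP_CALL_FUNC_SNGL  6

/* Bit that xcall_call_function_single writes to %set_softint (ultra.S). */
#define SOFTINT_MASK(pil)       (1UL << (pil))

/*
 * SPARC V9 trap type for interrupt level `pil': level 6 is trap 0x46, the
 * tl0_irq6 slot that ttable.S now points at smp_call_function_single_client()
 * when CONFIG_SMP is enabled.
 */
#define IRQ_TRAP_TYPE(pil)      (0x40 + (pil))

So setting bit 6 of the soft interrupt register on a target CPU makes it take trap 0x46, whose handler acknowledges the softint and hands off to generic_smp_call_function_single_interrupt().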
include/asm-sparc/smp_64.h

@@ -34,6 +34,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern int sparc64_multi_core;

+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 /*
  *	General functions that each host system must provide.
  */
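These two prototypes are the whole interface the generic helpers need from the architecture. The sketch below is a rough orientation of how the generic side drives them, not the actual kernel/smp.c implementation; queue_for_cpu() and struct remote_call are hypothetical stand-ins for the real per-CPU queueing and call data:

#include <linux/smp.h>

/* Stand-in for the generic per-CPU call data; not the real structure. */
struct remote_call {
        void (*func)(void *info);
        void *info;
};

/* Hypothetical placeholder: the real code appends to a per-CPU queue here. */
static void queue_for_cpu(int cpu, struct remote_call *call)
{
        (void)cpu;
        (void)call;
}

static void sketch_exec_single(int cpu, struct remote_call *call)
{
        queue_for_cpu(cpu, call);

        /*
         * Kick the target.  On sparc64 this now sends the
         * xcall_call_function_single cross call; its softint handler,
         * smp_call_function_single_client(), calls back into
         * generic_smp_call_function_single_interrupt(), which drains the
         * queue and runs call->func(call->info) on that CPU.
         */
        arch_send_call_function_single_ipi(cpu);
}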