Commit e1f3359e authored by Anton Blanchard

ppc64: cacheline align each XICS IPI bitmask.

parent acb21e41
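
Before this change, the per-CPU IPI message words were packed back-to-back in a single array, so several CPUs' words shared a cache line and every set_bit()/test_and_clear_bit() on an IPI bounced that line between processors. Wrapping each word in a struct padded to a cache line gives every CPU its own line. A minimal userspace sketch of the resulting layout (not part of the patch; CACHE_LINE_SIZE and NR_CPUS below are stand-in values, and __attribute__((aligned)) stands in for the kernel's ____cacheline_aligned macro):

#include <stdio.h>

#define CACHE_LINE_SIZE	128	/* assumption: 128-byte line size */
#define NR_CPUS		32	/* stand-in for the kernel config value */

/* One IPI bitmask per CPU, padded out to a full cache line so updates
 * from different CPUs never contend on the same line. */
struct xics_ipi_struct {
	volatile unsigned long value;
} __attribute__((aligned(CACHE_LINE_SIZE)));

static struct xics_ipi_struct xics_ipi_message[NR_CPUS];

int main(void)
{
	/* sizeof() is rounded up to the alignment, so consecutive array
	 * elements land on distinct cache lines. */
	printf("sizeof(struct xics_ipi_struct) = %zu\n",
	       sizeof(struct xics_ipi_struct));
	printf("stride between entries = %zu\n",
	       (size_t)((char *)&xics_ipi_message[1] -
			(char *)&xics_ipi_message[0]));
	return 0;
}
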
@@ -86,7 +86,11 @@ void xics_cause_IPI(int cpu);
 /*
  * XICS only has a single IPI, so encode the messages per CPU
  */
-volatile unsigned long xics_ipi_message[NR_CPUS] = {0};
+struct xics_ipi_struct {
+	volatile unsigned long value;
+} ____cacheline_aligned;
+
+struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

 #define smp_message_pass(t,m,d,w) ppc_md.smp_message_pass((t),(m),(d),(w))
@@ -322,7 +326,7 @@ smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
 		if (target == MSG_ALL || target == i
 		    || (target == MSG_ALL_BUT_SELF
 			&& i != smp_processor_id())) {
-			set_bit(msg, &xics_ipi_message[i]);
+			set_bit(msg, &xics_ipi_message[i].value);
 			mb();
 			xics_cause_IPI(i);
 		}
@@ -389,17 +393,6 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 void smp_send_reschedule(int cpu)
 {
-	/*
-	 * This is only used if `cpu' is running an idle task,
-	 * so it will reschedule itself anyway...
-	 *
-	 * This isn't the case anymore since the other CPU could be
-	 * sleeping and won't reschedule until the next interrupt (such
-	 * as the timer).
-	 * -- Cort
-	 */
-	/* This is only used if `cpu' is running an idle task,
-	   so it will reschedule itself anyway... */
 	smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
 }
...
@@ -239,31 +239,35 @@ xics_get_irq(struct pt_regs *regs)
 	return irq;
 }

+struct xics_ipi_struct {
+	volatile unsigned long value;
+} ____cacheline_aligned;
+
+extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

 #ifdef CONFIG_SMP
 void xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 {
-	extern volatile unsigned long xics_ipi_message[];
 	int cpu = smp_processor_id();

 	ops->qirr_info(cpu, 0xff);
-	while (xics_ipi_message[cpu]) {
-		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, &xics_ipi_message[cpu])) {
+	while (xics_ipi_message[cpu].value) {
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, &xics_ipi_message[cpu].value)) {
 			mb();
 			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
 		}
-		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[cpu])) {
+		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[cpu].value)) {
 			mb();
 			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
 		}
 #if 0
-		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, &xics_ipi_message[cpu])) {
+		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, &xics_ipi_message[cpu].value)) {
 			mb();
 			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
 		}
 #endif
 #ifdef CONFIG_XMON
-		if (test_and_clear_bit(PPC_MSG_XMON_BREAK, &xics_ipi_message[cpu])) {
+		if (test_and_clear_bit(PPC_MSG_XMON_BREAK, &xics_ipi_message[cpu].value)) {
 			mb();
 			smp_message_recv(PPC_MSG_XMON_BREAK, regs);
 		}
...