Commit ffe9ee47 authored by Chris Dearman, committed by Ralf Baechle

[MIPS] Separate performance counter interrupts

Add support for a performance counter overflow interrupt that is delivered on a
separate interrupt line from the timer interrupt.
Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent b72c0526
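
Background (illustrative sketch, not part of the commit): on a MIPS32/64 R2 CPU the
IntCtl register reports which hardware interrupt pin carries the timer (IntCtl.IPTI,
bits 31:29) and which carries the performance counter overflow (IntCtl.IPPCI, bits
28:26). When both fields select the same pin, the two sources share one interrupt and
the handler has to look at Cause.TI (bit 30) and Cause.PCI (bit 26) to tell them apart;
when they differ, the platform can register a separate irqaction for the counters, which
is what this commit does. The helper below, perf_shares_timer_irq(), is a hypothetical
name used only to illustrate the check; it is not a function added by the commit.

    /*
     * Hypothetical helper, for illustration only: decide whether the
     * performance counter overflow shares an interrupt pin with the
     * CPU timer on a MIPS32/64 R2 core.
     */
    static int perf_shares_timer_irq(void)
    {
            unsigned int intctl = read_c0_intctl();
            unsigned int ipti  = (intctl >> 29) & 7;  /* timer pin (IntCtl.IPTI) */
            unsigned int ippci = (intctl >> 26) & 7;  /* perf counter pin (IntCtl.IPPCI) */

            /* Same pin: the handler must poll Cause.TI/Cause.PCI to demultiplex. */
            return ipti == ippci;
    }
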
@@ -129,13 +129,13 @@ static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_resched = {
         .handler = ipi_resched_interrupt,
-        .flags = IRQF_DISABLED,
+        .flags = IRQF_DISABLED|IRQF_PERCPU,
         .name = "IPI_resched"
 };
 
 static struct irqaction irq_call = {
         .handler = ipi_call_interrupt,
-        .flags = IRQF_DISABLED,
+        .flags = IRQF_DISABLED|IRQF_PERCPU,
         .name = "IPI_call"
 };
@@ -275,10 +275,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
         setup_irq(cpu_ipi_resched_irq, &irq_resched);
         setup_irq(cpu_ipi_call_irq, &irq_call);
 
-        /* need to mark IPI's as IRQ_PER_CPU */
-        irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
         set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-        irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
         set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
@@ -326,8 +323,11 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 
 void prom_init_secondary(void)
 {
+        /* Enable per-cpu interrupts */
+
+        /* This is Malta specific: IPI,performance and timer inetrrupts */
         write_c0_status((read_c0_status() & ~ST0_IM ) |
-                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
 }
 
 void prom_smp_finish(void)
@@ -199,6 +199,30 @@ int (*perf_irq)(void) = null_perf_irq;
 EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int mipsxx_perfcount_irq;
+EXPORT_SYMBOL(mipsxx_perfcount_irq);
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+        /*
+         * The performance counter overflow interrupt may be shared with the
+         * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+         * and we can't reliably determine if a counter interrupt has also
+         * happened (!r2) then don't check for a timer interrupt.
+         */
+        return (mipsxx_perfcount_irq < 0) &&
+               perf_irq() == IRQ_HANDLED &&
+               !r2;
+}
+
 asmlinkage void ll_timer_interrupt(int irq)
 {
         int r2 = cpu_has_mips_r2;
@@ -206,18 +230,12 @@ asmlinkage void ll_timer_interrupt(int irq)
         irq_enter();
         kstat_this_cpu.irqs[irq]++;
 
-        /*
-         * Suckage alert:
-         * Before R2 of the architecture there was no way to see if a
-         * performance counter interrupt was pending, so we have to run the
-         * performance counter interrupt handler anyway.
-         */
-        if (!r2 || (read_c0_cause() & (1 << 26)))
-                if (perf_irq())
-                        goto out;
+        if (handle_perf_irq(r2))
+                goto out;
+
+        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+                goto out;
 
-        /* we keep interrupt disabled all the time */
-        if (!r2 || (read_c0_cause() & (1 << 30)))
-                timer_interrupt(irq, NULL);
+        timer_interrupt(irq, NULL);
 
 out:
@@ -258,7 +276,7 @@ unsigned int mips_hpt_frequency;
 
 static struct irqaction timer_irqaction = {
         .handler = timer_interrupt,
-        .flags = IRQF_DISABLED,
+        .flags = IRQF_DISABLED | IRQF_PERCPU,
         .name = "timer",
 };
@@ -53,9 +53,8 @@
 
 unsigned long cpu_khz;
 
-#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
-
 static int mips_cpu_timer_irq;
+extern int mipsxx_perfcount_irq;
 
 extern void smtc_timer_broadcast(int);
 
 static void mips_timer_dispatch(void)
@@ -63,6 +62,11 @@ static void mips_timer_dispatch(void)
         do_IRQ(mips_cpu_timer_irq);
 }
 
+static void mips_perf_dispatch(void)
+{
+        do_IRQ(mipsxx_perfcount_irq);
+}
+
 /*
  * Redeclare until I get around mopping the timer code insanity on MIPS.
  */
@@ -70,6 +74,24 @@ extern int null_perf_irq(void);
 
 extern int (*perf_irq)(void);
 
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+        /*
+         * The performance counter overflow interrupt may be shared with the
+         * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+         * and we can't reliably determine if a counter interrupt has also
+         * happened (!r2) then don't check for a timer interrupt.
+         */
+        return (mipsxx_perfcount_irq < 0) &&
+               perf_irq() == IRQ_HANDLED &&
+               !r2;
+}
+
 irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 {
         int cpu = smp_processor_id();
@@ -92,8 +114,7 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
          * We could be here due to timer interrupt,
          * perf counter overflow, or both.
          */
-        if (read_c0_cause() & (1 << 26))
-                perf_irq();
+        (void) handle_perf_irq(1);
 
         if (read_c0_cause() & (1 << 30)) {
                 /*
@@ -115,18 +136,18 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 #else /* CONFIG_MIPS_MT_SMTC */
         int r2 = cpu_has_mips_r2;
 
+        if (handle_perf_irq(r2))
+                goto out;
+
+        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+                goto out;
+
         if (cpu == 0) {
                 /*
                  * CPU 0 handles the global timer interrupt job and process
                  * accounting resets count/compare registers to trigger next
                  * timer int.
                  */
-                if (!r2 || (read_c0_cause() & (1 << 26)))
-                        if (perf_irq())
-                                goto out;
-
-                /* we keep interrupt disabled all the time */
-                if (!r2 || (read_c0_cause() & (1 << 30)))
-                        timer_interrupt(irq, NULL);
+                timer_interrupt(irq, NULL);
         } else {
                 /* Everyone else needs to reset the timer int here as
@@ -225,35 +246,82 @@ void __init mips_time_init(void)
         mips_scroll_message();
 }
 
-void __init plat_timer_setup(struct irqaction *irq)
+irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
+        return perf_irq();
+}
+
+static struct irqaction perf_irqaction = {
+        .handler = mips_perf_interrupt,
+        .flags = IRQF_DISABLED | IRQF_PERCPU,
+        .name = "performance",
+};
+
+void __init plat_perf_setup(struct irqaction *irq)
+{
+        int hwint = 0;
+        mipsxx_perfcount_irq = -1;
+
 #ifdef MSC01E_INT_BASE
         if (cpu_has_veic) {
-                set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
-                mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+                set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch);
+                mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
         } else
 #endif
-        {
-                if (cpu_has_vint)
-                        set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
-                mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
+        if (cpu_has_mips_r2) {
+                /*
+                 * Read IntCtl.IPPCI to determine the performance
+                 * counter interrupt
+                 */
+                hwint = (read_c0_intctl () >> 26) & 7;
+                if (hwint != MIPSCPU_INT_CPUCTR) {
+                        if (cpu_has_vint)
+                                set_vi_handler (hwint, mips_perf_dispatch);
+                        mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint;
+                }
+        }
+
+        if (mipsxx_perfcount_irq >= 0) {
+#ifdef CONFIG_MIPS_MT_SMTC
+                setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint);
+#else
+                setup_irq(mipsxx_perfcount_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_SMP
+                set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq);
+#endif
         }
+}
+
+void __init plat_timer_setup(struct irqaction *irq)
+{
+        int hwint = 0;
+        if (cpu_has_veic) {
+                set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
+                mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+        }
+        else {
+                if (cpu_has_mips_r2)
+                        /*
+                         * Read IntCtl.IPTI to determine the timer interrupt
+                         */
+                        hwint = (read_c0_intctl () >> 29) & 7;
+                else
+                        hwint = MIPSCPU_INT_CPUCTR;
+                if (cpu_has_vint)
+                        set_vi_handler (hwint, mips_timer_dispatch);
+                mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint;
+        }
 
         /* we are using the cpu counter for timer interrupts */
         irq->handler = mips_timer_interrupt;    /* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
-        setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
+        setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint);
#else
         setup_irq(mips_cpu_timer_irq, irq);
 #endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_SMP
-        /* irq_desc(riptor) is a global resource, when the interrupt overlaps
-           on seperate cpu's the first one tries to handle the second interrupt.
-           The effect is that the int remains disabled on the second cpu.
-           Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
-        irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
         set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
+
+        plat_perf_setup(&perf_irqaction);
 }
@@ -177,7 +177,10 @@ static int mipsxx_perfcount_handler(void)
         unsigned int counters = op_model_mipsxx_ops.num_counters;
         unsigned int control;
         unsigned int counter;
-        int handled = 0;
+        int handled = IRQ_NONE;
+
+        if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+                return handled;
 
         switch (counters) {
 #define HANDLE_COUNTER(n)                                       \
@@ -188,7 +191,7 @@ static int mipsxx_perfcount_handler(void)
                     (counter & M_COUNTER_OVERFLOW)) {           \
                         oprofile_add_sample(get_irq_regs(), n); \
                         w_c0_perfcntr ## n(reg.counter[n]);     \
-                        handled = 1;                            \
+                        handled = IRQ_HANDLED;                  \
                 }
         HANDLE_COUNTER(3)
         HANDLE_COUNTER(2)