Commit 61fa969f authored by John Crispin

MIPS: lantiq: split up IRQ IM ranges

Up to now all our SoCs had the 5 IM ranges in consecutive order. To accommodate
the SVIP we need to support IM ranges that are scattered inside the register range.
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/4237/
parent fea7a08a
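
Before the hunks: a minimal standalone sketch (not part of the patch) of the per-IM decomposition the interrupt handlers below switch to. The constant values are assumptions for illustration only (8 CPU irqs before the cascade, 32 irqs per interrupt module, 5 modules), chosen to match the arithmetic visible in the diff; the example hwirq is made up.

/*
 * Standalone sketch, not part of the patch: how a hardware irq number
 * is split into an interrupt-module (IM) index and a bit offset once
 * each IM has its own remapped register base. The constants below are
 * illustrative assumptions, mirroring the arithmetic in the handlers.
 */
#include <stdio.h>

#define MIPS_CPU_IRQ_CASCADE	8	/* CPU irqs below the ICU range */
#define INT_NUM_IM_OFFSET	32	/* irqs per interrupt module */
#define MAX_IM			5	/* number of interrupt modules */

int main(void)
{
	unsigned int hwirq = 73;	/* made-up example hwirq */
	unsigned int offset = hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned int im = offset / INT_NUM_IM_OFFSET;	/* which IM block */

	offset %= INT_NUM_IM_OFFSET;	/* bit position inside that IM */

	/*
	 * The patched handlers then address the per-IM base, e.g.
	 *   ltq_icu_w32(im, BIT(offset), LTQ_ICU_IM0_ISR);
	 * instead of adding im * LTQ_ICU_OFFSET to a single base.
	 */
	printf("hwirq %u -> IM%u, bit %u\n", hwirq, im, offset);

	return im < MAX_IM ? 0 : 1;
}

With each IM ioremapped separately into ltq_icu_membase[im], the five register blocks no longer have to sit a fixed LTQ_ICU_OFFSET apart in one contiguous window, which is what a scattered layout such as the SVIP's requires.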
@@ -20,4 +20,6 @@
 
 #define MIPS_CPU_TIMER_IRQ 7
 
+#define MAX_IM 5
+
 #endif /* _FALCON_IRQ__ */
@@ -21,4 +21,6 @@
 
 #define MIPS_CPU_TIMER_IRQ 7
 
+#define MAX_IM 5
+
 #endif
@@ -55,8 +55,8 @@
  */
 #define LTQ_ICU_EBU_IRQ 22
 
-#define ltq_icu_w32(x, y)	ltq_w32((x), ltq_icu_membase + (y))
-#define ltq_icu_r32(x)		ltq_r32(ltq_icu_membase + (x))
+#define ltq_icu_w32(m, x, y)	ltq_w32((x), ltq_icu_membase[m] + (y))
+#define ltq_icu_r32(m, x)	ltq_r32(ltq_icu_membase[m] + (x))
 
 #define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))
@@ -82,17 +82,17 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
 };
 
 static int exin_avail;
-static void __iomem *ltq_icu_membase;
+static void __iomem *ltq_icu_membase[MAX_IM];
 static void __iomem *ltq_eiu_membase;
 
 void ltq_disable_irq(struct irq_data *d)
 {
 	u32 ier = LTQ_ICU_IM0_IER;
 	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	int im = offset / INT_NUM_IM_OFFSET;
 
-	ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
@@ -100,32 +100,31 @@ void ltq_mask_and_ack_irq(struct irq_data *d)
 	u32 ier = LTQ_ICU_IM0_IER;
 	u32 isr = LTQ_ICU_IM0_ISR;
 	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	int im = offset / INT_NUM_IM_OFFSET;
 
-	ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
-	isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
-	ltq_icu_w32(BIT(offset), isr);
+	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
+	ltq_icu_w32(im, BIT(offset), isr);
 }
 
 static void ltq_ack_irq(struct irq_data *d)
 {
 	u32 isr = LTQ_ICU_IM0_ISR;
 	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	int im = offset / INT_NUM_IM_OFFSET;
 
-	isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(BIT(offset), isr);
+	ltq_icu_w32(im, BIT(offset), isr);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
 	u32 ier = LTQ_ICU_IM0_IER;
 	int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+	int im = offset / INT_NUM_IM_OFFSET;
 
-	ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
 	offset %= INT_NUM_IM_OFFSET;
-	ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
+	ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
 }
 
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -192,7 +191,7 @@ static void ltq_hw_irqdispatch(int module)
 {
 	u32 irq;
 
-	irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
+	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
 		return;
 
@@ -275,7 +274,7 @@ asmlinkage void plat_irq_dispatch(void)
 		do_IRQ(MIPS_CPU_TIMER_IRQ);
 		goto out;
 	} else {
-		for (i = 0; i < 5; i++) {
+		for (i = 0; i < MAX_IM; i++) {
 			if (pending & (CAUSEF_IP2 << i)) {
 				ltq_hw_irqdispatch(i);
 				goto out;
@@ -318,15 +317,19 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 	struct resource res;
 	int i;
 
-	if (of_address_to_resource(node, 0, &res))
-		panic("Failed to get icu memory range");
+	for (i = 0; i < MAX_IM; i++) {
+		if (of_address_to_resource(node, i, &res))
+			panic("Failed to get icu memory range");
 
-	if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
-		pr_err("Failed to request icu memory");
+		if (request_mem_region(res.start, resource_size(&res),
+					res.name) < 0)
+			pr_err("Failed to request icu memory");
 
-	ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
-	if (!ltq_icu_membase)
-		panic("Failed to remap icu memory");
+		ltq_icu_membase[i] = ioremap_nocache(res.start,
+					resource_size(&res));
+		if (!ltq_icu_membase[i])
+			panic("Failed to remap icu memory");
+	}
 
 	/* the external interrupts are optional and xway only */
 	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
@@ -351,17 +354,17 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 	}
 
 	/* turn off all irqs by default */
-	for (i = 0; i < 5; i++) {
+	for (i = 0; i < MAX_IM; i++) {
 		/* make sure all irqs are turned off by default */
-		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
+		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
 		/* clear all possibly pending interrupts */
-		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
 	}
 
 	mips_cpu_irq_init();
 
-	for (i = 2; i <= 6; i++)
-		setup_irq(i, &cascade);
+	for (i = 0; i < MAX_IM; i++)
+		setup_irq(i + 2, &cascade);
 
 	if (cpu_has_vint) {
 		pr_info("Setting up vectored interrupts\n");
@@ -373,7 +376,8 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 		set_vi_handler(7, ltq_hw5_irqdispatch);
 	}
 
-	irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+	irq_domain_add_linear(node,
+		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);
 
 #if defined(CONFIG_MIPS_MT_SMP)
...
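
A quick cross-check of the last hunk, assuming INT_NUM_IM_OFFSET is 32 and MIPS_CPU_IRQ_CASCADE is 8 (the values used on these SoCs, treated here as assumptions): the linear domain was previously sized 6 * 32 = 192 hwirqs, whereas the new expression gives (5 * 32) + 8 = 168, i.e. exactly the five IM blocks plus the CPU irqs below the cascade.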