Commit f34d9224 authored by David S. Miller

Merge branch 'ipa-interrupts'

Alex Elder says:

====================
net: ipa: IPA interrupt cleanup

The first patch in this series makes all IPA interrupt handling be
done in a threaded context.  The remaining ones refactor some code
to simplify that threaded handler function.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8ca34a13 176086d8
...@@ -74,21 +74,25 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) ...@@ -74,21 +74,25 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
iowrite32(mask, ipa->reg_virt + offset); iowrite32(mask, ipa->reg_virt + offset);
} }
/* Process all IPA interrupt types that have been signaled */ /* IPA IRQ handler is threaded */
static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{ {
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa; struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled; u32 enabled = interrupt->enabled;
u32 pending;
u32 offset; u32 offset;
u32 mask; u32 mask;
ipa_clock_get(ipa);
/* The status register indicates which conditions are present, /* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle * including conditions whose interrupt is not enabled. Handle
* only the enabled ones. * only the enabled ones.
*/ */
offset = ipa_reg_irq_stts_offset(ipa->version); offset = ipa_reg_irq_stts_offset(ipa->version);
mask = ioread32(ipa->reg_virt + offset); pending = ioread32(ipa->reg_virt + offset);
while ((mask &= enabled)) { while ((mask = pending & enabled)) {
do { do {
u32 irq_id = __ffs(mask); u32 irq_id = __ffs(mask);
...@@ -96,43 +100,20 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) ...@@ -96,43 +100,20 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, irq_id); ipa_interrupt_process(interrupt, irq_id);
} while (mask); } while (mask);
mask = ioread32(ipa->reg_virt + offset); pending = ioread32(ipa->reg_virt + offset);
} }
}
/* Threaded part of the IPA IRQ handler */
static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
{
struct ipa_interrupt *interrupt = dev_id;
ipa_clock_get(interrupt->ipa);
ipa_interrupt_process_all(interrupt); /* If any disabled interrupts are pending, clear them */
if (pending) {
struct device *dev = &ipa->pdev->dev;
ipa_clock_put(interrupt->ipa); dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
return IRQ_HANDLED; offset = ipa_reg_irq_clr_offset(ipa->version);
} iowrite32(pending, ipa->reg_virt + offset);
}
/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */
static irqreturn_t ipa_isr(int irq, void *dev_id)
{
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 offset;
u32 mask;
offset = ipa_reg_irq_stts_offset(ipa->version);
mask = ioread32(ipa->reg_virt + offset);
if (mask & interrupt->enabled)
return IRQ_WAKE_THREAD;
/* Nothing in the mask was supposed to cause an interrupt */
offset = ipa_reg_irq_clr_offset(ipa->version);
iowrite32(mask, ipa->reg_virt + offset);
dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n", ipa_clock_put(ipa);
__func__, mask);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -260,7 +241,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa) ...@@ -260,7 +241,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
offset = ipa_reg_irq_en_offset(ipa->version); offset = ipa_reg_irq_en_offset(ipa->version);
iowrite32(0, ipa->reg_virt + offset); iowrite32(0, ipa->reg_virt + offset);
ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT, ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt); "ipa", interrupt);
if (ret) { if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret); dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment