Commit 75b8f78f authored by Jakub Kicinski

Merge branch 'chelsio-cxgb-use-threaded-interrupts-for-deferred-work'

Sebastian Andrzej Siewior says:

====================
chelsio: cxgb: Use threaded interrupts for deferred work

Patch #2 fixes an issue in which del_timer_sync() and tasklet_kill() are
invoked from the interrupt handler. This is probably a rare error case,
since handling it disables interrupts / the card anyway.
Patch #1 converts a worker to a threaded interrupt, which patch #2 then
reuses instead of adding another worker for this task (and a flush_work()
to synchronise against rmmod).
(A minimal sketch of the threaded-interrupt pattern follows below.)
====================
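
For context, the conversion relies on the kernel's generic threaded-IRQ support:
the hard handler runs in interrupt context, records what must be deferred, and
returns IRQ_WAKE_THREAD; the kernel then calls a second handler in a kernel
thread, where sleeping (and calls such as del_timer_sync()) is allowed. The
fragment below is a minimal sketch of that pattern only; struct foo_dev,
foo_hard_irq() and friends are made-up placeholder names, not the cxgb
driver's code.

/*
 * Minimal sketch of the threaded-IRQ pattern the series switches to.
 * All names (struct foo_dev, foo_*) are placeholders, not cxgb code.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_dev {
	spinlock_t lock;
	u32 pending;			/* work bits handed to the thread */
};

#define FOO_EVENT_EXT	0x1

static irqreturn_t foo_hard_irq(int irq, void *data)
{
	struct foo_dev *fd = data;

	/* Hard-IRQ context: just note the work and return quickly. */
	spin_lock(&fd->lock);
	fd->pending |= FOO_EVENT_EXT;
	spin_unlock(&fd->lock);

	return IRQ_WAKE_THREAD;		/* kernel wakes foo_irq_thread() */
}

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_dev *fd = data;
	u32 pending;

	spin_lock_irq(&fd->lock);
	pending = fd->pending;
	fd->pending = 0;
	spin_unlock_irq(&fd->lock);

	if (!pending)
		return IRQ_NONE;

	/* Process context: sleeping, del_timer_sync(), etc. are fine here. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *fd, int irq)
{
	spin_lock_init(&fd->lock);
	return request_threaded_irq(irq, foo_hard_irq, foo_irq_thread,
				    0, "foo", fd);
}

In the patches merged below, adapter->pending_thread_intr plays the role of the
pending bits, and t1_interrupt() / t1_interrupt_thread() are the two handlers.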

Link: https://lore.kernel.org/r/20210202170104.1909200-1-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 462e99a1 82154580
@@ -238,7 +238,6 @@ struct adapter {
 	int msg_enable;
 	u32 mmio_len;
 
-	struct work_struct ext_intr_handler_task;
 	struct adapter_params params;
 
 	/* Terminator modules. */
@@ -257,6 +256,7 @@ struct adapter {
 	/* guards async operations */
 	spinlock_t async_lock ____cacheline_aligned;
+	u32 pending_thread_intr;
 
 	u32 slow_intr_mask;
 	int t1powersave;
 };
@@ -334,8 +334,7 @@ void t1_interrupts_enable(adapter_t *adapter);
 void t1_interrupts_disable(adapter_t *adapter);
 void t1_interrupts_clear(adapter_t *adapter);
 int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-void t1_elmer0_ext_intr(adapter_t *adapter);
-int t1_slow_intr_handler(adapter_t *adapter);
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
 
 int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
 const struct board_info *t1_get_board_info(unsigned int board_id);
@@ -347,7 +346,6 @@ int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
 int t1_init_hw_modules(adapter_t *adapter);
 int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
 void t1_free_sw_modules(adapter_t *adapter);
-void t1_fatal_err(adapter_t *adapter);
 void t1_link_changed(adapter_t *adapter, int port_id);
 void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
 			int speed, int duplex, int pause);
...
@@ -211,7 +211,8 @@ static int cxgb_up(struct adapter *adapter)
 	t1_interrupts_clear(adapter);
 
 	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
-	err = request_irq(adapter->pdev->irq, t1_interrupt,
+	err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
+			  t1_interrupt_thread,
 			  adapter->params.has_msi ? 0 : IRQF_SHARED,
 			  adapter->name, adapter);
 	if (err) {
@@ -916,51 +917,6 @@ static void mac_stats_task(struct work_struct *work)
 	spin_unlock(&adapter->work_lock);
 }
 
-/*
- * Processes elmer0 external interrupts in process context.
- */
-static void ext_intr_task(struct work_struct *work)
-{
-	struct adapter *adapter =
-		container_of(work, struct adapter, ext_intr_handler_task);
-
-	t1_elmer0_ext_intr_handler(adapter);
-
-	/* Now reenable external interrupts */
-	spin_lock_irq(&adapter->async_lock);
-	adapter->slow_intr_mask |= F_PL_INTR_EXT;
-	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	spin_unlock_irq(&adapter->async_lock);
-}
-
-/*
- * Interrupt-context handler for elmer0 external interrupts.
- */
-void t1_elmer0_ext_intr(struct adapter *adapter)
-{
-	/*
-	 * Schedule a task to handle external interrupts as we require
-	 * a process context. We disable EXT interrupts in the interim
-	 * and let the task reenable them when it's done.
-	 */
-	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
-	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-	       adapter->regs + A_PL_ENABLE);
-	schedule_work(&adapter->ext_intr_handler_task);
-}
-
-void t1_fatal_err(struct adapter *adapter)
-{
-	if (adapter->flags & FULL_INIT_DONE) {
-		t1_sge_stop(adapter->sge);
-		t1_interrupts_disable(adapter);
-	}
-	pr_alert("%s: encountered fatal error, operation suspended\n",
-		 adapter->name);
-}
-
 static const struct net_device_ops cxgb_netdev_ops = {
 	.ndo_open		= cxgb_open,
 	.ndo_stop		= cxgb_close,
@@ -1062,8 +1018,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	spin_lock_init(&adapter->async_lock);
 	spin_lock_init(&adapter->mac_lock);
 
-	INIT_WORK(&adapter->ext_intr_handler_task,
-		  ext_intr_task);
 	INIT_DELAYED_WORK(&adapter->stats_update_task,
 			  mac_stats_task);
 
...
@@ -940,10 +940,11 @@ void t1_sge_intr_clear(struct sge *sge)
 /*
  * SGE 'Error' interrupt handler
  */
-int t1_sge_intr_error_handler(struct sge *sge)
+bool t1_sge_intr_error_handler(struct sge *sge)
 {
 	struct adapter *adapter = sge->adapter;
 	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+	bool wake = false;
 
 	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 		cause &= ~F_PACKET_TOO_BIG;
@@ -967,11 +968,14 @@ int t1_sge_intr_error_handler(struct sge *sge)
 		sge->stats.pkt_mismatch++;
 		pr_alert("%s: SGE packet mismatch\n", adapter->name);
 	}
-	if (cause & SGE_INT_FATAL)
-		t1_fatal_err(adapter);
+	if (cause & SGE_INT_FATAL) {
+		t1_interrupts_disable(adapter);
+		adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+		wake = true;
+	}
 
 	writel(cause, adapter->regs + A_SG_INT_CAUSE);
-	return 0;
+	return wake;
 }
 
 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
@@ -1619,11 +1623,46 @@ int t1_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+	struct adapter *adapter = data;
+	u32 pending_thread_intr;
+
+	spin_lock_irq(&adapter->async_lock);
+	pending_thread_intr = adapter->pending_thread_intr;
+	adapter->pending_thread_intr = 0;
+	spin_unlock_irq(&adapter->async_lock);
+
+	if (!pending_thread_intr)
+		return IRQ_NONE;
+
+	if (pending_thread_intr & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr_handler(adapter);
+
+	/* This error is fatal, interrupts remain off */
+	if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
+		pr_alert("%s: encountered fatal error, operation suspended\n",
+			 adapter->name);
+		t1_sge_stop(adapter->sge);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
 	struct sge *sge = adapter->sge;
-	int handled;
+	irqreturn_t handled;
 
 	if (likely(responses_pending(adapter))) {
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
@@ -1645,10 +1684,10 @@ irqreturn_t t1_interrupt(int irq, void *data)
 	handled = t1_slow_intr_handler(adapter);
 	spin_unlock(&adapter->async_lock);
 
-	if (!handled)
+	if (handled == IRQ_NONE)
 		sge->stats.unhandled_irqs++;
 
-	return IRQ_RETVAL(handled != 0);
+	return handled;
 }
 
 /*
...
@@ -74,6 +74,7 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
 int t1_sge_configure(struct sge *, struct sge_params *);
 int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
 void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt_thread(int irq, void *data);
 irqreturn_t t1_interrupt(int irq, void *cookie);
 int t1_poll(struct napi_struct *, int);
 
@@ -81,7 +82,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
 void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
 void t1_sge_start(struct sge *);
 void t1_sge_stop(struct sge *);
-int t1_sge_intr_error_handler(struct sge *);
+bool t1_sge_intr_error_handler(struct sge *sge);
 void t1_sge_intr_enable(struct sge *);
 void t1_sge_intr_disable(struct sge *);
 void t1_sge_intr_clear(struct sge *);
...
@@ -170,7 +170,7 @@ void t1_link_changed(adapter_t *adapter, int port_id)
 	t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
 }
 
-static int t1_pci_intr_handler(adapter_t *adapter)
+static bool t1_pci_intr_handler(adapter_t *adapter)
 {
 	u32 pcix_cause;
 
@@ -179,9 +179,13 @@ static int t1_pci_intr_handler(adapter_t *adapter)
 	if (pcix_cause) {
 		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
 				       pcix_cause);
-		t1_fatal_err(adapter);	/* PCI errors are fatal */
+		/* PCI errors are fatal */
+		t1_interrupts_disable(adapter);
+		adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+		pr_alert("%s: PCI error encountered.\n", adapter->name);
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #ifdef CONFIG_CHELSIO_T1_1G
@@ -210,13 +214,16 @@ static int fpga_phy_intr_handler(adapter_t *adapter)
 /*
  * Slow path interrupt handler for FPGAs.
  */
-static int fpga_slow_intr(adapter_t *adapter)
+static irqreturn_t fpga_slow_intr(adapter_t *adapter)
 {
 	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+	irqreturn_t ret = IRQ_NONE;
 
 	cause &= ~F_PL_INTR_SGE_DATA;
-	if (cause & F_PL_INTR_SGE_ERR)
-		t1_sge_intr_error_handler(adapter->sge);
+	if (cause & F_PL_INTR_SGE_ERR) {
+		if (t1_sge_intr_error_handler(adapter->sge))
+			ret = IRQ_WAKE_THREAD;
+	}
 
 	if (cause & FPGA_PCIX_INTERRUPT_GMAC)
 		fpga_phy_intr_handler(adapter);
@@ -231,14 +238,19 @@ static int fpga_slow_intr(adapter_t *adapter)
 		/* Clear TP interrupt */
 		writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
 	}
 
-	if (cause & FPGA_PCIX_INTERRUPT_PCIX)
-		t1_pci_intr_handler(adapter);
+	if (cause & FPGA_PCIX_INTERRUPT_PCIX) {
+		if (t1_pci_intr_handler(adapter))
+			ret = IRQ_WAKE_THREAD;
+	}
 
 	/* Clear the interrupts just processed. */
 	if (cause)
 		writel(cause, adapter->regs + A_PL_CAUSE);
 
-	return cause != 0;
+	if (ret != IRQ_NONE)
+		return ret;
+
+	return cause == 0 ? IRQ_NONE : IRQ_HANDLED;
 }
 #endif
@@ -842,31 +854,45 @@ void t1_interrupts_clear(adapter_t* adapter)
 /*
  * Slow path interrupt handler for ASICs.
  */
-static int asic_slow_intr(adapter_t *adapter)
+static irqreturn_t asic_slow_intr(adapter_t *adapter)
 {
 	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+	irqreturn_t ret = IRQ_HANDLED;
 
 	cause &= adapter->slow_intr_mask;
 	if (!cause)
-		return 0;
-	if (cause & F_PL_INTR_SGE_ERR)
-		t1_sge_intr_error_handler(adapter->sge);
+		return IRQ_NONE;
+	if (cause & F_PL_INTR_SGE_ERR) {
+		if (t1_sge_intr_error_handler(adapter->sge))
+			ret = IRQ_WAKE_THREAD;
+	}
 	if (cause & F_PL_INTR_TP)
 		t1_tp_intr_handler(adapter->tp);
 	if (cause & F_PL_INTR_ESPI)
 		t1_espi_intr_handler(adapter->espi);
-	if (cause & F_PL_INTR_PCIX)
-		t1_pci_intr_handler(adapter);
-	if (cause & F_PL_INTR_EXT)
-		t1_elmer0_ext_intr(adapter);
+	if (cause & F_PL_INTR_PCIX) {
+		if (t1_pci_intr_handler(adapter))
+			ret = IRQ_WAKE_THREAD;
+	}
+	if (cause & F_PL_INTR_EXT) {
+		/* Wake the threaded interrupt to handle external interrupts as
+		 * we require a process context. We disable EXT interrupts in
+		 * the interim and let the thread reenable them when it's done.
+		 */
+		adapter->pending_thread_intr |= F_PL_INTR_EXT;
+		adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+		writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+		       adapter->regs + A_PL_ENABLE);
+		ret = IRQ_WAKE_THREAD;
+	}
 
 	/* Clear the interrupts just processed. */
 	writel(cause, adapter->regs + A_PL_CAUSE);
 	readl(adapter->regs + A_PL_CAUSE); /* flush writes */
-	return 1;
+	return ret;
 }
 
-int t1_slow_intr_handler(adapter_t *adapter)
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter)
 {
 #ifdef CONFIG_CHELSIO_T1_1G
 	if (!t1_is_asic(adapter))
...