Commit d62a107a authored by Joao Pinto, committed by David S. Miller

net: stmmac: prepare dma interrupt treatment for multiple queues

This patch prepares the DMA interrupt treatment for multiple queues.
Signed-off-by: Joao Pinto <jpinto@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e593262
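
For orientation, the change has two parts visible in the hunks below: the dma_interrupt callback gains a u32 chan argument (so the per-channel DMA status and interrupt-enable registers are selected by the caller), and stmmac_dma_interrupt() walks every configured channel instead of hard-coding channel 0. The following is a small user-space sketch of that shape only; it is not kernel code, and the stand-in types, bit values and fake "register file" are illustrative, not the stmmac definitions.

/*
 * Simplified sketch of the per-channel interrupt dispatch introduced by
 * this patch. Everything here (types, bits, chan_status[]) is made up
 * for illustration; only the control flow mirrors the patch.
 */
#include <stdio.h>

#define MAX_CHANNELS	4
#define HANDLE_RX	0x1
#define HANDLE_TX	0x2

/* Stand-in for per-channel DMA status registers. */
static unsigned int chan_status[MAX_CHANNELS] = { 0x0, 0x40, 0x0, 0x40 };

struct extra_stats {
	unsigned long rx_normal_irq_n;
};

/* Mirrors the new callback shape: one extra 'chan' argument. */
static int dma_interrupt(struct extra_stats *x, unsigned int chan)
{
	unsigned int status = chan_status[chan];
	int ret = 0;

	if (status & 0x40) {		/* pretend bit 6 is "RX interrupt" */
		x->rx_normal_irq_n++;
		ret |= HANDLE_RX;
	}
	chan_status[chan] = 0;		/* ack by clearing the fake status */
	return ret;
}

int main(void)
{
	struct extra_stats xstats = { 0 };
	unsigned int tx_channel_count = MAX_CHANNELS;	/* plays the role of plat->tx_queues_to_use */
	unsigned int chan;

	/* The core now walks every channel instead of hard-coding channel 0. */
	for (chan = 0; chan < tx_channel_count; chan++) {
		int status = dma_interrupt(&xstats, chan);

		if (status & (HANDLE_RX | HANDLE_TX))
			printf("channel %u: schedule NAPI poll\n", chan);
	}
	printf("rx_normal_irq_n = %lu\n", xstats.rx_normal_irq_n);
	return 0;
}
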
@@ -438,7 +438,7 @@ struct stmmac_dma_ops {
 	void (*start_rx)(void __iomem *ioaddr, u32 chan);
 	void (*stop_rx)(void __iomem *ioaddr, u32 chan);
 	int (*dma_interrupt) (void __iomem *ioaddr,
-			      struct stmmac_extra_stats *x);
+			      struct stmmac_extra_stats *x, u32 chan);
 	/* If supported then get the optional core features */
 	void (*get_hw_feature)(void __iomem *ioaddr,
 			       struct dma_features *dma_cap);
...
@@ -193,7 +193,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x);
+			 struct stmmac_extra_stats *x, u32 chan);
 void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
 void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
...
@@ -122,11 +122,11 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 }

 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x)
+			 struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;

-	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));

 	/* ABNORMAL interrupts */
 	if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
 		u32 value;

-		value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+		value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
 		/* to schedule NAPI on real RIE event. */
 		if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
 			x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	 * status [21-0] expect reserved bits [5-3]
 	 */
 	writel((intr_status & 0x3fffc7),
-	       ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+	       ioaddr + DMA_CHAN_STATUS(chan));

 	return ret;
 }
...
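
The reason passing chan through dwmac4_dma_interrupt() is enough is that DMA_CHAN_STATUS(chan) and DMA_CHAN_INTR_ENA(chan) compute per-channel register offsets, so the channel index selects which channel's status and interrupt-enable registers are read and acknowledged. As a rough illustration of how such per-channel offset macros are typically laid out (the base, stride and offsets below are placeholders, not the dwmac4 register map):

/* Illustrative only: per-channel register offset macros of this kind. */
#define DMA_CHAN_BASE_ADDR	0x00001100
#define DMA_CHAN_BASE_OFFSET	0x80
#define DMA_CHANX_BASE_ADDR(x)	(DMA_CHAN_BASE_ADDR + ((x) * DMA_CHAN_BASE_OFFSET))

#define DMA_CHAN_INTR_ENA(x)	(DMA_CHANX_BASE_ADDR(x) + 0x34)
#define DMA_CHAN_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x60)

/* e.g. DMA_CHAN_STATUS(2) == 0x1100 + 2 * 0x80 + 0x60 == 0x1260 */
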
@@ -143,7 +143,8 @@ void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+			u32 chan);
 int dwmac_dma_reset(void __iomem *ioaddr);

 #endif /* __DWMAC_DMA_H__ */
...
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
 #endif

 int dwmac_dma_interrupt(void __iomem *ioaddr,
-			struct stmmac_extra_stats *x)
+			struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;
 	/* read the status register (CSR5) */
...
@@ -1591,32 +1591,41 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
-	u32 chan = STMMAC_CHAN0;
+	u32 tx_channel_count = priv->plat->tx_queues_to_use;
 	int status;
+	u32 chan;

-	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
-	if (likely((status & handle_rx)) || (status & handle_tx)) {
-		if (likely(napi_schedule_prep(&priv->napi))) {
-			stmmac_disable_dma_irq(priv, chan);
-			__napi_schedule(&priv->napi);
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+						      &priv->xstats, chan);
+		if (likely((status & handle_rx)) || (status & handle_tx)) {
+			if (likely(napi_schedule_prep(&priv->napi))) {
+				stmmac_disable_dma_irq(priv, chan);
+				__napi_schedule(&priv->napi);
+			}
 		}
-	}

-	if (unlikely(status & tx_hard_error_bump_tc)) {
-		/* Try to bump up the dma threshold on this failure */
-		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
-		    (tc <= 256)) {
-			tc += 64;
-			if (priv->plat->force_thresh_dma_mode)
-				stmmac_set_dma_operation_mode(priv->ioaddr,
-							      tc, tc, chan);
-			else
-				stmmac_set_dma_operation_mode(priv->ioaddr, tc,
-							     SF_DMA_MODE, chan);
-			priv->xstats.threshold = tc;
+		if (unlikely(status & tx_hard_error_bump_tc)) {
+			/* Try to bump up the dma threshold on this failure */
+			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+			    (tc <= 256)) {
+				tc += 64;
+				if (priv->plat->force_thresh_dma_mode)
+					stmmac_set_dma_operation_mode(priv,
+								      tc,
+								      tc,
+								      chan);
+				else
+					stmmac_set_dma_operation_mode(priv,
+								      tc,
+								      SF_DMA_MODE,
+								      chan);
+				priv->xstats.threshold = tc;
+			}
+		} else if (unlikely(status == tx_hard_error)) {
+			stmmac_tx_err(priv, chan);
 		}
-	} else if (unlikely(status == tx_hard_error))
-		stmmac_tx_err(priv, chan);
+	}
 }

 /**
...