Commit a28453c0 authored by David S. Miller

Merge branch 'stmmac-dma-ops-multiqueue'

Joao Pinto says:

====================
net: stmmac: prepare dma operations for multiple queues

As agreed with David Miller, this patch-set is the second of 3 to enable
multiple queues in stmmac.

This second one concentrates on DMA operations, adding the following
functionality:
a) DMA Operation Mode configuration per channel, done in the multiple
queues configuration function
b) DMA IRQ enable and disable by channel
c) DMA start and stop by channel
d) RX and TX ring length configuration by channel
e) RX and TX set tail pointer by channel
f) DMA channel initialization broken into channel common, RX and TX
initialization
g) TSO configured for all available channels
h) DMA interrupt treatment by channel
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e0304f58 7bac4e1e
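The common shape of the refactor below: every DMA callback that used to act
implicitly on a single hard-wired channel (STMMAC_CHAN0) now takes an explicit
channel index, and the core driver loops over the configured queues. A minimal
sketch of the calling-convention shift (example_start_all_tx is a hypothetical
helper for illustration; the real equivalent added by this series is
stmmac_start_all_dma() further down):

/* Before this series, each op implicitly acted on channel 0 only:
 *
 *	priv->hw->dma->start_tx(priv->ioaddr);
 *
 * After it, every op takes a channel index and the core loops over
 * the queues configured by the platform data.
 */
static void example_start_all_tx(struct stmmac_priv *priv)
{
        u32 chan;

        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
                priv->hw->dma->start_tx(priv->ioaddr, chan);
}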
@@ -416,6 +416,14 @@ struct stmmac_dma_ops {
 	int (*reset)(void __iomem *ioaddr);
 	void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
 		     u32 dma_tx, u32 dma_rx, int atds);
+	void (*init_chan)(void __iomem *ioaddr,
+			  struct stmmac_dma_cfg *dma_cfg, u32 chan);
+	void (*init_rx_chan)(void __iomem *ioaddr,
+			     struct stmmac_dma_cfg *dma_cfg,
+			     u32 dma_rx_phy, u32 chan);
+	void (*init_tx_chan)(void __iomem *ioaddr,
+			     struct stmmac_dma_cfg *dma_cfg,
+			     u32 dma_tx_phy, u32 chan);
 	/* Configure the AXI Bus Mode Register */
 	void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
 	/* Dump DMA registers */
@@ -424,25 +432,28 @@ struct stmmac_dma_ops {
 	 * An invalid value enables the store-and-forward mode */
 	void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
 			 int rxfifosz);
+	void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+			    int fifosz);
+	void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
 	/* To track extra statistic (if supported) */
 	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
 				   void __iomem *ioaddr);
 	void (*enable_dma_transmission) (void __iomem *ioaddr);
-	void (*enable_dma_irq) (void __iomem *ioaddr);
-	void (*disable_dma_irq) (void __iomem *ioaddr);
-	void (*start_tx) (void __iomem *ioaddr);
-	void (*stop_tx) (void __iomem *ioaddr);
-	void (*start_rx) (void __iomem *ioaddr);
-	void (*stop_rx) (void __iomem *ioaddr);
+	void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
+	void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+	void (*start_tx)(void __iomem *ioaddr, u32 chan);
+	void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+	void (*start_rx)(void __iomem *ioaddr, u32 chan);
+	void (*stop_rx)(void __iomem *ioaddr, u32 chan);
 	int (*dma_interrupt) (void __iomem *ioaddr,
-			      struct stmmac_extra_stats *x);
+			      struct stmmac_extra_stats *x, u32 chan);
 	/* If supported then get the optional core features */
 	void (*get_hw_feature)(void __iomem *ioaddr,
 			       struct dma_features *dma_cap);
 	/* Program the HW RX Watchdog */
-	void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
-	void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
-	void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+	void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+	void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+	void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
 	void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 	void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 	void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
...
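The ops table alone does not convey the intended call order of the new split
init hooks. A sketch of how they fit together for a DWMAC4-style core
(ordering inferred from the stmmac_main.c changes later in this patch;
example_init_all_channels is a hypothetical helper):

static void example_init_all_channels(struct stmmac_priv *priv, int atds)
{
        u32 chan;

        /* one-off global DMA setup; descriptor base addresses are
         * programmed per channel below, so dummy values are passed here */
        priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, 0, 0, atds);

        for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
                priv->hw->dma->init_rx_chan(priv->ioaddr, priv->plat->dma_cfg,
                                            priv->dma_rx_phy, chan);

        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
                priv->hw->dma->init_chan(priv->ioaddr, priv->plat->dma_cfg,
                                         chan);
                priv->hw->dma->init_tx_chan(priv->ioaddr, priv->plat->dma_cfg,
                                            priv->dma_tx_phy, chan);
        }
}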
@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
 	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
 }
 
-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+				  u32 number_chan)
 {
 	writel(riwt, ioaddr + DMA_RX_WATCHDOG);
 }
...
@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 	writel(value, ioaddr + DMA_SYS_BUS_MODE);
 }
 
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
-				    struct stmmac_dma_cfg *dma_cfg,
-				    u32 dma_tx_phy, u32 dma_rx_phy,
-				    u32 channel)
-{
-	u32 value;
-	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
-	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
-
-	/* set PBL for each channels. Currently we affect same configuration
-	 * on each channel
-	 */
-	value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
-	if (dma_cfg->pblx8)
-		value = value | DMA_BUS_MODE_PBL;
-	writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
-
-	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
-	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
-	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
-
-	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
-	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
-	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
-
-	/* Mask interrupts by writing to CSR7 */
-	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
-
-	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
-	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+			     struct stmmac_dma_cfg *dma_cfg,
+			     u32 dma_rx_phy, u32 chan)
+{
+	u32 value;
+	u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+
+	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+	value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+}
+
+void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+			     struct stmmac_dma_cfg *dma_cfg,
+			     u32 dma_tx_phy, u32 chan)
+{
+	u32 value;
+	u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+
+	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+}
+
+void dwmac4_dma_init_channel(void __iomem *ioaddr,
+			     struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+	u32 value;
+
+	/* common channel control register config */
+	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+	if (dma_cfg->pblx8)
+		value = value | DMA_BUS_MODE_PBL;
+	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_CHAN_INTR_DEFAULT_MASK,
+	       ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
 			    u32 dma_tx, u32 dma_rx, int atds)
 {
 	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
-	int i;
 
 	/* Set the Fixed burst mode */
 	if (dma_cfg->fixed_burst)
@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
 		value |= DMA_SYS_BUS_AAL;
 
 	writel(value, ioaddr + DMA_SYS_BUS_MODE);
-
-	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-		dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
 }
 
 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -174,78 +182,34 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
 		_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
 }
 
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
 {
-	int i;
+	u32 chan;
 
-	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
-		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+	for (chan = 0; chan < number_chan; chan++)
+		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
 }
 
-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
-				    int rxmode, u32 channel, int rxfifosz)
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+				       u32 channel, int fifosz)
 {
-	unsigned int rqs = rxfifosz / 256 - 1;
-	u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
-
-	/* Following code only done for channel 0, other channels not yet
-	 * supported.
-	 */
-	mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
-
-	if (txmode == SF_DMA_MODE) {
-		pr_debug("GMAC: enable TX store and forward mode\n");
-		/* Transmit COE type 2 cannot be done in cut-through mode. */
-		mtl_tx_op |= MTL_OP_MODE_TSF;
-	} else {
-		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
-		mtl_tx_op &= ~MTL_OP_MODE_TSF;
-		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
-		/* Set the transmit threshold */
-		if (txmode <= 32)
-			mtl_tx_op |= MTL_OP_MODE_TTC_32;
-		else if (txmode <= 64)
-			mtl_tx_op |= MTL_OP_MODE_TTC_64;
-		else if (txmode <= 96)
-			mtl_tx_op |= MTL_OP_MODE_TTC_96;
-		else if (txmode <= 128)
-			mtl_tx_op |= MTL_OP_MODE_TTC_128;
-		else if (txmode <= 192)
-			mtl_tx_op |= MTL_OP_MODE_TTC_192;
-		else if (txmode <= 256)
-			mtl_tx_op |= MTL_OP_MODE_TTC_256;
-		else if (txmode <= 384)
-			mtl_tx_op |= MTL_OP_MODE_TTC_384;
-		else
-			mtl_tx_op |= MTL_OP_MODE_TTC_512;
-	}
-
-	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
-	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
-	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
-	 * with reset values: TXQEN off, TQS 256 bytes.
-	 *
-	 * Write the bits in both cases, since it will have no effect when RO.
-	 * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
-	 * be RO, however, writing the whole TQS field will result in a value
-	 * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
-	 */
-	mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
-	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+	unsigned int rqs = fifosz / 256 - 1;
+	u32 mtl_rx_op, mtl_rx_int;
 
 	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-	if (rxmode == SF_DMA_MODE) {
+	if (mode == SF_DMA_MODE) {
 		pr_debug("GMAC: enable RX store and forward mode\n");
 		mtl_rx_op |= MTL_OP_MODE_RSF;
 	} else {
-		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
 		mtl_rx_op &= ~MTL_OP_MODE_RSF;
 		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
-		if (rxmode <= 32)
+		if (mode <= 32)
 			mtl_rx_op |= MTL_OP_MODE_RTC_32;
-		else if (rxmode <= 64)
+		else if (mode <= 64)
 			mtl_rx_op |= MTL_OP_MODE_RTC_64;
-		else if (rxmode <= 96)
+		else if (mode <= 96)
 			mtl_rx_op |= MTL_OP_MODE_RTC_96;
 		else
 			mtl_rx_op |= MTL_OP_MODE_RTC_128;
@@ -255,7 +219,7 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
 
 	/* enable flow control only if each channel gets 4 KiB or more FIFO */
-	if (rxfifosz >= 4096) {
+	if (fifosz >= 4096) {
 		unsigned int rfd, rfa;
 
 		mtl_rx_op |= MTL_OP_MODE_EHFC;
@@ -266,7 +230,7 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 		 * Set Threshold for Deactivating Flow Control to min 1 frame,
 		 * i.e. 1500 bytes.
 		 */
-		switch (rxfifosz) {
+		switch (fifosz) {
 		case 4096:
 			/* This violates the above formula because of FIFO size
 			 * limit therefore overflow may occur in spite of this.
@@ -306,11 +270,49 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
 		       ioaddr + MTL_CHAN_INT_CTRL(channel));
 }
 
-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
-				      int rxmode, int rxfifosz)
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+				       u32 channel)
 {
-	/* Only Channel 0 is actually configured and used */
-	dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0, rxfifosz);
+	u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+	if (mode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		mtl_tx_op |= MTL_OP_MODE_TSF;
+	} else {
+		pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
+		mtl_tx_op &= ~MTL_OP_MODE_TSF;
+		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+		/* Set the transmit threshold */
+		if (mode <= 32)
+			mtl_tx_op |= MTL_OP_MODE_TTC_32;
+		else if (mode <= 64)
+			mtl_tx_op |= MTL_OP_MODE_TTC_64;
+		else if (mode <= 96)
+			mtl_tx_op |= MTL_OP_MODE_TTC_96;
+		else if (mode <= 128)
+			mtl_tx_op |= MTL_OP_MODE_TTC_128;
+		else if (mode <= 192)
+			mtl_tx_op |= MTL_OP_MODE_TTC_192;
+		else if (mode <= 256)
+			mtl_tx_op |= MTL_OP_MODE_TTC_256;
+		else if (mode <= 384)
+			mtl_tx_op |= MTL_OP_MODE_TTC_384;
+		else
+			mtl_tx_op |= MTL_OP_MODE_TTC_512;
+	}
+
+	/* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
+	 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
+	 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+	 * with reset values: TXQEN off, TQS 256 bytes.
+	 *
+	 * Write the bits in both cases, since it will have no effect when RO.
+	 * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
+	 * be RO, however, writing the whole TQS field will result in a value
+	 * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
+	 */
+	mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
+	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
 }
 
 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
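One detail worth calling out in dwmac4_dma_rx_chan_op_mode() above: the RQS
field encodes the per-queue RX FIFO size in 256-byte blocks, minus one. A
quick worked example (the FIFO size is assumed for illustration):

	unsigned int rqs = fifosz / 256 - 1;	/* 4096-byte FIFO -> rqs = 15 */

and 4096 bytes is also the minimum per-channel FIFO for which the code turns
on hardware flow control (MTL_OP_MODE_EHFC).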
@@ -385,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
 const struct stmmac_dma_ops dwmac4_dma_ops = {
 	.reset = dwmac4_dma_reset,
 	.init = dwmac4_dma_init,
+	.init_chan = dwmac4_dma_init_channel,
+	.init_rx_chan = dwmac4_dma_init_rx_chan,
+	.init_tx_chan = dwmac4_dma_init_tx_chan,
 	.axi = dwmac4_dma_axi,
 	.dump_regs = dwmac4_dump_dma_regs,
-	.dma_mode = dwmac4_dma_operation_mode,
+	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
 	.enable_dma_irq = dwmac4_enable_dma_irq,
 	.disable_dma_irq = dwmac4_disable_dma_irq,
 	.start_tx = dwmac4_dma_start_tx,
@@ -407,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
 const struct stmmac_dma_ops dwmac410_dma_ops = {
 	.reset = dwmac4_dma_reset,
 	.init = dwmac4_dma_init,
+	.init_chan = dwmac4_dma_init_channel,
+	.init_rx_chan = dwmac4_dma_init_rx_chan,
+	.init_tx_chan = dwmac4_dma_init_tx_chan,
 	.axi = dwmac4_dma_axi,
 	.dump_regs = dwmac4_dump_dma_regs,
-	.dma_mode = dwmac4_dma_operation_mode,
+	.dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+	.dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
 	.enable_dma_irq = dwmac410_enable_dma_irq,
 	.disable_dma_irq = dwmac4_disable_dma_irq,
 	.start_tx = dwmac4_dma_start_tx,
...
@@ -185,17 +185,17 @@
 
 int dwmac4_dma_reset(void __iomem *ioaddr);
 void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr);
-void dwmac4_dma_start_tx(void __iomem *ioaddr);
-void dwmac4_dma_stop_tx(void __iomem *ioaddr);
-void dwmac4_dma_start_rx(void __iomem *ioaddr);
-void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x);
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+			 struct stmmac_extra_stats *x, u32 chan);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
...
@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
 
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
 {
-	writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+	writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
 }
 
 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
 {
-	writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+	writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
 }
 
-void dwmac4_dma_start_tx(void __iomem *ioaddr)
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
 	value |= DMA_CONTROL_ST;
-	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
 	value = readl(ioaddr + GMAC_CONFIG);
 	value |= GMAC_CONFIG_TE;
 	writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
 	value &= ~DMA_CONTROL_ST;
-	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
 	value = readl(ioaddr + GMAC_CONFIG);
 	value &= ~GMAC_CONFIG_TE;
 	writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_start_rx(void __iomem *ioaddr)
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
 	value |= DMA_CONTROL_SR;
-	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
 	value = readl(ioaddr + GMAC_CONFIG);
 	value |= GMAC_CONFIG_RE;
 	writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
 {
-	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
 	value &= ~DMA_CONTROL_SR;
-	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 
 	value = readl(ioaddr + GMAC_CONFIG);
 	value &= ~GMAC_CONFIG_RE;
 	writel(value, ioaddr + GMAC_CONFIG);
 }
 
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
 {
-	writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+	writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
 }
 
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
 {
-	writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+	writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
 }
 
-void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
 	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
-	       DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+	       DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
 	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
-	       ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+	       ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
-void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
-	writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+	writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
 }
 
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x)
+			 struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;
 
-	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
 
 	/* ABNORMAL interrupts */
 	if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
 		u32 value;
 
-		value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+		value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
 		/* to schedule NAPI on real RIE event. */
 		if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
 			x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	 * status [21-0] expect reserved bits [5-3]
 	 */
 	writel((intr_status & 0x3fffc7),
-	       ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+	       ioaddr + DMA_CHAN_STATUS(chan));
 
 	return ret;
 }
...
@@ -137,13 +137,14 @@
 #define DMA_CONTROL_FTF		0x00100000	/* Flush transmit FIFO */
 
 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr);
-void dwmac_disable_dma_irq(void __iomem *ioaddr);
-void dwmac_dma_start_tx(void __iomem *ioaddr);
-void dwmac_dma_stop_tx(void __iomem *ioaddr);
-void dwmac_dma_start_rx(void __iomem *ioaddr);
-void dwmac_dma_stop_rx(void __iomem *ioaddr);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+			u32 chan);
 int dwmac_dma_reset(void __iomem *ioaddr);
 
 #endif /* __DWMAC_DMA_H__ */
@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
 	writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
 }
 
-void dwmac_enable_dma_irq(void __iomem *ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_disable_dma_irq(void __iomem *ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 {
 	writel(0, ioaddr + DMA_INTR_ENA);
 }
 
-void dwmac_dma_start_tx(void __iomem *ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value |= DMA_CONTROL_ST;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_tx(void __iomem *ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value &= ~DMA_CONTROL_ST;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_start_rx(void __iomem *ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value |= DMA_CONTROL_SR;
 	writel(value, ioaddr + DMA_CONTROL);
 }
 
-void dwmac_dma_stop_rx(void __iomem *ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
 {
 	u32 value = readl(ioaddr + DMA_CONTROL);
 	value &= ~DMA_CONTROL_SR;
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
 #endif
 
 int dwmac_dma_interrupt(void __iomem *ioaddr,
-			struct stmmac_extra_stats *x)
+			struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;
 	/* read the status register (CSR5) */
...
@@ -730,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
 			       struct ethtool_coalesce *ec)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_cnt = priv->plat->rx_queues_to_use;
 	unsigned int rx_riwt;
 
 	/* Check not supported parameters */
@@ -768,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
 	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
 	priv->tx_coal_timer = ec->tx_coalesce_usecs;
 	priv->rx_riwt = rx_riwt;
-	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+	priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
 
 	return 0;
 }
...
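For context, rx_riwt is normally programmed from userspace through the
standard interrupt-coalescing interface, and with the new rx_watchdog()
signature the same RIWT value is simply fanned out to every RX channel. An
illustrative invocation (device name assumed):

	ethtool -C eth0 rx-usecs 128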
@@ -1277,6 +1277,96 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
 	}
 }
 
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+	priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+	priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops a RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+	priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+	priv->hw->dma->stop_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan = 0;
+
+	for (chan = 0; chan < rx_channels_count; chan++)
+		stmmac_start_rx_dma(priv, chan);
+
+	for (chan = 0; chan < tx_channels_count; chan++)
+		stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan = 0;
+
+	for (chan = 0; chan < rx_channels_count; chan++)
+		stmmac_stop_rx_dma(priv, chan);
+
+	for (chan = 0; chan < tx_channels_count; chan++)
+		stmmac_stop_tx_dma(priv, chan);
+}
+
 /**
  * stmmac_dma_operation_mode - HW DMA operation mode
  * @priv: driver private structure
@@ -1285,14 +1375,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
 	int rxfifosz = priv->plat->rx_fifo_size;
+	u32 txmode = 0;
+	u32 rxmode = 0;
+	u32 chan = 0;
 
 	if (rxfifosz == 0)
 		rxfifosz = priv->dma_cap.rx_fifo_size;
 
-	if (priv->plat->force_thresh_dma_mode)
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
-	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+	if (priv->plat->force_thresh_dma_mode) {
+		txmode = tc;
+		rxmode = tc;
+	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
 		/*
 		 * In case of GMAC, SF mode can be enabled
 		 * to perform the TX COE in HW. This depends on:
@@ -1300,12 +1396,26 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 		 * 2) There is no bugged Jumbo frame support
 		 *    that needs to not insert csum in the TDES.
 		 */
-		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
-					rxfifosz);
+		txmode = SF_DMA_MODE;
+		rxmode = SF_DMA_MODE;
 		priv->xstats.threshold = SF_DMA_MODE;
-	} else
-		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
-					rxfifosz);
+	} else {
+		txmode = tc;
+		rxmode = SF_DMA_MODE;
+	}
+
+	/* configure all channels */
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		for (chan = 0; chan < rx_channels_count; chan++)
+			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+						   rxfifosz);
+
+		for (chan = 0; chan < tx_channels_count; chan++)
+			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+	} else {
+		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+					rxfifosz);
+	}
 }
@@ -1402,28 +1512,29 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 	netif_tx_unlock(priv->dev);
 }
 
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
 {
-	priv->hw->dma->enable_dma_irq(priv->ioaddr);
+	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
 }
 
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
 {
-	priv->hw->dma->disable_dma_irq(priv->ioaddr);
+	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
 }
 
 /**
  * stmmac_tx_err - to manage the tx error
  * @priv: driver private structure
+ * @chan: channel index
  * Description: it cleans the descriptors and restarts the transmission
  * in case of transmission errors.
  */
-static void stmmac_tx_err(struct stmmac_priv *priv)
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
 	int i;
 
 	netif_stop_queue(priv->dev);
 
-	priv->hw->dma->stop_tx(priv->ioaddr);
+	stmmac_stop_tx_dma(priv, chan);
 	dma_free_tx_skbufs(priv);
 	for (i = 0; i < DMA_TX_SIZE; i++)
 		if (priv->extend_desc)
@@ -1437,12 +1548,40 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 	netdev_reset_queue(priv->dev);
-	priv->hw->dma->start_tx(priv->ioaddr);
+	stmmac_start_tx_dma(priv, chan);
 
 	priv->dev->stats.tx_errors++;
 	netif_wake_queue(priv->dev);
 }
 
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring of the DMA operation mode in
+ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ * mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+					  u32 rxmode, u32 chan)
+{
+	int rxfifosz = priv->plat->rx_fifo_size;
+
+	if (rxfifosz == 0)
+		rxfifosz = priv->dma_cap.rx_fifo_size;
+
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+					   rxfifosz);
+		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+	} else {
+		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+					rxfifosz);
+	}
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -1452,34 +1591,41 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
+	u32 tx_channel_count = priv->plat->tx_queues_to_use;
 	int status;
-	int rxfifosz = priv->plat->rx_fifo_size;
+	u32 chan;
 
-	if (rxfifosz == 0)
-		rxfifosz = priv->dma_cap.rx_fifo_size;
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+						      &priv->xstats, chan);
 
-	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
-	if (likely((status & handle_rx)) || (status & handle_tx)) {
-		if (likely(napi_schedule_prep(&priv->napi))) {
-			stmmac_disable_dma_irq(priv);
-			__napi_schedule(&priv->napi);
+		if (likely((status & handle_rx)) || (status & handle_tx)) {
+			if (likely(napi_schedule_prep(&priv->napi))) {
+				stmmac_disable_dma_irq(priv, chan);
+				__napi_schedule(&priv->napi);
+			}
 		}
-	}
 
-	if (unlikely(status & tx_hard_error_bump_tc)) {
-		/* Try to bump up the dma threshold on this failure */
-		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
-		    (tc <= 256)) {
-			tc += 64;
-			if (priv->plat->force_thresh_dma_mode)
-				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
-							rxfifosz);
-			else
-				priv->hw->dma->dma_mode(priv->ioaddr, tc,
-							SF_DMA_MODE, rxfifosz);
-			priv->xstats.threshold = tc;
+		if (unlikely(status & tx_hard_error_bump_tc)) {
+			/* Try to bump up the dma threshold on this failure */
+			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+			    (tc <= 256)) {
+				tc += 64;
+				if (priv->plat->force_thresh_dma_mode)
+					stmmac_set_dma_operation_mode(priv,
+								      tc, tc,
+								      chan);
+				else
+					stmmac_set_dma_operation_mode(priv,
+								      tc,
+								      SF_DMA_MODE,
+								      chan);
+				priv->xstats.threshold = tc;
+			}
+		} else if (unlikely(status == tx_hard_error)) {
+			stmmac_tx_err(priv, chan);
 		}
-	} else if (unlikely(status == tx_hard_error))
-		stmmac_tx_err(priv);
+	}
 }
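A worked example of the bump-up path above, assuming the driver's usual
starting threshold (tc begins at 64 in stmmac): each tx_hard_error_bump_tc
event raises tc by 64, so the channel's TX threshold is reprogrammed to 128,
192, 256 and then 320, after which tc exceeds 256 and no further bumps are
applied.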
@@ -1586,6 +1732,11 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
  */
 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dummy_dma_rx_phy = 0;
+	u32 dummy_dma_tx_phy = 0;
+	u32 chan = 0;
 	int atds = 0;
 	int ret = 0;
 
@@ -1603,19 +1754,43 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		return ret;
 	}
 
-	priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
-			    priv->dma_tx_phy, priv->dma_rx_phy, atds);
-
-	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-		priv->rx_tail_addr = priv->dma_rx_phy +
-			    (DMA_RX_SIZE * sizeof(struct dma_desc));
-		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
-					       STMMAC_CHAN0);
-
-		priv->tx_tail_addr = priv->dma_tx_phy +
-			    (DMA_TX_SIZE * sizeof(struct dma_desc));
-		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
-					       STMMAC_CHAN0);
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		/* DMA Configuration */
+		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
+
+		/* DMA RX Channel Configuration */
+		for (chan = 0; chan < rx_channels_count; chan++) {
+			priv->hw->dma->init_rx_chan(priv->ioaddr,
+						    priv->plat->dma_cfg,
+						    priv->dma_rx_phy, chan);
+
+			priv->rx_tail_addr = priv->dma_rx_phy +
+				    (DMA_RX_SIZE * sizeof(struct dma_desc));
+			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+						       priv->rx_tail_addr,
+						       chan);
+		}
+
+		/* DMA TX Channel Configuration */
+		for (chan = 0; chan < tx_channels_count; chan++) {
+			priv->hw->dma->init_chan(priv->ioaddr,
+						 priv->plat->dma_cfg,
+						 chan);
+
+			priv->hw->dma->init_tx_chan(priv->ioaddr,
+						    priv->plat->dma_cfg,
+						    priv->dma_tx_phy, chan);
+
+			priv->tx_tail_addr = priv->dma_tx_phy +
+				    (DMA_TX_SIZE * sizeof(struct dma_desc));
+			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
+						       priv->tx_tail_addr,
+						       chan);
+		}
+	} else {
+		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+				    priv->dma_tx_phy, priv->dma_rx_phy, atds);
 	}
 
 	if (priv->plat->axi && priv->hw->dma->axi)
@@ -1656,6 +1831,27 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 	add_timer(&priv->txtimer);
 }
 
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
+	/* set TX ring length */
+	if (priv->hw->dma->set_tx_ring_len) {
+		for (chan = 0; chan < tx_channels_count; chan++)
+			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+						       (DMA_TX_SIZE - 1), chan);
+	}
+
+	/* set RX ring length */
+	if (priv->hw->dma->set_rx_ring_len) {
+		for (chan = 0; chan < rx_channels_count; chan++)
+			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+						       (DMA_RX_SIZE - 1), chan);
+	}
+}
+
 /**
  * stmmac_set_tx_queue_weight - Set TX queue weight
  * @priv: driver private structure
@@ -1749,6 +1945,9 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
 	/* Enable MAC RX Queues */
 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
 		stmmac_mac_enable_rx_queues(priv);
+
+	/* Set the HW DMA mode and the COE */
+	stmmac_dma_operation_mode(priv);
 }
@@ -1766,6 +1965,9 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	u32 chan;
 	int ret;
 
 	/* DMA initialization and SW reset */
@@ -1812,9 +2014,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	else
 		stmmac_set_mac(priv->ioaddr, true);
 
-	/* Set the HW DMA mode and the COE */
-	stmmac_dma_operation_mode(priv);
-
 	stmmac_mmc_setup(priv);
 
 	if (init_ptp) {
@@ -1836,31 +2035,26 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 			    __func__);
 #endif
 	/* Start the ball rolling... */
-	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
-	priv->hw->dma->start_tx(priv->ioaddr);
-	priv->hw->dma->start_rx(priv->ioaddr);
+	stmmac_start_all_dma(priv);
 
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
 		priv->rx_riwt = MAX_DMA_RIWT;
-		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
 	}
 
 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
 
-	/* set TX ring length */
-	if (priv->hw->dma->set_tx_ring_len)
-		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
-					       (DMA_TX_SIZE - 1));
-	/* set RX ring length */
-	if (priv->hw->dma->set_rx_ring_len)
-		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
-					       (DMA_RX_SIZE - 1));
+	/* set TX and RX rings length */
+	stmmac_set_rings_length(priv);
 
 	/* Enable TSO */
-	if (priv->tso)
-		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+	if (priv->tso) {
+		for (chan = 0; chan < tx_cnt; chan++)
+			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+	}
 
 	return 0;
 }
@@ -2024,8 +2218,7 @@ static int stmmac_release(struct net_device *dev)
 		free_irq(priv->lpi_irq, dev);
 
 	/* Stop TX/RX DMA and clear the descriptors */
-	priv->hw->dma->stop_tx(priv->ioaddr);
-	priv->hw->dma->stop_rx(priv->ioaddr);
+	stmmac_stop_all_dma(priv);
 
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
@@ -2786,6 +2979,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
 {
 	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
 	int work_done = 0;
+	u32 chan = STMMAC_CHAN0;
 
 	priv->xstats.napi_poll++;
 	stmmac_tx_clean(priv);
@@ -2793,7 +2987,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
 	work_done = stmmac_rx(priv, budget);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
-		stmmac_enable_dma_irq(priv);
+		stmmac_enable_dma_irq(priv, chan);
 	}
 	return work_done;
 }
@@ -2809,9 +3003,10 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
 static void stmmac_tx_timeout(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 chan = STMMAC_CHAN0;
 
 	/* Clear Tx resources and restart transmitting again */
-	stmmac_tx_err(priv);
+	stmmac_tx_err(priv, chan);
 }
@@ -2920,6 +3115,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
+	u32 queues_count;
+	u32 queue;
+
+	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
 
 	if (priv->irq_wake)
 		pm_wakeup_event(priv->device, 0);
@@ -2934,20 +3135,26 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 		int status = priv->hw->mac->host_irq_status(priv->hw,
 							    &priv->xstats);
 
-		if (priv->synopsys_id >= DWMAC_CORE_4_00)
-			status |= priv->hw->mac->host_mtl_irq_status(priv->hw,
-								STMMAC_CHAN0);
-
 		if (unlikely(status)) {
 			/* For LPI we need to save the tx status */
 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
 				priv->tx_path_in_lpi_mode = true;
 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
 				priv->tx_path_in_lpi_mode = false;
-			if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
-				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-							priv->rx_tail_addr,
-							STMMAC_CHAN0);
+		}
+
+		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+			for (queue = 0; queue < queues_count; queue++) {
+				status |=
+				priv->hw->mac->host_mtl_irq_status(priv->hw,
+								   queue);
+
+				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
+				    priv->hw->dma->set_rx_tail_ptr)
+					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+								priv->rx_tail_addr,
+								queue);
+			}
 		}
 
 		/* PCS link status */
@@ -3499,8 +3706,7 @@ int stmmac_dvr_remove(struct device *dev)
 
 	netdev_info(priv->dev, "%s: removing driver", __func__);
 
-	priv->hw->dma->stop_rx(priv->ioaddr);
-	priv->hw->dma->stop_tx(priv->ioaddr);
+	stmmac_stop_all_dma(priv);
 
 	stmmac_set_mac(priv->ioaddr, false);
 
 	netif_carrier_off(ndev);
@@ -3546,8 +3752,7 @@ int stmmac_suspend(struct device *dev)
 	napi_disable(&priv->napi);
 
 	/* Stop TX/RX DMA */
-	priv->hw->dma->stop_tx(priv->ioaddr);
-	priv->hw->dma->stop_rx(priv->ioaddr);
+	stmmac_stop_all_dma(priv);
 
 	/* Enable Power down mode by programming the PMT regs */
 	if (device_may_wakeup(priv->device)) {
...