Commit ad72f783 authored by Yanteng Si, committed by Paolo Abeni

net: stmmac: Add multi-channel support

DW GMAC v3.73 can be equipped with the Audio Video (AV) feature, which
enables transmission of time-sensitive traffic over bridged local area
networks (DWC Ethernet QoS Product). In that case there can be up to two
additional DMA-channels available, with no Tx COE support (unless there
are vendor-specific IP-core alterations). Each channel is provided with a
separate set of Control and Status Registers (CSRs) for managing the
transmit and receive functions, descriptor handling, and interrupt
handling.
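For orientation, each additional channel's CSR block sits at a fixed stride
above the legacy channel-0 registers, so channel 0 keeps the historical
offsets. Below is a minimal standalone sketch of that addressing (hosted C
rather than driver code; the 0x100 stride matches the DMA_CHAN_BASE_OFFSET
added by this patch, and 0x1000/0x101c are taken as the usual channel-0
CSR0/CSR7 offsets from dwmac_dma.h):

  #include <stdio.h>

  #define DMA_BUS_MODE          0x00001000u  /* CSR0 (Bus Mode), channel 0 */
  #define DMA_INTR_ENA          0x0000101cu  /* CSR7 (Interrupt Enable), channel 0 */
  #define DMA_CHAN_BASE_OFFSET  0x100u

  /* Each channel's CSR block starts chan * 0x100 bytes above channel 0's. */
  static unsigned int dma_chan_base_addr(unsigned int base, unsigned int chan)
  {
      return base + chan * DMA_CHAN_BASE_OFFSET;
  }

  int main(void)
  {
      for (unsigned int chan = 0; chan < 2; chan++)
          printf("chan %u: BUS_MODE @ 0x%04x, INTR_ENA @ 0x%04x\n",
                 chan, dma_chan_base_addr(DMA_BUS_MODE, chan),
                 dma_chan_base_addr(DMA_INTR_ENA, chan));
      return 0;
  }

With two channels, the second channel's Bus Mode register thus lands at
0x1100 and its Interrupt Enable at 0x111c, while channel 0 keeps the
original offsets.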

Add support for multi-channel DW GMAC controllers simply by making sure
the already implemented DMA configuration is performed on a per-channel
basis.
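In other words, the core now invokes the channel-oriented dma_ops callbacks
once per channel rather than programming a single shared register block. A
simplified, self-contained sketch of that flow (the struct and function
names here are illustrative stand-ins, not the actual stmmac code):

  #include <stdint.h>

  /* Illustrative stand-ins for the per-channel callbacks in stmmac_dma_ops. */
  struct chan_dma_ops {
      void (*init_chan)(void *ioaddr, uint32_t chan);
      void (*init_rx_chan)(void *ioaddr, uint64_t rx_ring_phys, uint32_t chan);
      void (*init_tx_chan)(void *ioaddr, uint64_t tx_ring_phys, uint32_t chan);
  };

  /* Perform the already existing DMA setup once per channel: common CSR
   * init first, then each ring's descriptor base goes into its own channel.
   */
  void dma_init_all_channels(const struct chan_dma_ops *ops, void *ioaddr,
                             const uint64_t *rx_ring_phys, uint32_t rx_cnt,
                             const uint64_t *tx_ring_phys, uint32_t tx_cnt)
  {
      uint32_t max = rx_cnt > tx_cnt ? rx_cnt : tx_cnt;

      for (uint32_t chan = 0; chan < max; chan++)
          ops->init_chan(ioaddr, chan);
      for (uint32_t chan = 0; chan < rx_cnt; chan++)
          ops->init_rx_chan(ioaddr, rx_ring_phys[chan], chan);
      for (uint32_t chan = 0; chan < tx_cnt; chan++)
          ops->init_tx_chan(ioaddr, tx_ring_phys[chan], chan);
  }

The real driver drives this through its stmmac_init_chan()/
stmmac_init_rx_chan()/stmmac_init_tx_chan() wrappers, which is why the
dwmac1000 callbacks below gain a chan argument and switch to the
DMA_CHAN_*() register macros.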

Note that the only currently known instance of a multi-channel DW GMAC
IP-core is the LS2K2000 GNET controller, which has been released with a
vendor-specific feature extension of having eight DMA-channels. Support
for that device will be added in one of the follow-up commits.

Signed-off-by: Feiyang Chen <chenfeiyang@loongson.cn>
Signed-off-by: Yinggang Gu <guyinggang@loongson.cn>
Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
Acked-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Yanteng Si <siyanteng@loongson.cn>
Tested-by: Serge Semin <fancer.lancer@gmail.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 12dbc67c
@@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
writel(v, ioaddr + EMAC_TX_CTL1);
}
static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
u32 v;
@@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
static void dwmac1000_dma_init(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg)
static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
u32 value;
/*
* Set the DMA PBL (Programmable Burst Length) mode.
value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Set the DMA PBL (Programmable Burst Length) mode.
*
* Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
@@ -104,10 +106,10 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
writel(value, ioaddr + DMA_BUS_MODE);
writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}
static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
@@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
/* Configure flow control based on rx fifo size */
csr6 = dwmac1000_configure_fc(csr6, fifosz);
writel(csr6, ioaddr + DMA_CONTROL);
writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
csr6 |= DMA_CONTROL_TTC_256;
}
writel(csr6, ioaddr + DMA_CONTROL);
writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 riwt, u32 queue)
{
writel(riwt, ioaddr + DMA_RX_WATCHDOG);
writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
.init_chan = dwmac1000_dma_init_channel,
.init_rx_chan = dwmac1000_dma_init_rx,
.init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
@@ -22,6 +22,31 @@
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
/* Following DMA defines are channels oriented */
#define DMA_CHAN_BASE_OFFSET 0x100
static inline u32 dma_chan_base_addr(u32 base, u32 chan)
{
return base + chan * DMA_CHAN_BASE_OFFSET;
}
#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan)
#define DMA_CHAN_XMT_POLL_DEMAND(chan) \
dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
#define DMA_CHAN_RCV_POLL_DEMAND(chan) \
dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan)
#define DMA_CHAN_RCV_BASE_ADDR(chan) \
dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
#define DMA_CHAN_TX_BASE_ADDR(chan) \
dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
#define DMA_CHAN_RX_WATCHDOG(chan) \
dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
/* SW Reset */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
@@ -152,7 +177,7 @@
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr)
}
/* CSR1 enables the transmit DMA to check for new descriptor */
void dwmac_enable_dma_transmission(void __iomem *ioaddr)
void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_INTR_ENA);
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value |= DMA_INTR_DEFAULT_RX;
if (tx)
value |= DMA_INTR_DEFAULT_TX;
writel(value, ioaddr + DMA_INTR_ENA);
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_INTR_ENA);
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value &= ~DMA_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_INTR_DEFAULT_TX;
writel(value, ioaddr + DMA_INTR_ENA);
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
#ifdef DWMAC_DMA_DEBUG
@@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
#ifdef DWMAC_DMA_DEBUG
/* Enable it to monitor DMA rx/tx status in case of critical problems */
@@ -197,7 +197,7 @@ struct stmmac_dma_ops {
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -2367,9 +2367,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
/* Adjust for real per queue fifo size */
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
}
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2553,7 +2555,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
true, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
@@ -4753,7 +4755,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4980,7 +4982,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;