Commit 33719b57 authored by Andrew Halaney, committed by Paolo Abeni

net: stmmac: dwmac4: Allow platforms to specify some DMA/MTL offsets

Some platforms have dwmac4 implementations that have a different
address space layout than the default, resulting in the need to define
their own DMA/MTL offsets.

Extend the functions to allow a platform driver to indicate what its
addresses are, overriding the defaults.
Signed-off-by: Andrew Halaney <ahalaney@redhat.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Brian Masney <bmasney@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 1d84b487
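
For context, a platform glue driver that needs a non-default layout would define its own struct dwmac4_addrs and hang it off plat_stmmacenet_data before probing the core; leaving the pointer NULL keeps the default offsets used in the diff below. The following sketch is hypothetical: the driver name and every base/offset value are illustrative only and not taken from this patch, though the stmmac platform helpers it calls are the existing ones.

/* Hypothetical platform glue, for illustration only. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include "stmmac.h"
#include "stmmac_platform.h"

/* Illustrative layout: every base address and offset below is made up. */
static const struct dwmac4_addrs example_dwmac4_addrs = {
	.dma_chan		= 0x00008100,
	.dma_chan_offset	= 0x1000,
	.mtl_chan		= 0x00008000,
	.mtl_chan_offset	= 0x1000,
	.mtl_ets_ctrl		= 0x00008010,
	.mtl_ets_ctrl_offset	= 0x1000,
	.mtl_txq_weight		= 0x00008018,
	.mtl_txq_weight_offset	= 0x1000,
	.mtl_send_slp_cred	= 0x0000801c,
	.mtl_send_slp_cred_offset = 0x1000,
	.mtl_high_cred		= 0x00008020,
	.mtl_high_cred_offset	= 0x1000,
	.mtl_low_cred		= 0x00008024,
	.mtl_low_cred_offset	= 0x1000,
};

static int example_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	/* Point the dwmac4 core at this platform's register layout;
	 * a NULL dwmac4_addrs keeps the default MTL/DMA offsets.
	 */
	plat_dat->dwmac4_addrs = &example_dwmac4_addrs;

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
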
@@ -336,14 +336,25 @@ enum power_event {
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
(x * MTL_CHAN_BASE_OFFSET))
#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_chan + (x * addrs->mtl_chan_offset);
else
addr = MTL_CHAN_BASE_ADDR + (x * MTL_CHAN_BASE_OFFSET);
return addr;
}
#define MTL_CHAN_TX_OP_MODE(addrs, x) mtl_chanx_base_addr(addrs, x)
#define MTL_CHAN_TX_DEBUG(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x8)
#define MTL_CHAN_INT_CTRL(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x2c)
#define MTL_CHAN_RX_OP_MODE(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x30)
#define MTL_CHAN_RX_DEBUG(addrs, x) (mtl_chanx_base_addr(addrs, x) + 0x38)
#define MTL_OP_MODE_RSF BIT(5)
#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2)
@@ -388,8 +399,19 @@ enum power_event {
/* MTL ETS Control register */
#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
#define MTL_ETS_CTRL_BASE_OFFSET 0x40
#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
((x) * MTL_ETS_CTRL_BASE_OFFSET))
static inline u32 mtl_etsx_ctrl_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_ets_ctrl + (x * addrs->mtl_ets_ctrl_offset);
else
addr = MTL_ETS_CTRL_BASE_ADDR + (x * MTL_ETS_CTRL_BASE_OFFSET);
return addr;
}
#define MTL_ETS_CTRL_CC BIT(3)
#define MTL_ETS_CTRL_AVALG BIT(2)
@@ -397,31 +419,76 @@ enum power_event {
/* MTL Queue Quantum Weight */
#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
static inline u32 mtl_txqx_weight_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_txq_weight + (x * addrs->mtl_txq_weight_offset);
else
addr = MTL_TXQ_WEIGHT_BASE_ADDR + (x * MTL_TXQ_WEIGHT_BASE_OFFSET);
return addr;
}
#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
/* MTL sendSlopeCredit register */
#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
#define MTL_SEND_SLP_CRED_OFFSET 0x40
#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
((x) * MTL_SEND_SLP_CRED_OFFSET))
static inline u32 mtl_send_slp_credx_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_send_slp_cred + (x * addrs->mtl_send_slp_cred_offset);
else
addr = MTL_SEND_SLP_CRED_BASE_ADDR + (x * MTL_SEND_SLP_CRED_OFFSET);
return addr;
}
#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
/* MTL hiCredit register */
#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
#define MTL_HIGH_CRED_OFFSET 0x40
#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
((x) * MTL_HIGH_CRED_OFFSET))
static inline u32 mtl_high_credx_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_high_cred + (x * addrs->mtl_high_cred_offset);
else
addr = MTL_HIGH_CRED_BASE_ADDR + (x * MTL_HIGH_CRED_OFFSET);
return addr;
}
#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
/* MTL loCredit register */
#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
#define MTL_LOW_CRED_OFFSET 0x40
#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
((x) * MTL_LOW_CRED_OFFSET))
static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->mtl_low_cred + (x * addrs->mtl_low_cred_offset);
else
addr = MTL_LOW_CRED_BASE_ADDR + (x * MTL_LOW_CRED_OFFSET);
return addr;
}
#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
......
@@ -202,12 +202,14 @@ static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
struct mac_device_info *hw,
u32 weight, u32 queue)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
queue));
value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
}
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
@@ -233,6 +235,7 @@ static void dwmac4_config_cbs(struct stmmac_priv *priv,
u32 send_slope, u32 idle_slope,
u32 high_credit, u32 low_credit, u32 queue)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
u32 value;
@@ -243,31 +246,33 @@ static void dwmac4_config_cbs(struct stmmac_priv *priv,
pr_debug("\tlow_credit: 0x%08x\n", low_credit);
/* enable AV algorithm */
value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
value |= MTL_ETS_CTRL_AVALG;
value |= MTL_ETS_CTRL_CC;
writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
/* configure send slope */
value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
queue));
value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
queue));
/* configure idle slope (same register as tx weight) */
dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);
/* configure high credit */
value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
value &= ~MTL_HIGH_CRED_HC_MASK;
value |= high_credit & MTL_HIGH_CRED_HC_MASK;
writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
/* configure high credit */
value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
value &= ~MTL_HIGH_CRED_LC_MASK;
value |= low_credit & MTL_HIGH_CRED_LC_MASK;
writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
@@ -764,6 +769,7 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
struct mac_device_info *hw, u32 chan)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
void __iomem *ioaddr = hw->pcsr;
u32 mtl_int_qx_status;
int ret = 0;
@@ -773,12 +779,13 @@ static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
/* Check MTL Interrupt */
if (mtl_int_qx_status & MTL_INT_QX(chan)) {
/* read Queue x Interrupt status */
u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
chan));
if (status & MTL_RX_OVERFLOW_INT) {
/* clear Interrupt */
writel(status | MTL_RX_OVERFLOW_INT,
ioaddr + MTL_CHAN_INT_CTRL(chan));
ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
ret = CORE_IRQ_MTL_RX_OVERFLOW;
}
}
@@ -840,11 +847,12 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x,
u32 rx_queues, u32 tx_queues)
{
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value;
u32 queue;
for (queue = 0; queue < tx_queues; queue++) {
value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_TXSTSFSTS)
x->mtl_tx_status_fifo_full++;
@@ -869,7 +877,7 @@ static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
}
for (queue = 0; queue < rx_queues; queue++) {
value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
if (value & MTL_DEBUG_RXFSTS_MASK) {
u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
......
@@ -95,29 +95,41 @@
/* Following DMA defines are channels oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
(x * DMA_CHAN_BASE_OFFSET))
static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
const u32 x)
{
u32 addr;
if (addrs)
addr = addrs->dma_chan + (x * addrs->dma_chan_offset);
else
addr = DMA_CHAN_BASE_ADDR + (x * DMA_CHAN_BASE_OFFSET);
return addr;
}
#define DMA_CHAN_REG_NUMBER 17
#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
#define DMA_CHAN_CONTROL(addrs, x) dma_chanx_base_addr(addrs, x)
#define DMA_CHAN_TX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4)
#define DMA_CHAN_RX_CONTROL(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x8)
#define DMA_CHAN_TX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x14)
#define DMA_CHAN_RX_BASE_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x18)
#define DMA_CHAN_RX_BASE_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x1c)
#define DMA_CHAN_TX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x20)
#define DMA_CHAN_RX_END_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x28)
#define DMA_CHAN_TX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x2c)
#define DMA_CHAN_RX_RING_LEN(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x30)
#define DMA_CHAN_INTR_ENA(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x34)
#define DMA_CHAN_RX_WATCHDOG(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x38)
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
#define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c)
#define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54)
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
/* DMA Control X */
#define DMA_CONTROL_SPH BIT(24)
......
@@ -11,6 +11,7 @@
#include "common.h"
#include "dwmac4_dma.h"
#include "dwmac4.h"
#include "stmmac.h"
int dwmac4_dma_reset(void __iomem *ioaddr)
{
@@ -28,22 +29,27 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
void dwmac4_set_rx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 tail_ptr, u32 chan)
{
writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(dwmac4_addrs, chan));
}
void dwmac4_set_tx_tail_ptr(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 tail_ptr, u32 chan)
{
writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(dwmac4_addrs, chan));
}
void dwmac4_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_TE;
@@ -53,20 +59,24 @@ void dwmac4_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
void dwmac4_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, chan));
}
void dwmac4_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_RE;
@@ -76,81 +86,91 @@ void dwmac4_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
void dwmac4_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, chan));
}
void dwmac4_set_tx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 len, u32 chan)
{
writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(dwmac4_addrs, chan));
}
void dwmac4_set_rx_ring_len(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 len, u32 chan)
{
writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(dwmac4_addrs, chan));
}
void dwmac4_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value |= DMA_CHAN_INTR_DEFAULT_RX;
if (tx)
value |= DMA_CHAN_INTR_DEFAULT_TX;
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
void dwmac410_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
if (tx)
value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
void dwmac4_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value &= ~DMA_CHAN_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_CHAN_INTR_DEFAULT_TX;
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
void dwmac410_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
if (rx)
value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
if (tx)
value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
writel(value, ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
}
int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -195,7 +215,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
x->rx_early_irq++;
writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
writel(intr_status & intr_en,
ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
return ret;
}
......
@@ -186,6 +186,24 @@ struct stmmac_safety_feature_cfg {
u32 tmouten;
};
/* Addresses that may be customized by a platform */
struct dwmac4_addrs {
u32 dma_chan;
u32 dma_chan_offset;
u32 mtl_chan;
u32 mtl_chan_offset;
u32 mtl_ets_ctrl;
u32 mtl_ets_ctrl_offset;
u32 mtl_txq_weight;
u32 mtl_txq_weight_offset;
u32 mtl_send_slp_cred;
u32 mtl_send_slp_cred_offset;
u32 mtl_high_cred;
u32 mtl_high_cred_offset;
u32 mtl_low_cred;
u32 mtl_low_cred_offset;
};
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -274,5 +292,6 @@ struct plat_stmmacenet_data {
bool use_phy_wol;
bool sph_disable;
bool serdes_up_after_phy_linkup;
const struct dwmac4_addrs *dwmac4_addrs;
};
#endif