Commit fd5736bf authored by Alex Marginean, committed by Jakub Kicinski

enetc: Workaround for MDIO register access issue

Due to a hardware issue, an access to MDIO registers
that is concurrent with other ENETC register accesses
may lead to the MDIO access being dropped or corrupted.
The workaround introduces locking for all register accesses
to the ENETC register space.  To reduce performance impact,
a readers-writers locking scheme has been implemented.
The writer in this case is the MDIO access code (regardless of
whether that MDIO access is a register read or write), and the
reader is any access code to non-MDIO ENETC registers.
Also, the datapath functions acquire the read lock fewer times
and use the _hot accessors, while all the rest of the code uses
the _wa accessors, which lock every register access.
The commit that introduced MDIO support is
commit ebfcb23d ("enetc: Add ENETC PF level external MDIO support"),
but due to subsequent refactoring this patch is applicable on
top of a later commit.

Fixes: 6517798d ("enetc: Make MDIO accessors more generic and export to include/linux/fsl")
Signed-off-by: Alex Marginean <alexandru.marginean@nxp.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Link: https://lore.kernel.org/r/20201112182608.26177-1-claudiu.manoil@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 1b9e2a8c
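
For readers less familiar with the pattern, the sketch below is a minimal, self-contained user-space analogy of the readers-writers scheme described in the commit message; it is not the driver code, and all names plus the pthread rwlock are illustrative stand-ins. MDIO accesses take the write lock so they run exclusively, while all other register accesses take the read lock and may run concurrently with each other.

/* Minimal user-space analogy of the ENETC MDIO workaround locking scheme.
 * All names here are illustrative stand-ins, not the driver's symbols.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* plays the role of enetc_mdio_lock */
static pthread_rwlock_t mdio_lock = PTHREAD_RWLOCK_INITIALIZER;

/* stand-in for the memory-mapped register space */
static volatile uint32_t regs[16];

/* non-MDIO register access: read lock, concurrent with other
 * non-MDIO accesses but excluded while an MDIO access runs
 */
static uint32_t reg_read(unsigned int off)
{
        uint32_t val;

        pthread_rwlock_rdlock(&mdio_lock);
        val = regs[off];
        pthread_rwlock_unlock(&mdio_lock);

        return val;
}

/* MDIO register access (read or write): write lock, fully exclusive */
static void mdio_write(unsigned int off, uint32_t val)
{
        pthread_rwlock_wrlock(&mdio_lock);
        regs[off] = val;
        pthread_rwlock_unlock(&mdio_lock);
}

int main(void)
{
        mdio_write(0, 0x1234);
        printf("reg[0] = 0x%x\n", (unsigned int)reg_read(0));
        return 0;
}

The driver version additionally batches several datapath accesses under one read-lock section (enetc_lock_mdio()/enetc_unlock_mdio() with the _hot accessors) and takes the write lock with interrupts disabled on the MDIO side, as the hunks below show.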
@@ -16,6 +16,7 @@ config FSL_ENETC
 config FSL_ENETC_VF
         tristate "ENETC VF driver"
         depends on PCI && PCI_MSI
+        select FSL_ENETC_MDIO
         select PHYLINK
         select DIMLIB
         help
@@ -33,7 +33,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
                 return NETDEV_TX_BUSY;
         }
 
+        enetc_lock_mdio();
         count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
+        enetc_unlock_mdio();
+
         if (unlikely(!count))
                 goto drop_packet_err;
@@ -239,7 +242,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
         skb_tx_timestamp(skb);
 
         /* let H/W know BD ring has been updated */
-        enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+        enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
 
         return count;
@@ -262,12 +265,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
         struct enetc_int_vector *v = data;
         int i;
 
+        enetc_lock_mdio();
+
         /* disable interrupts */
-        enetc_wr_reg(v->rbier, 0);
-        enetc_wr_reg(v->ricr1, v->rx_ictt);
+        enetc_wr_reg_hot(v->rbier, 0);
+        enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
 
         for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
-                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+                enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+        enetc_unlock_mdio();
 
         napi_schedule(&v->napi);
@@ -334,19 +341,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
                 v->rx_napi_work = false;
 
+        enetc_lock_mdio();
+
         /* enable interrupts */
-        enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+        enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
 
         for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
-                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
-                             ENETC_TBIER_TXTIE);
+                enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+                                 ENETC_TBIER_TXTIE);
+
+        enetc_unlock_mdio();
 
         return work_done;
 }
 
 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
 {
-        int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+        int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
 
         return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
 }
@@ -386,7 +397,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
         i = tx_ring->next_to_clean;
         tx_swbd = &tx_ring->tx_swbd[i];
+
+        enetc_lock_mdio();
         bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+        enetc_unlock_mdio();
 
         do_tstamp = false;
@@ -429,16 +443,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
                         tx_swbd = tx_ring->tx_swbd;
                 }
 
+                enetc_lock_mdio();
+
                 /* BD iteration loop end */
                 if (is_eof) {
                         tx_frm_cnt++;
                         /* re-arm interrupt source */
-                        enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
-                                     BIT(16 + tx_ring->index));
+                        enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
+                                         BIT(16 + tx_ring->index));
                 }
 
                 if (unlikely(!bds_to_clean))
                         bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+                enetc_unlock_mdio();
         }
 
         tx_ring->next_to_clean = i;
@@ -515,8 +533,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
         if (likely(j)) {
                 rx_ring->next_to_alloc = i; /* keep track from page reuse */
                 rx_ring->next_to_use = i;
-                /* update ENETC's consumer index */
-                enetc_wr_reg(rx_ring->rcir, i);
         }
 
         return j;
@@ -534,8 +550,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
         u64 tstamp;
 
         if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
-                lo = enetc_rd(hw, ENETC_SICTR0);
-                hi = enetc_rd(hw, ENETC_SICTR1);
+                lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
+                hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
                 rxbd = enetc_rxbd_ext(rxbd);
                 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
                 if (lo <= tstamp_lo)
@@ -684,23 +700,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                 u32 bd_status;
                 u16 size;
 
+                enetc_lock_mdio();
+
                 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
                         int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
 
+                        /* update ENETC's consumer index */
+                        enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
                         cleaned_cnt -= count;
                 }
 
                 rxbd = enetc_rxbd(rx_ring, i);
                 bd_status = le32_to_cpu(rxbd->r.lstatus);
-                if (!bd_status)
+                if (!bd_status) {
+                        enetc_unlock_mdio();
                         break;
+                }
 
-                enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+                enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
                 dma_rmb(); /* for reading other rxbd fields */
                 size = le16_to_cpu(rxbd->r.buf_len);
                 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
-                if (!skb)
+                if (!skb) {
+                        enetc_unlock_mdio();
                         break;
+                }
 
                 enetc_get_offloads(rx_ring, rxbd, skb);
@@ -712,6 +736,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 
                 if (unlikely(bd_status &
                              ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+                        enetc_unlock_mdio();
                         dev_kfree_skb(skb);
                         while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
                                 dma_rmb();
@@ -751,6 +776,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
 
                 enetc_process_skb(rx_ring, skb);
 
+                enetc_unlock_mdio();
+
                 napi_gro_receive(napi, skb);
 
                 rx_frm_cnt++;
@@ -1225,6 +1252,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
         rx_ring->idr = hw->reg + ENETC_SIRXIDR;
 
         enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+        enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
 
         /* enable ring */
         enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
@@ -324,14 +324,100 @@ struct enetc_hw {
         void __iomem *global;
 };
 
-/* general register accessors */
-#define enetc_rd_reg(reg)        ioread32((reg))
-#define enetc_wr_reg(reg, val)   iowrite32((val), (reg))
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to insure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+        read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+        read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+        lockdep_assert_held(&enetc_mdio_lock);
+
+        return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+        lockdep_assert_held(&enetc_mdio_lock);
+
+        iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+        u32 val;
+
+        enetc_lock_mdio();
+        val = ioread32(reg);
+        enetc_unlock_mdio();
+
+        return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+        enetc_lock_mdio();
+        iowrite32(val, reg);
+        enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+        unsigned long flags;
+        u32 val;
+
+        write_lock_irqsave(&enetc_mdio_lock, flags);
+        val = ioread32(reg);
+        write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+        return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+        unsigned long flags;
+
+        write_lock_irqsave(&enetc_mdio_lock, flags);
+        iowrite32(val, reg);
+        write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
 
 #ifdef ioread64
-#define enetc_rd_reg64(reg)      ioread64((reg))
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+        return ioread64(reg);
+}
 #else
 /* using this to read out stats on 32b systems */
-static inline u64 enetc_rd_reg64(void __iomem *reg)
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
 {
         u32 low, high, tmp;
@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
 }
 #endif
 
+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+        u64 val;
+
+        enetc_lock_mdio();
+        val = _enetc_rd_reg64(reg);
+        enetc_unlock_mdio();
+
+        return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg)        _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val)   _enetc_wr_reg_wa((reg), (val))
 #define enetc_rd(hw, off)                enetc_rd_reg((hw)->reg + (off))
 #define enetc_wr(hw, off, val)           enetc_wr_reg((hw)->reg + (off), val)
-#define enetc_rd64(hw, off)              enetc_rd_reg64((hw)->reg + (off))
+#define enetc_rd64(hw, off)              _enetc_rd_reg64_wa((hw)->reg + (off))
 /* port register accessors - PF only */
 #define enetc_port_rd(hw, off)           enetc_rd_reg((hw)->port + (off))
 #define enetc_port_wr(hw, off, val)      enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off)      _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+                                                 (hw)->port + (off), val)
 /* global register accessors - PF only */
 #define enetc_global_rd(hw, off)         enetc_rd_reg((hw)->global + (off))
 #define enetc_global_wr(hw, off, val)    enetc_wr_reg((hw)->global + (off), val)
...@@ -16,13 +16,13 @@ ...@@ -16,13 +16,13 @@
static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off) static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
{ {
return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off); return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
} }
static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off, static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
u32 val) u32 val)
{ {
enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val); enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
} }
#define enetc_mdio_rd(mdio_priv, off) \ #define enetc_mdio_rd(mdio_priv, off) \
...@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs) ...@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
return hw; return hw;
} }
EXPORT_SYMBOL_GPL(enetc_hw_alloc); EXPORT_SYMBOL_GPL(enetc_hw_alloc);
/* Lock for MDIO access errata on LS1028A */
DEFINE_RWLOCK(enetc_mdio_lock);
EXPORT_SYMBOL_GPL(enetc_mdio_lock);