Commit d3de85a5 authored by David S. Miller

Merge branch 'net-stmmac-fix-handling-of-oversized-frames'

Aaro Koskinen says:

====================
net: stmmac: fix handling of oversized frames

I accidentally had MTU size mismatch (9000 vs. 1500) in my network,
and I noticed I could kill a system using stmmac & 1500 MTU simply
by pinging it with "ping -s 2000 ...".

While testing a fix, I also encountered some other issues that need fixing.

I have tested these only with enhanced descriptors, so the normal
descriptor changes need a careful review.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 288ac524 057a0c56
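
As background for the buffer-size handling fixed below, here is a minimal standalone sketch of the arithmetic the series uses for normal RX descriptors. It is illustrative only, not driver code: the helper name split_rx_buffer and the printouts are made up, and BUF_SIZE_2KiB simply mirrors the driver's 2 KiB define. Buffer 1 is capped at BUF_SIZE_2KiB - 1 bytes and the remainder, capped the same way, goes into buffer 2, so the descriptor never advertises more room than the DMA buffer actually has.

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

/* Standalone illustration of the min()-based split used by the
 * normal-descriptor init/ring helpers in the patches below. */
static void split_rx_buffer(int bfsize)
{
	int bfsize1 = bfsize < BUF_SIZE_2KiB - 1 ? bfsize : BUF_SIZE_2KiB - 1;
	int bfsize2 = 0;

	if (bfsize >= BUF_SIZE_2KiB) {
		bfsize2 = bfsize - BUF_SIZE_2KiB + 1;
		if (bfsize2 > BUF_SIZE_2KiB - 1)
			bfsize2 = BUF_SIZE_2KiB - 1;
	}

	printf("bfsize=%d -> buffer1=%d buffer2=%d\n", bfsize, bfsize1, bfsize2);
}

int main(void)
{
	split_rx_buffer(1536);	/* typical 1500-MTU DMA buffer: buffer2 unused */
	split_rx_buffer(4096);	/* larger buffer split across both halves */
	return 0;
}

With a 1500-MTU configuration the buffer is smaller than 2 KiB, so buffer 2 stays zero; an oversized frame then spans several descriptors, which is why the series also adds the RDES0_LAST_DESCRIPTOR check in the descriptor status functions.
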
@@ -29,11 +29,13 @@
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+					   int bfsize)
 {
-	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-			<< ERDES1_BUFFER2_SIZE_SHIFT)
-		   & ERDES1_BUFFER2_SIZE_MASK);
+	if (bfsize == BUF_SIZE_16KiB)
+		p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+				<< ERDES1_BUFFER2_SIZE_SHIFT)
+			   & ERDES1_BUFFER2_SIZE_MASK);
 
 	if (end)
 		p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-	p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-			<< RDES1_BUFFER2_SIZE_SHIFT)
-		   & RDES1_BUFFER2_SIZE_MASK);
+	if (bfsize >= BUF_SIZE_2KiB) {
+		int bfsize2;
+
+		bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+		p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+			    & RDES1_BUFFER2_SIZE_MASK);
+	}
 
 	if (end)
 		p->des1 |= cpu_to_le32(RDES1_END_RING);
...
@@ -296,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				   int mode, int end)
+				   int mode, int end, int bfsize)
 {
 	dwmac4_set_rx_owner(p, disable_rx_ic);
 }
...
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
 	dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
...
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	if (unlikely(rdes0 & RDES0_OWN))
 		return dma_own;
 
+	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
 	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
 		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
 			x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	 * It doesn't match with the information reported into the databook.
 	 * At any rate, we need to understand if the CSUM hw computation is ok
 	 * and report this info to the upper layers. */
-	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-				 !!(rdes0 & RDES0_FRAME_TYPE),
-				 !!(rdes0 & ERDES0_RX_MAC_ADDR));
+	if (likely(ret == good_frame))
+		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+					 !!(rdes0 & RDES0_FRAME_TYPE),
+					 !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
 	if (unlikely(rdes0 & RDES0_DRIBBLING))
 		x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-				  int mode, int end)
+				  int mode, int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ehn_desc_rx_set_on_chain(p);
 	else
-		ehn_desc_rx_set_on_ring(p, end);
+		ehn_desc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
...
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
 	void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-			int end);
+			int end, int bfsize);
 	/* DMA TX descriptor ring initialization */
 	void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
 	/* Invoked by the xmit function to prepare the tx descriptor */
...
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		return dma_own;
 
 	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-		pr_warn("%s: Oversized frame spanned multiple buffers\n",
-			__func__);
 		stats->rx_length_errors++;
 		return discard_frame;
 	}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-			       int end)
+			       int end, int bfsize)
 {
+	int bfsize1;
+
 	p->des0 |= cpu_to_le32(RDES0_OWN);
-	p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+	p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
 	else
-		ndesc_rx_set_on_ring(p, end);
+		ndesc_rx_set_on_ring(p, end, bfsize);
 
 	if (disable_rx_ic)
 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
...
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1));
+					(i == DMA_RX_SIZE - 1),
+					priv->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					priv->use_riwt, priv->mode,
-					(i == DMA_RX_SIZE - 1));
+					(i == DMA_RX_SIZE - 1),
+					priv->dma_buf_sz);
 }
 
 /**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
-	unsigned int entry = rx_q->cur_rx;
+	unsigned int next_entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
-	unsigned int next_entry;
 	unsigned int count = 0;
 	bool xmac;
 
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
-		int status;
+		int entry, status;
 		struct dma_desc *p;
 		struct dma_desc *np;
 
+		entry = next_entry;
+
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 *  ignored
 			 */
 			if (frame_len > priv->dma_buf_sz) {
-				netdev_err(priv->dev,
-					   "len %d larger than size (%d)\n",
-					   frame_len, priv->dma_buf_sz);
+				if (net_ratelimit())
+					netdev_err(priv->dev,
+						   "len %d larger than size (%d)\n",
+						   frame_len, priv->dma_buf_sz);
 				priv->dev->stats.rx_length_errors++;
-				break;
+				continue;
 			}
 
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					dev_warn(priv->device,
 						 "packet dropped\n");
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 
 				dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			} else {
 				skb = rx_q->rx_skbuff[entry];
 				if (unlikely(!skb)) {
-					netdev_err(priv->dev,
-						   "%s: Inconsistent Rx chain\n",
-						   priv->dev->name);
+					if (net_ratelimit())
+						netdev_err(priv->dev,
+							   "%s: Inconsistent Rx chain\n",
+							   priv->dev->name);
 					priv->dev->stats.rx_dropped++;
-					break;
+					continue;
 				}
 				prefetch(skb->data - NET_IP_ALIGN);
 				rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
 		}
-		entry = next_entry;
 	}
 
 	stmmac_rx_refill(priv, queue);
...
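
The stmmac_rx() changes above replace break with continue on per-descriptor errors, so a single bad frame no longer ends the NAPI poll early. A rough standalone sketch of that pattern follows; it is illustrative only, and the structure and function names here are invented, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct rx_desc { int len; bool bad; };	/* stand-in for a hardware descriptor */

/* Walk the ring up to the budget; count and skip bad descriptors
 * instead of aborting the whole pass. */
static int poll_ring(const struct rx_desc *ring, int n, int budget)
{
	int rx_errors = 0, count = 0;

	for (int i = 0; i < n && count < budget; i++) {
		count++;
		if (ring[i].bad) {
			rx_errors++;
			continue;	/* drop this frame, keep polling */
		}
		printf("delivered frame of %d bytes\n", ring[i].len);
	}
	printf("%d descriptors processed, %d errors\n", count, rx_errors);
	return count;
}

int main(void)
{
	const struct rx_desc ring[] = {
		{ 1500, false }, { 2028, true }, { 60, false },
	};

	return poll_ring(ring, 3, 64) == 3 ? 0 : 1;
}
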