Commit eb2932b0 authored by David S. Miller

Merge branch 'net-bcmgenet-use-hardware-padding-of-runt-frames'

Doug Berger says:

====================
net: bcmgenet: use hardware padding of runt frames

Enabling scatter-gather and tx-checksumming by default revealed a
packet corruption issue that can occur for very short fragmented
packets.

When padding these frames to the minimum length, it is possible for
the non-linear (fragment) data to be folded into the end of the
linear header of the SKB. Since the number of fragments is read
before the padding and used afterward without being reloaded, the
fragment that should have been consumed can be tacked on in place of
part of the padding.
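
As an illustration, here is a minimal sketch of the hazardous pattern
(hypothetical function, not the driver's exact code): the fragment
count is cached before the padding call, but skb_padto() can
linearize a short fragmented skb, leaving the cached count stale.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: illustrates the stale fragment count, nothing more. */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Fragment count is read BEFORE padding... */
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int i;

	/* ...but for a short fragmented skb, skb_padto() may linearize
	 * it, folding the fragment data into the linear buffer and
	 * dropping the real fragment count to zero.
	 */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed on failure */

	/* The stale count still walks the old fragment entries, so their
	 * data is queued again where the zero padding should have been.
	 */
	for (i = 0; i < nr_frags; i++)
		; /* map and queue skb_shinfo(skb)->frags[i] here */

	return NETDEV_TX_OK;
}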

The third commit in this set corrects this by removing the software
padding and allowing the hardware to add the pad bytes if necessary.
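
For contrast, a software-only fix would have had to reload the
fragment count after the padding call; a hypothetical sketch:

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	nr_frags = skb_shinfo(skb)->nr_frags;	/* re-read after padding */

The series avoids that bookkeeping entirely: as the note added in the
last hunk below points out, the hardware pads runt frames as long as
DMA_TX_APPEND_CRC is set on the first descriptor.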

The first two commits resolve warnings observed by the kbuild test
robot and are included here for simplicity of application.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a5124386 20d1f2d1
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 			genet_dma_ring_regs[r]);
 }
 
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
-					   u32 f_index)
-{
-	u32 offset;
-	u32 reg;
-
-	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
-	reg = bcmgenet_hfb_reg_readl(priv, offset);
-	return !!(reg & (1 << (f_index % 32)));
-}
-
 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
 {
 	u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
 	bcmgenet_hfb_reg_writel(priv, reg, offset);
 }
 
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
-	u32 f_index;
-
-	/* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
-	for (f_index = MAX_NUM_OF_FS_RULES;
-	     f_index < priv->hw_params->hfb_filter_cnt; f_index++)
-		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
-			return f_index;
-
-	return -ENOMEM;
-}
-
 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
 {
 	while (size) {
@@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 {
 	struct ethtool_rx_flow_spec *fs = &rule->fs;
 	int err = 0, offset = 0, f_length = 0;
-	u16 val_16, mask_16;
 	u8 val_8, mask_8;
+	__be16 val_16;
+	u16 mask_16;
 	size_t size;
 	u32 *f_data;
 
@@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 	return err;
 }
 
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit  19    - nibble 0 match enable
- * bit  18    - nibble 1 match enable
- * bit  17    - nibble 2 match enable
- * bit  16    - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8  - nibble 1 data
- * bits 7:4   - nibble 2 data
- * bits 3:0   - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- *			   ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
-			    u32 f_length, u32 rx_queue)
-{
-	int f_index;
-
-	f_index = bcmgenet_hfb_find_unused_filter(priv);
-	if (f_index < 0)
-		return -ENOMEM;
-
-	if (f_length > priv->hw_params->hfb_filter_size)
-		return -EINVAL;
-
-	bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
-	bcmgenet_hfb_enable_filter(priv, f_index);
-
-	return 0;
-}
-
 /* bcmgenet_hfb_clear
  *
  * Clear Hardware Filter Block and disable all filtering.
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
-	if (skb_padto(skb, ETH_ZLEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
 	/* Retain how many bytes will be sent on the wire, without TSB inserted
 	 * by transmit checksum offload
 	 */
@@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
 			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
 
+		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
+		 * will need to restore software padding of "runt" packets
+		 */
 		if (!i) {
 			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
 			if (skb->ip_summed == CHECKSUM_PARTIAL)
...
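
As a reference for the filter-word layout documented in the removed
bcmgenet_hfb_add_filter() comment, a small packing helper (purely
hypothetical, not part of the driver) could build one 32-bit entry:

#include <linux/types.h>

/* Pack two bytes of match data plus a 4-bit per-nibble enable mask
 * (bit 3 = nibble 0 ... bit 0 = nibble 3) into one HFB filter word:
 * bits 19:16 are the nibble match enables, bits 15:0 the nibble data.
 */
static u32 hfb_pack_word(u16 data, u8 nibble_en)
{
	return ((u32)(nibble_en & 0xf) << 16) | data;
}

/* Reproduces the values from the removed comment's example:
 * hfb_pack_word(0x0800, 0xf) == 0x000F0800  EtherType 0x0800, all nibbles
 * hfb_pack_word(0x4000, 0x8) == 0x00084000  IP version 4, nibble 0 only
 * hfb_pack_word(0x0011, 0x3) == 0x00030011  protocol 0x11, nibbles 2 and 3
 */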