Commit 008e8165 authored by David S. Miller

Merge branch 'mlx4-next'

Or Gerlitz says:

====================
mlx4: Add CHECKSUM_COMPLETE support

These patches from Shani, Matan and myself add support for
CHECKSUM_COMPLETE reporting on non TCP/UDP packets such as
GRE and ICMP. I'd like to deeply thank Jerry Chu for his
innovation and support in that effort.

Based on the feedback from Eric and Ido Shamay, in V2 we dropped
the patch which removed the calls to napi_gro_frags() and added
a patch which makes the RX code go through that path
regardless of the checksum status.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b00394c0 f8c6455b
...@@ -115,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { ...@@ -115,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tso_packets", "tso_packets",
"xmit_more", "xmit_more",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "tx_chksum_offload", "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* packet statistics */ /* packet statistics */
"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3", "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
......
...@@ -1893,6 +1893,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) ...@@ -1893,6 +1893,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->packets = 0; priv->rx_ring[i]->packets = 0;
priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0; priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
} }
} }
...@@ -2503,6 +2504,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2503,6 +2504,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Query for default mac and max mtu */ /* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
/* Set default MAC */ /* Set default MAC */
dev->addr_len = ETH_ALEN; dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
......
...@@ -155,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) ...@@ -155,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_bytes = 0; stats->rx_bytes = 0;
priv->port_stats.rx_chksum_good = 0; priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0; priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
for (i = 0; i < priv->rx_ring_num; i++) { for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets; stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes; stats->rx_bytes += priv->rx_ring[i]->bytes;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
} }
stats->tx_packets = 0; stats->tx_packets = 0;
stats->tx_bytes = 0; stats->tx_bytes = 0;
......
...@@ -42,6 +42,10 @@ ...@@ -42,6 +42,10 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/irq.h> #include <linux/irq.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
#include "mlx4_en.h" #include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv, static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
...@@ -643,6 +647,86 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, ...@@ -643,6 +647,86 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
} }
} }
/* The HW does not strip the vlan header in this case, and its reported
 * checksum therefore covers it. Fold the raw 4-byte vlan header
 * (TCI + encapsulated protocol) into the running checksum so later
 * pseudo-header adjustments stay consistent.
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	__wsum vlan_words = *(__wsum *)vlanh;

	return csum_add(hw_checksum, vlan_words);
}
/* The stack expects a checksum that excludes the pseudo header, but the
 * HW value includes it. Rebuild the IPv4 pseudo header sum and subtract
 * it from the HW-provided checksum, storing the result in skb->csum.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	/* L4 payload length = total length minus the IP header
	 * (iph->ihl counts 32-bit words, hence the << 2).
	 */
	__u16 payload_len = be16_to_cpu(iph->tot_len) - (iph->ihl << 2);
	__wsum pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					   payload_len, iph->protocol, 0);

	skb->csum = csum_sub(hw_checksum, pseudo);
}
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 *
 * Returns 0 on success and -1 when the packet starts with a fragment
 * or hop-by-hop extension header, which this fixup does not handle;
 * the caller then falls back to not reporting CHECKSUM_COMPLETE.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
__wsum csum_pseudo_hdr = 0;
/* Extension headers are not accounted for below - bail out. */
if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
return -1;
/* Fold the next-header byte into the HW value before subtracting
 * the pseudo header built from the same fields.
 */
hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
/* Pseudo header: source + destination addresses, payload length,
 * next-header protocol.
 */
csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
/* NOTE(review): ntohs() applied to the u8 nexthdr here, versus the
 * (nexthdr << 8) adjustment above, looks asymmetric - verify the
 * byte-order folding against csum_ipv6_magic().
 */
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
/* Unlike IPv4, the IPv6 header has no checksum field and is not
 * covered by the HW value, so add its contribution explicitly.
 */
skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
return 0;
}
#endif
/* Fix up the HW-provided checksum so the skb can be reported with
 * CHECKSUM_COMPLETE: fold in a non-stripped vlan header when present,
 * then remove the pseudo-header contribution for IPv4/IPv6.
 *
 * @cqe: completion entry carrying the HW checksum and status bits
 * @skb: skb whose ->csum is set on success
 * @va: virtual address of the packet start (Ethernet header)
 * @hwtstamp_rx_filter: the ring's HW timestamp filter mode
 *
 * Returns 0 when skb->csum was adjusted, -1 when the packet cannot be
 * handled (vlan-encapsulated non-IP, or an IPv6 packet rejected by
 * get_fixed_ipv6_csum()); the caller then downgrades to CHECKSUM_NONE.
 */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
int hwtstamp_rx_filter)
{
__wsum hw_checksum = 0;
/* hdr walks the headers; starts just past the Ethernet header. */
void *hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
/* NOTE(review): the vlan fixup is only applied when timestamping is
 * enabled - presumably the HW leaves the vlan header in place (and
 * thus covered by its checksum) only in that mode; confirm against
 * the vlan-stripping configuration.
 */
if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
/* next protocol non IPv4 or IPv6 */
if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
!= htons(ETH_P_IP) &&
((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
!= htons(ETH_P_IPV6))
return -1;
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
/* Skip the vlan header so hdr points at the IP header. */
hdr += sizeof(struct vlan_hdr);
}
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
return -1;
#endif
return 0;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
...@@ -744,73 +828,95 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -744,73 +828,95 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) { if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
(cqe->checksum == cpu_to_be16(0xffff))) { MLX4_CQE_STATUS_UDP)) {
ring->csum_ok++; if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
/* This packet is eligible for GRO if it is: cqe->checksum == cpu_to_be16(0xffff)) {
* - DIX Ethernet (type interpretation) ip_summed = CHECKSUM_UNNECESSARY;
* - TCP/IP (v4) ring->csum_ok++;
* - without IP options } else {
* - not an IP fragment ip_summed = CHECKSUM_NONE;
* - no LLS polling in progress ring->csum_none++;
*/ }
if (!mlx4_en_cq_busy_polling(cq) && } else {
(dev->features & NETIF_F_GRO)) { if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
struct sk_buff *gro_skb = napi_get_frags(&cq->napi); (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
if (!gro_skb) MLX4_CQE_STATUS_IPV6))) {
goto next; ip_summed = CHECKSUM_COMPLETE;
ring->csum_complete++;
nr = mlx4_en_complete_rx_desc(priv, } else {
rx_desc, frags, gro_skb, ip_summed = CHECKSUM_NONE;
length); ring->csum_none++;
if (!nr) }
goto next; }
} else {
skb_shinfo(gro_skb)->nr_frags = nr; ip_summed = CHECKSUM_NONE;
gro_skb->len = length; ring->csum_none++;
gro_skb->data_len = length; }
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (l2_tunnel) /* This packet is eligible for GRO if it is:
gro_skb->csum_level = 1; * - DIX Ethernet (type interpretation)
if ((cqe->vlan_my_qpn & * - TCP/IP (v4)
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && * - without IP options
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { * - not an IP fragment
u16 vid = be16_to_cpu(cqe->sl_vid); * - no LLS polling in progress
*/
if (!mlx4_en_cq_busy_polling(cq) &&
(dev->features & NETIF_F_GRO)) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb)
goto next;
nr = mlx4_en_complete_rx_desc(priv,
rx_desc, frags, gro_skb,
length);
if (!nr)
goto next;
if (ip_summed == CHECKSUM_COMPLETE) {
void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
ring->csum_complete--;
}
}
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); skb_shinfo(gro_skb)->nr_frags = nr;
} gro_skb->len = length;
gro_skb->data_len = length;
gro_skb->ip_summed = ip_summed;
if (dev->features & NETIF_F_RXHASH) if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
skb_set_hash(gro_skb, gro_skb->encapsulation = 1;
be32_to_cpu(cqe->immed_rss_invalid), if ((cqe->vlan_my_qpn &
PKT_HASH_TYPE_L3); cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
skb_record_rx_queue(gro_skb, cq->ring); __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
skb_mark_napi_id(gro_skb, &cq->napi); }
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { if (dev->features & NETIF_F_RXHASH)
timestamp = mlx4_en_get_cqe_ts(cqe); skb_set_hash(gro_skb,
mlx4_en_fill_hwtstamps(mdev, be32_to_cpu(cqe->immed_rss_invalid),
skb_hwtstamps(gro_skb), PKT_HASH_TYPE_L3);
timestamp);
}
napi_gro_frags(&cq->napi); skb_record_rx_queue(gro_skb, cq->ring);
goto next; skb_mark_napi_id(gro_skb, &cq->napi);
}
/* GRO not possible, complete processing here */ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
ip_summed = CHECKSUM_UNNECESSARY; timestamp = mlx4_en_get_cqe_ts(cqe);
} else { mlx4_en_fill_hwtstamps(mdev,
ip_summed = CHECKSUM_NONE; skb_hwtstamps(gro_skb),
ring->csum_none++; timestamp);
} }
} else {
ip_summed = CHECKSUM_NONE; napi_gro_frags(&cq->napi);
ring->csum_none++; goto next;
} }
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) { if (!skb) {
priv->stats.rx_dropped++; priv->stats.rx_dropped++;
...@@ -822,6 +928,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -822,6 +928,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next; goto next;
} }
if (ip_summed == CHECKSUM_COMPLETE) {
if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
ip_summed = CHECKSUM_NONE;
ring->csum_complete--;
ring->csum_none++;
}
}
skb->ip_summed = ip_summed; skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring); skb_record_rx_queue(skb, cq->ring);
......
...@@ -1629,6 +1629,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) ...@@ -1629,6 +1629,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
struct mlx4_init_hca_param init_hca; struct mlx4_init_hca_param init_hca;
u64 icm_size; u64 icm_size;
int err; int err;
struct mlx4_config_dev_params params;
if (!mlx4_is_slave(dev)) { if (!mlx4_is_slave(dev)) {
err = mlx4_QUERY_FW(dev); err = mlx4_QUERY_FW(dev);
...@@ -1762,6 +1763,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev) ...@@ -1762,6 +1763,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto unmap_bf; goto unmap_bf;
} }
/* Query CONFIG_DEV parameters */
err = mlx4_config_dev_retrieval(dev, &params);
if (err && err != -ENOTSUPP) {
mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
} else if (!err) {
dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
}
priv->eq_table.inta_pin = adapter.inta_pin; priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
......
...@@ -326,6 +326,7 @@ struct mlx4_en_rx_ring { ...@@ -326,6 +326,7 @@ struct mlx4_en_rx_ring {
#endif #endif
unsigned long csum_ok; unsigned long csum_ok;
unsigned long csum_none; unsigned long csum_none;
unsigned long csum_complete;
int hwtstamp_rx_filter; int hwtstamp_rx_filter;
cpumask_var_t affinity_mask; cpumask_var_t affinity_mask;
}; };
...@@ -449,6 +450,7 @@ struct mlx4_en_port_stats { ...@@ -449,6 +450,7 @@ struct mlx4_en_port_stats {
unsigned long rx_alloc_failed; unsigned long rx_alloc_failed;
unsigned long rx_chksum_good; unsigned long rx_chksum_good;
unsigned long rx_chksum_none; unsigned long rx_chksum_none;
unsigned long rx_chksum_complete;
unsigned long tx_chksum_offload; unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 9 #define NUM_PORT_STATS 9
}; };
...@@ -507,7 +509,8 @@ enum { ...@@ -507,7 +509,8 @@ enum {
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2), MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
/* whether we need to drop packets that hardware loopback-ed */ /* whether we need to drop packets that hardware loopback-ed */
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4) MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
}; };
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) #define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
......
...@@ -497,6 +497,7 @@ struct mlx4_caps { ...@@ -497,6 +497,7 @@ struct mlx4_caps {
u16 hca_core_clock; u16 hca_core_clock;
u64 phys_port_id[MLX4_MAX_PORTS + 1]; u64 phys_port_id[MLX4_MAX_PORTS + 1];
int tunnel_offload_mode; int tunnel_offload_mode;
u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
}; };
struct mlx4_buf_list { struct mlx4_buf_list {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment