Commit fe87797b authored by David S. Miller

Merge tag 'mlx5-net-next-2021-06-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-net-next-2021-06-22

1) Various minor cleanups and fixes from the net-next branch
2) Optimize the mlx5 feature check on TX and
   a fix to allow VXLAN with IPsec offloads
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a7b62112 f1267798
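
The "feature check on tx" item refers to the ndo_features_check() path: instead of returning a bool and leaving the decision to the caller, the reworked helper in this series returns the per-skb feature mask directly, clearing the checksum and GSO bits when the hardware cannot handle the packet so the stack falls back to software. A minimal sketch of that contract (illustrative only, not code from this commit; can_offload_in_hw() is a hypothetical predicate):

	static netdev_features_t
	example_feature_check(struct sk_buff *skb, netdev_features_t features)
	{
		if (!can_offload_in_hw(skb))	/* hypothetical predicate */
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		/* cleared bits make the stack do software csum/GSO for this skb */
		return features;
	}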
@@ -12,7 +12,6 @@ config MLX5_CORE
 	depends on MLXFW || !MLXFW
 	depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
 	depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
-	default n
 	help
 	  Core driver for low level functionality of the ConnectX-4 and
 	  Connect-IB cards by Mellanox Technologies.
@@ -36,7 +35,6 @@ config MLX5_CORE_EN
 	depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
 	select PAGE_POOL
 	select DIMLIB
-	default n
 	help
 	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -141,7 +139,6 @@ config MLX5_CORE_EN_DCB
 config MLX5_CORE_IPOIB
 	bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
 	depends on MLX5_CORE_EN
-	default n
 	help
 	  MLX5 IPoIB offloads & acceleration support.
@@ -149,7 +146,6 @@ config MLX5_FPGA_IPSEC
 	bool "Mellanox Technologies IPsec Innova support"
 	depends on MLX5_CORE
 	depends on MLX5_FPGA
-	default n
 	help
 	  Build IPsec support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -163,7 +159,6 @@ config MLX5_IPSEC
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
 	select MLX5_ACCEL
-	default n
 	help
 	  Build IPsec support for the Connect-X family of network cards by Mellanox
 	  Technologies.
@@ -176,7 +171,6 @@ config MLX5_EN_IPSEC
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
 	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
-	default n
 	help
 	  Build support for IPsec cryptography-offload acceleration in the NIC.
 	  Note: Support for hardware with this capability needs to be selected
@@ -189,7 +183,6 @@ config MLX5_FPGA_TLS
 	depends on MLX5_CORE_EN
 	depends on MLX5_FPGA
 	select MLX5_EN_TLS
-	default n
 	help
 	  Build TLS support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -204,7 +197,6 @@ config MLX5_TLS
 	depends on MLX5_CORE_EN
 	select MLX5_ACCEL
 	select MLX5_EN_TLS
-	default n
 	help
 	  Build TLS support for the Connect-X family of network cards by Mellanox
 	  Technologies.
@@ -227,7 +219,6 @@ config MLX5_SW_STEERING
 config MLX5_SF
 	bool "Mellanox Technologies subfunction device support using auxiliary device"
 	depends on MLX5_CORE && MLX5_CORE_EN
-	default n
 	help
 	  Build support for subfuction device in the NIC. A Mellanox subfunction
 	  device can support RDMA, netdevice and vdpa device.
......
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 				struct mlx5_wqe_eth_seg *eseg, u8 mode,
 				struct xfrm_offload *xo)
 {
-	struct mlx5e_swp_spec swp_spec = {};
-
 	/* Tunnel Mode:
 	 * SWP:      OutL3       InL3  InL4
 	 * Pkt: MAC  IP     ESP  IP    L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	 * SWP:      OutL3       InL4
 	 *           InL3
 	 * Pkt: MAC  IP     ESP  L4
+	 *
+	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
+	 * SWP:      OutL3                   InL3  InL4
+	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
 	 */
-	swp_spec.l3_proto = skb->protocol;
-	swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
-	if (swp_spec.is_tun) {
-		if (xo->proto == IPPROTO_IPV6) {
-			swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
-			swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
-		} else {
-			swp_spec.tun_l3_proto = htons(ETH_P_IP);
-			swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
-		}
-	} else {
-		swp_spec.tun_l3_proto = skb->protocol;
-		swp_spec.tun_l4_proto = xo->proto;
-	}
-
-	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+
+	/* Shared settings */
+	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+	/* Tunnel mode */
+	if (mode == XFRM_MODE_TUNNEL) {
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		if (xo->proto == IPPROTO_IPV6)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		return;
+	}
+
+	/* Transport mode */
+	if (mode != XFRM_MODE_TRANSPORT)
+		return;
+
+	if (!xo->inner_ipproto) {
+		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		if (skb->protocol == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		if (xo->proto == IPPROTO_UDP)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		return;
+	}
+
+	/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+	switch (xo->inner_ipproto) {
+	case IPPROTO_UDP:
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		fallthrough;
+	case IPPROTO_TCP:
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
+		if (skb->protocol == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		break;
+	default:
+		break;
+	}
+
+	return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
......
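
The SWP (software parser) offsets programmed above are expressed in 16-bit words, which is why every skb byte offset is divided by 2. A standalone sketch of the arithmetic for the tunnel-mode layout in the comment; header sizes here are illustrative assumptions (14-byte Ethernet, 20-byte IPv4, 8-byte ESP header plus an assumed 8-byte IV), whereas the driver derives them from skb offsets:

	#include <stdio.h>

	int main(void)
	{
		unsigned int eth = 14, ipv4 = 20, esp_hdr_iv = 16;	/* assumed sizes, bytes */

		/* Tunnel Mode layout: MAC  IP  ESP  IP  L4 */
		unsigned int outer_l3 = eth;				/* outer IP header */
		unsigned int inner_l3 = eth + ipv4 + esp_hdr_iv;	/* inner IP header */
		unsigned int inner_l4 = inner_l3 + ipv4;		/* inner L4 header */

		printf("swp_outer_l3_offset = %u\n", outer_l3 / 2);	/* 7  */
		printf("swp_inner_l3_offset = %u\n", inner_l3 / 2);	/* 25 */
		printf("swp_inner_l4_offset = %u\n", inner_l4 / 2);	/* 35 */
		return 0;
	}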
@@ -93,18 +93,38 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
 			       struct mlx5_wqe_eth_seg *eseg);
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-					     netdev_features_t features)
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
 {
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct sec_path *sp = skb_sec_path(skb);
 
-	if (sp && sp->len) {
+	if (sp && sp->len && xo) {
 		struct xfrm_state *x = sp->xvec[0];
 
-		if (x && x->xso.offload_handle)
-			return true;
+		if (!x || !x->xso.offload_handle)
+			goto out_disable;
+
+		if (xo->inner_ipproto) {
+			/* Cannot support tunnel packet over IPsec tunnel mode
+			 * because we cannot offload three IP header csum
+			 */
+			if (x->props.mode == XFRM_MODE_TUNNEL)
+				goto out_disable;
+
+			/* Only support UDP or TCP L4 checksum */
+			if (xo->inner_ipproto != IPPROTO_UDP &&
+			    xo->inner_ipproto != IPPROTO_TCP)
+				goto out_disable;
+		}
+
+		return features;
 	}
-	return false;
+
+	/* Disable CSUM and GSO for software IPsec */
+out_disable:
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 #else
@@ -120,8 +140,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 }
 
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-					     netdev_features_t features) { return false; }
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
 
 #endif /* CONFIG_MLX5_EN_IPSEC */
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
@@ -4330,6 +4330,11 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 		/* Support Geneve offload for default UDP port */
 		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
 			return features;
 #endif
+		break;
+#ifdef CONFIG_MLX5_EN_IPSEC
+	case IPPROTO_ESP:
+		return mlx5e_ipsec_feature_check(skb, features);
+#endif
 	}
@@ -4347,9 +4352,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	features = vlan_features_check(skb, features);
 	features = vxlan_features_check(skb, features);
 
-	if (mlx5e_ipsec_feature_check(skb, netdev, features))
-		return features;
-
 	/* Validate if the tunneled packet is being offloaded by HW */
 	if (skb->encapsulation &&
 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
......
@@ -712,7 +712,7 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
 	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
 	int err;
 
-	if (!param->affinity)
+	if (!cpumask_available(param->affinity))
 		return ERR_PTR(-EINVAL);
 
 	if (!eq)
......
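
The hunk above swaps a raw NULL test for cpumask_available(). The usual reason for this pattern is that the field is a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=y it is a pointer that may legitimately be NULL, while without it it is an embedded array, so "!param->affinity" is never true there and typically triggers a compiler warning. A small sketch of the portable test (illustrative only, not driver code):

	#include <linux/cpumask.h>

	static int example_check_affinity(cpumask_var_t mask)
	{
		/* cpumask_available() handles both representations: it checks
		 * the pointer when CPUMASK_OFFSTACK is enabled and is always
		 * true for the embedded-array form.
		 */
		if (!cpumask_available(mask))
			return -EINVAL;
		return 0;
	}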
@@ -2969,8 +2969,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		return err;
 
 	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-	if (!steering)
+	if (!steering) {
+		err = -ENOMEM;
 		goto err;
+	}
 
 	steering->dev = dev;
 	dev->priv.steering = steering;
......
@@ -479,7 +479,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
 	if (!mlx5_sf_max_functions(dev))
 		return 0;
 	if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
-		mlx5_core_err(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+		mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
 		return 0;
 	}
......
@@ -1024,6 +1024,7 @@ struct xfrm_offload {
 #define CRYPTO_INVALID_PROTOCOL		128
 
 	__u8			proto;
+	__u8			inner_ipproto;
 };
 
 struct sec_path {
......
@@ -565,6 +565,42 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
 	return 0;
 }
 
+/* For partial checksum offload, the outer header checksum is calculated
+ * by software and the inner header checksum is calculated by hardware.
+ * This requires hardware to know the inner packet type to calculate
+ * the inner header checksum. Save inner ip protocol here to avoid
+ * traversing the packet in the vendor's xmit code.
+ * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
+ * get the ip protocol from the IP header.
+ */
+static void xfrm_get_inner_ipproto(struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	const struct ethhdr *eth;
+
+	if (!xo)
+		return;
+
+	if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+		xo->inner_ipproto = skb->inner_ipproto;
+		return;
+	}
+
+	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+		return;
+
+	eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+	switch (ntohs(eth->h_proto)) {
+	case ETH_P_IPV6:
+		xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
+		break;
+	case ETH_P_IP:
+		xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
+		break;
+	}
+}
+
 int xfrm_output(struct sock *sk, struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb_dst(skb)->dev);
@@ -594,12 +630,15 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
 			kfree_skb(skb);
 			return -ENOMEM;
 		}
-		skb->encapsulation = 1;
 
 		sp->olen++;
 		sp->xvec[sp->len++] = x;
 		xfrm_state_hold(x);
 
+		if (skb->encapsulation)
+			xfrm_get_inner_ipproto(skb);
+		skb->encapsulation = 1;
+
 		if (skb_is_gso(skb)) {
 			if (skb->inner_protocol)
 				return xfrm_output_gso(net, sk, skb);
......
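
The new xfrm_offload->inner_ipproto field exists so drivers do not have to re-parse inner headers at transmit time; note also that the hunk above reads skb->encapsulation before setting it, so the check still reflects whether a tunnel device populated the inner headers. A sketch of how a TX path might consume the saved value (hypothetical driver code, not part of this commit):

	#include <net/xfrm.h>

	static bool example_inner_l4_is_udp(struct sk_buff *skb)
	{
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* xfrm_get_inner_ipproto() filled this in on the output path;
		 * zero means no inner L3/L4 headers were recorded.
		 */
		return xo && xo->inner_ipproto == IPPROTO_UDP;
	}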