Commit a4e82a81 authored by Tony Nguyen, committed by Jeff Kirsher

ice: Add support for tunnel offloads

Create a boost TCAM entry for each tunnel port in order to get a tunnel
PTYPE. Update netdev feature flags and implement the appropriate logic to
get and set values for hardware offloads.
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Henry Tieman <henry.w.tieman@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent f45a645f
@@ -37,6 +37,10 @@
 #include <net/devlink.h>
 #include <net/ipv6.h>
 #include <net/xdp_sock.h>
+#include <net/geneve.h>
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/vxlan.h>
 #include "ice_devids.h"
 #include "ice_type.h"
 #include "ice_txrx.h"
...
@@ -746,6 +746,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	status = ice_init_hw_tbls(hw);
 	if (status)
 		goto err_unroll_fltr_mgmt_struct;
+	mutex_init(&hw->tnl_lock);
 	return 0;
 
 err_unroll_fltr_mgmt_struct:
@@ -775,6 +776,7 @@ void ice_deinit_hw(struct ice_hw *hw)
 	ice_sched_clear_agg(hw);
 
 	ice_free_seg(hw);
 	ice_free_hw_tbls(hw);
+	mutex_destroy(&hw->tnl_lock);
 
 	if (hw->port_info) {
 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
...
@@ -18,6 +18,11 @@
 #define ICE_PKG_CNT 4
 
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+
 enum ice_status
 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
 	     struct ice_fv_word *es);
...
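
For orientation, a hedged sketch of the lifecycle the three new prototypes imply, assuming an ice_pf *pf as used elsewhere in the driver; the GENEVE port number 6081 is an example value, not taken from this commit:

	enum ice_status status;

	/* illustrative only: create a boost TCAM entry for a GENEVE port */
	status = ice_create_tunnel(&pf->hw, TNL_GENEVE, 6081);

	/* ... and tear it down later, if the port is still tracked */
	if (ice_tunnel_port_in_use(&pf->hw, 6081, NULL))
		status = ice_destroy_tunnel(&pf->hw, 6081, false);

Passing false as the last argument of ice_destroy_tunnel() removes the single port rather than all tunnels, per the bool all parameter above.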
@@ -149,6 +149,7 @@ struct ice_buf_hdr {
 #define ICE_SID_CDID_REDIR_RSS		48
 #define ICE_SID_RXPARSER_BOOST_TCAM	56
+#define ICE_SID_TXPARSER_BOOST_TCAM	66
 #define ICE_SID_XLT0_PE			80
 #define ICE_SID_XLT_KEY_BUILDER_PE	81
@@ -291,6 +292,38 @@ struct ice_pkg_enum {
 	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
 };
+
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+	TNL_VXLAN = 0,
+	TNL_GENEVE,
+	TNL_LAST = 0xFF,
+	TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+	enum ice_tunnel_type type;
+	const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+	enum ice_tunnel_type type;
+	u16 boost_addr;
+	u16 port;
+	u16 ref;
+	struct ice_boost_tcam_entry *boost_entry;
+	u8 valid;
+	u8 in_use;
+	u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES	16
+
+struct ice_tunnel_table {
+	struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+	u16 count;
+};
+
 struct ice_pkg_es {
 	__le16 count;
 	__le16 offset;
...
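
The table above is a small fixed array guarded by the new hw->tnl_lock mutex (see the ice_common.c and ice_type.h hunks). A hypothetical helper, not part of this commit, illustrating how the fields fit together; the function name is made up:

	/* look up a tunnel entry by port; caller holds hw->tnl_lock */
	static struct ice_tunnel_entry *
	example_find_tunnel(struct ice_hw *hw, u16 port)
	{
		u16 i;

		for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
			if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port)
				return &hw->tnl.tbl[i];

		return NULL;
	}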
@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
 	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+	/* GRE */
+	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
+			  sizeof_field(struct gre_full_hdr, key)),
 };
 
 /* Bitmaps indicating relevant packet types for a particular protocol header
@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 };
+
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
 
 /* Manage parameters and info. used during the creation of a flow profile */
 struct ice_flow_prof_params {
 	enum ice_block blk;
@@ -225,6 +240,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
 			src = (const unsigned long *)ice_ptypes_sctp_il;
 			bitmap_and(params->ptypes, params->ptypes, src,
 				   ICE_FLOW_PTYPE_MAX);
+		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+			if (!i) {
+				src = (const unsigned long *)ice_ptypes_gre_of;
+				bitmap_and(params->ptypes, params->ptypes,
+					   src, ICE_FLOW_PTYPE_MAX);
+			}
 		}
 	}
@@ -275,6 +296,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
 		prot_id = ICE_PROT_SCTP_IL;
 		break;
+	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+		prot_id = ICE_PROT_GRE_OF;
+		break;
 	default:
 		return ICE_ERR_NOT_IMPL;
 	}
@@ -945,6 +969,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
 
 #define ICE_RSS_OUTER_HEADERS	1
+#define ICE_RSS_INNER_HEADERS	2
 
 /* Flow profile ID format:
  * [0:31] - Packet match fields
@@ -1085,6 +1110,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
 	mutex_lock(&hw->rss_locks);
 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
 				      ICE_RSS_OUTER_HEADERS);
+	if (!status)
+		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+					      addl_hdrs, ICE_RSS_INNER_HEADERS);
 	mutex_unlock(&hw->rss_locks);
 
 	return status;
@@ -1238,6 +1266,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 						      ICE_RSS_OUTER_HEADERS);
 			if (status)
 				break;
+			status = ice_add_rss_cfg_sync(hw, vsi_handle,
+						      r->hashed_flds,
+						      r->packet_hdr,
+						      ICE_RSS_INNER_HEADERS);
+			if (status)
+				break;
 		}
 	}
 	mutex_unlock(&hw->rss_locks);
...
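
With ICE_RSS_INNER_HEADERS in place, one ice_add_rss_cfg() call programs the hash profile twice, once for the outer headers and once for the inner (post-decapsulation) headers, and ice_replay_rss_cfg() restores both after reset. A hedged usage sketch; the choice of hash fields is an example, not taken from this commit:

	enum ice_status status;

	/* hash on the UDP port pair; applied to outer and inner headers */
	status = ice_add_rss_cfg(&pf->hw, vsi_handle,
				 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
				 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT),
				 ICE_FLOW_SEG_HDR_UDP);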
@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
 	ICE_FLOW_SEG_HDR_TCP	= 0x00000040,
 	ICE_FLOW_SEG_HDR_UDP	= 0x00000080,
 	ICE_FLOW_SEG_HDR_SCTP	= 0x00000100,
+	ICE_FLOW_SEG_HDR_GRE	= 0x00000200,
 };
 
 enum ice_flow_field {
@@ -58,6 +59,8 @@ enum ice_flow_field {
 	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
 	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
 	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+	/* GRE */
+	ICE_FLOW_FIELD_IDX_GRE_KEYID,
 	/* The total number of enums must not exceed 64 */
 	ICE_FLOW_FIELD_IDX_MAX
 };
...
@@ -262,6 +262,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
 	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
 };
+
+enum ice_rx_flex_desc_status_error_1_bits {
+	/* Note: These are predefined bit offsets */
+	ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+	ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
 
 #define ICE_RXQ_CTX_SIZE_DWORDS		8
 #define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
 #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
@@ -413,6 +419,25 @@ enum ice_tx_ctx_desc_cmd_bits {
 	ICE_TX_CTX_DESC_RESERVED	= 0x40
 };
+
+enum ice_tx_ctx_desc_eipt_offload {
+	ICE_TX_CTX_EIPT_NONE		= 0x0,
+	ICE_TX_CTX_EIPT_IPV6		= 0x1,
+	ICE_TX_CTX_EIPT_IPV4_NO_CSUM	= 0x2,
+	ICE_TX_CTX_EIPT_IPV4		= 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S	2
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S	9
+
+#define ICE_TXD_CTX_UDP_TUNNELING	BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING	(0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_NATLEN_S	12
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S	23
+#define ICE_TXD_CTX_QW0_L4T_CS_M	BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
 
 #define ICE_LAN_TXQ_MAX_QGRPS	127
 #define ICE_LAN_TXQ_MAX_QDIS	1023
...
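
To make the layout above concrete, a hedged sketch of how the new definitions compose the tunneling fields of a Tx context descriptor for a VXLAN frame; the 20-byte outer IPv4 header and 30 bytes of tunnel headers (outer UDP 8 + VXLAN 8 + inner MAC 14) are example values:

	u32 tunnel = 0;

	tunnel |= ICE_TX_CTX_EIPT_IPV4;			/* outer IPv4 with HW csum */
	tunnel |= (20 / 4) << ICE_TXD_CTX_QW0_EIPLEN_S;	/* outer IP hdr, 4-byte words */
	tunnel |= ICE_TXD_CTX_UDP_TUNNELING;		/* L4 tunnel type: UDP */
	tunnel |= (30 / 2) << ICE_TXD_CTX_QW0_NATLEN_S;	/* tunnel hdrs, 2-byte words */

This mirrors the arithmetic ice_tx_csum() performs from the actual header pointers in the ice_txrx.c hunk below.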
@@ -2342,13 +2342,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
 		       NETIF_F_HW_VLAN_CTAG_TX |
 		       NETIF_F_HW_VLAN_CTAG_RX;
 
 	tso_features = NETIF_F_TSO			|
+		       NETIF_F_TSO_ECN			|
+		       NETIF_F_TSO6			|
+		       NETIF_F_GSO_GRE			|
+		       NETIF_F_GSO_UDP_TUNNEL		|
+		       NETIF_F_GSO_GRE_CSUM		|
+		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+		       NETIF_F_GSO_PARTIAL		|
+		       NETIF_F_GSO_IPXIP4		|
+		       NETIF_F_GSO_IPXIP6		|
 		       NETIF_F_GSO_UDP_L4;
 
+	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					NETIF_F_GSO_GRE_CSUM;
+
 	/* set features that user can change */
 	netdev->hw_features = dflt_features | csumo_features |
 			      vlano_features | tso_features;
 
+	/* add support for HW_CSUM on packets with MPLS header */
+	netdev->mpls_features = NETIF_F_HW_CSUM;
+
 	/* enable features */
 	netdev->features |= netdev->hw_features;
 
 	/* encap and VLAN devices inherit default, csumo and tso features */
@@ -5157,6 +5171,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 	pf->tx_timeout_recovery_level++;
 }
 
+/**
+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_vsi *vsi = np->vsi;
+	struct ice_pf *pf = vsi->back;
+	enum ice_tunnel_type tnl_type;
+	u16 port = ntohs(ti->port);
+	enum ice_status status;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		tnl_type = TNL_VXLAN;
+		break;
+	case UDP_TUNNEL_TYPE_GENEVE:
+		tnl_type = TNL_GENEVE;
+		break;
+	default:
+		netdev_err(netdev, "Unknown tunnel type\n");
+		return;
+	}
+
+	status = ice_create_tunnel(&pf->hw, tnl_type, port);
+	if (status == ICE_ERR_OUT_OF_RANGE)
+		netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
+			    port);
+	else if (status)
+		netdev_err(netdev, "Error adding UDP tunnel - %d\n",
+			   status);
+}
+
+/**
+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_vsi *vsi = np->vsi;
+	struct ice_pf *pf = vsi->back;
+	u16 port = ntohs(ti->port);
+	enum ice_status status;
+	bool retval;
+
+	retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
+	if (!retval) {
+		netdev_info(netdev, "port %d not found in UDP tunnels list\n",
+			    port);
+		return;
+	}
+
+	status = ice_destroy_tunnel(&pf->hw, port, false);
+	if (status)
+		netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
+			   port);
+}
+
 /**
  * ice_open - Called when a network interface becomes active
  * @netdev: network interface device structure
@@ -5213,6 +5291,10 @@ int ice_open(struct net_device *netdev)
 	if (err)
 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
 			   vsi->vsi_num, vsi->vsw->sw_id);
+
+	/* Update existing tunnels information */
+	udp_tunnel_get_rx_info(netdev);
+
 	return err;
 }
@@ -5263,21 +5345,21 @@ ice_features_check(struct sk_buff *skb,
 		features &= ~NETIF_F_GSO_MASK;
 
 	len = skb_network_header(skb) - skb->data;
-	if (len & ~(ICE_TXD_MACLEN_MAX))
+	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
 		goto out_rm_features;
 
 	len = skb_transport_header(skb) - skb_network_header(skb);
-	if (len & ~(ICE_TXD_IPLEN_MAX))
+	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
 		goto out_rm_features;
 
 	if (skb->encapsulation) {
 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
-		if (len & ~(ICE_TXD_L4LEN_MAX))
+		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
 			goto out_rm_features;
 
 		len = skb_inner_transport_header(skb) -
 		      skb_inner_network_header(skb);
-		if (len & ~(ICE_TXD_IPLEN_MAX))
+		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
 			goto out_rm_features;
 	}
@@ -5326,4 +5408,6 @@ static const struct net_device_ops ice_netdev_ops = {
 	.ndo_bpf = ice_xdp,
 	.ndo_xdp_xmit = ice_xdp_xmit,
 	.ndo_xsk_wakeup = ice_xsk_wakeup,
+	.ndo_udp_tunnel_add = ice_udp_tunnel_add,
+	.ndo_udp_tunnel_del = ice_udp_tunnel_del,
 };
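
For context, a hedged sketch of the notification the networking core hands to the new callbacks when, for example, a VXLAN socket opens; the udp_tunnel_get_rx_info() call added to ice_open() above replays the same notifications for tunnels that already exist. The port is the IANA VXLAN default, used only as an example:

	struct udp_tunnel_info ti = {
		.type = UDP_TUNNEL_TYPE_VXLAN,
		.port = htons(4789),	/* example: IANA VXLAN port */
	};

	ice_udp_tunnel_add(netdev, &ti);	/* dispatched via .ndo_udp_tunnel_add */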
@@ -18,6 +18,7 @@ enum ice_prot_id {
 	ICE_PROT_IPV6_IL	= 41,
 	ICE_PROT_TCP_IL		= 49,
 	ICE_PROT_UDP_IL_OR_S	= 53,
+	ICE_PROT_GRE_OF		= 64,
 	ICE_PROT_SCTP_IL	= 96,
 	ICE_PROT_META_ID	= 255, /* when offset == metadata */
 	ICE_PROT_INVALID	= 255 /* when offset == ICE_FV_OFFSET_INVAL */
...
@@ -1807,12 +1807,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	l2_len = ip.hdr - skb->data;
 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
 
-	if (skb->encapsulation)
-		return -1;
+	protocol = vlan_get_protocol(skb);
+
+	if (protocol == htons(ETH_P_IP))
+		first->tx_flags |= ICE_TX_FLAGS_IPV4;
+	else if (protocol == htons(ETH_P_IPV6))
+		first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+	if (skb->encapsulation) {
+		bool gso_ena = false;
+		u32 tunnel = 0;
+
+		/* define outer network header type */
+		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+				  ICE_TX_CTX_EIPT_IPV4 :
+				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+			l4_proto = ip.v4->protocol;
+		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+			tunnel |= ICE_TX_CTX_EIPT_IPV6;
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			break;
+		case IPPROTO_GRE:
+			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			l4.hdr = skb_inner_network_header(skb);
+			break;
+		default:
+			if (first->tx_flags & ICE_TX_FLAGS_TSO)
+				return -1;
+
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  ICE_TXD_CTX_QW0_EIPLEN_S;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
+		/* compute tunnel header size */
+		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+			  ICE_TXD_CTX_QW0_NATLEN_S;
+
+		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+		/* indicate if we need to offload outer UDP header */
+		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+		/* record tunnel offload values */
+		off->cd_tunnel_params |= tunnel;
+
+		/* set DTYP=1 to indicate that it's a Tx context descriptor
+		 * in IPsec tunnel mode with Tx offloads in Quad word 1
+		 */
+		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+		/* switch L4 header pointer from outer to inner */
+		l4.hdr = skb_inner_transport_header(skb);
+		l4_proto = 0;
+
+		/* reset type as we transition from outer to inner headers */
+		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			first->tx_flags |= ICE_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
+			first->tx_flags |= ICE_TX_FLAGS_IPV6;
+	}
 
 	/* Enable IP checksum offloads */
-	protocol = vlan_get_protocol(skb);
-	if (protocol == htons(ETH_P_IP)) {
+	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
 		l4_proto = ip.v4->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
@@ -1822,7 +1904,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 		else
 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
 
-	} else if (protocol == htons(ETH_P_IPV6)) {
+	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
 		exthdr = ip.hdr + sizeof(*ip.v6);
 		l4_proto = ip.v6->nexthdr;
@@ -1969,6 +2051,40 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 		ip.v6->payload_len = 0;
 	}
 
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+					 SKB_GSO_GRE_CSUM |
+					 SKB_GSO_IPXIP4 |
+					 SKB_GSO_IPXIP6 |
+					 SKB_GSO_UDP_TUNNEL |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+			l4.udp->len = 0;
+
+			/* determine offset of outer transport header */
+			l4_start = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = skb->len - l4_start;
+			csum_replace_by_diff(&l4.udp->check,
+					     (__force __wsum)htonl(paylen));
+		}
+
+		/* reset pointers to inner headers */
+
+		/* cppcheck-suppress unreadVariable */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		/* initialize inner IP header fields */
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			ip.v4->check = 0;
+		} else {
+			ip.v6->payload_len = 0;
+		}
+	}
+
 	/* determine offset of transport header */
 	l4_start = l4.hdr - skb->data;
...
@@ -113,6 +113,9 @@ static inline int ice_skb_pad(void)
 #define ICE_TX_FLAGS_TSO	BIT(0)
 #define ICE_TX_FLAGS_HW_VLAN	BIT(1)
 #define ICE_TX_FLAGS_SW_VLAN	BIT(2)
+#define ICE_TX_FLAGS_IPV4	BIT(5)
+#define ICE_TX_FLAGS_IPV6	BIT(6)
+#define ICE_TX_FLAGS_TUNNEL	BIT(7)
 #define ICE_TX_FLAGS_VLAN_M	0xffff0000
 #define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
 #define ICE_TX_FLAGS_VLAN_PR_S	29
...
@@ -84,12 +84,17 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
 {
 	struct ice_rx_ptype_decoded decoded;
-	u32 rx_error, rx_status;
+	u16 rx_error, rx_status;
+	u16 rx_stat_err1;
 	bool ipv4, ipv6;
 
 	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_error = rx_status;
+	rx_error = rx_status & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
+	rx_stat_err1 = le16_to_cpu(rx_desc->wb.status_error1);
 
 	decoded = ice_decode_rx_desc_ptype(ptype);
 
 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
@@ -125,6 +130,18 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 		goto checksum_fail;
 
+	/* check for outer UDP checksum error in tunneled packets */
+	if ((rx_stat_err1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+	    (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+		goto checksum_fail;
+
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
+	 */
+	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;
+
 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 	switch (decoded.inner_prot) {
 	case ICE_RX_PTYPE_INNER_PROT_TCP:
...
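
For a tunneled packet that passes these checks, the resulting skb state can be pictured as the following hedged sketch; CHECKSUM_UNNECESSARY with csum_level = 1 tells the stack the hardware's verification extends one checksum level into the encapsulation:

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = 1;	/* inner (encapsulated) checksum verified */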
@@ -579,6 +579,10 @@ struct ice_hw {
 	u8 *pkg_copy;
 	u32 pkg_size;
 
+	/* tunneling info */
+	struct mutex tnl_lock;
+	struct ice_tunnel_table tnl;
+
 	/* HW block tables */
 	struct ice_blk_info blk[ICE_BLK_COUNT];
 	struct mutex fl_profs_locks[ICE_BLK_COUNT];	/* lock fltr profiles */
...