Commit 97aeb877 authored by David S. Miller's avatar David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: GTP support in switchdev

Marcin Szycik says:

Add support for adding GTP-C and GTP-U filters in switchdev mode.

To create a filter for GTP, create a GTP-type netdev with ip tool, enable
hardware offload, add qdisc and add a filter in tc:

ip link add $GTP0 type gtp role <sgsn/ggsn> hsize <hsize>
ethtool -K $PF0 hw-tc-offload on
tc qdisc add dev $GTP0 ingress
tc filter add dev $GTP0 ingress prio 1 flower enc_key_id 1337 \
action mirred egress redirect dev $VF1_PR

By default, a filter for GTP-U will be added. To add a filter for GTP-C,
specify enc_dst_port = 2123, e.g.:

tc filter add dev $GTP0 ingress prio 1 flower enc_key_id 1337 \
enc_dst_port 2123 action mirred egress redirect dev $VF1_PR

Note: outer IPv6 offload is not supported yet.
Note: GTP-U with no payload offload is not supported yet.

ICE COMMS package is required to create a filter as it contains GTP
profiles.

Changes in iproute2 [1] are required to be able to add GTP netdev and use
GTP-specific options (QFI and PDU type).

[1] https://lore.kernel.org/netdev/20220211182902.11542-1-wojciech.drewek@intel.com/T
---
v2: Add more CC
v3: Fix mail thread, sorry for spam
v4: Add GTP echo response in gtp module
v5: Change patch order
v6: Add GTP echo request in gtp module
v7: Fix kernel-docs in ice
v8: Remove handling of GTP Echo Response
v9: Add sending of multicast message on GTP Echo Response, fix GTP-C dummy
    packet selection
v10: Rebase, fixed most 80 char line limits
v11: Rebase, collect Harald's Reviewed-by on patch 3
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4d17d43d 9a225f81
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#include <net/gre.h> #include <net/gre.h>
#include <net/udp_tunnel.h> #include <net/udp_tunnel.h>
#include <net/vxlan.h> #include <net/vxlan.h>
#include <net/gtp.h>
#if IS_ENABLED(CONFIG_DCB) #if IS_ENABLED(CONFIG_DCB)
#include <scsi/iscsi_proto.h> #include <scsi/iscsi_proto.h>
#endif /* CONFIG_DCB */ #endif /* CONFIG_DCB */
......
...@@ -1804,16 +1804,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) ...@@ -1804,16 +1804,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
return bld; return bld;
} }
/**
 * ice_is_gtp_u_profile - check whether a profile index is a GTP-U profile
 * @prof_idx: profile index to check
 *
 * GTP-U profiles consist of the single IPv4 TEID profile plus a contiguous
 * range of IPv6 GTP-U profile IDs (TEID through the inner IPv6/TCP profile).
 */
static bool ice_is_gtp_u_profile(u16 prof_idx)
{
	if (prof_idx == ICE_PROFID_IPV4_GTPU_TEID)
		return true;

	return prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
	       prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER;
}
/**
 * ice_is_gtp_c_profile - check whether a profile index is a GTP-C profile
 * @prof_idx: profile index to check
 *
 * Matches the four GTP-C profile IDs: IPv4/IPv6, each with and without TEID.
 */
static bool ice_is_gtp_c_profile(u16 prof_idx)
{
	return prof_idx == ICE_PROFID_IPV4_GTPC_TEID ||
	       prof_idx == ICE_PROFID_IPV4_GTPC_NO_TEID ||
	       prof_idx == ICE_PROFID_IPV6_GTPC_TEID ||
	       prof_idx == ICE_PROFID_IPV6_GTPC_NO_TEID;
}
/** /**
* ice_get_sw_prof_type - determine switch profile type * ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @fv: pointer to the switch field vector * @fv: pointer to the switch field vector
* @prof_idx: profile index to check
*/ */
static enum ice_prof_type static enum ice_prof_type
ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
{ {
u16 i; u16 i;
if (ice_is_gtp_c_profile(prof_idx))
return ICE_PROF_TUN_GTPC;
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
...@@ -1860,7 +1887,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, ...@@ -1860,7 +1887,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
if (fv) { if (fv) {
/* Determine field vector type */ /* Determine field vector type */
prof_type = ice_get_sw_prof_type(hw, fv); prof_type = ice_get_sw_prof_type(hw, fv, offset);
if (req_profs & prof_type) if (req_profs & prof_type)
set_bit((u16)offset, bm); set_bit((u16)offset, bm);
...@@ -1871,20 +1898,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, ...@@ -1871,20 +1898,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
/** /**
* ice_get_sw_fv_list * ice_get_sw_fv_list
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @prot_ids: field vector to search for with a given protocol ID * @lkups: list of protocol types
* @ids_cnt: lookup/protocol count
* @bm: bitmap of field vectors to consider * @bm: bitmap of field vectors to consider
* @fv_list: Head of a list * @fv_list: Head of a list
* *
* Finds all the field vector entries from switch block that contain * Finds all the field vector entries from switch block that contain
* a given protocol ID and returns a list of structures of type * a given protocol ID and offset and returns a list of structures of type
* "ice_sw_fv_list_entry". Every structure in the list has a field vector * "ice_sw_fv_list_entry". Every structure in the list has a field vector
* definition and profile ID information * definition and profile ID information
* NOTE: The caller of the function is responsible for freeing the memory * NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry. * allocated for every list entry.
*/ */
int int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list) unsigned long *bm, struct list_head *fv_list)
{ {
struct ice_sw_fv_list_entry *fvl; struct ice_sw_fv_list_entry *fvl;
...@@ -1896,7 +1922,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ...@@ -1896,7 +1922,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state)); memset(&state, 0, sizeof(state));
if (!ids_cnt || !hw->seg) if (!lkups->n_val_words || !hw->seg)
return -EINVAL; return -EINVAL;
ice_seg = hw->seg; ice_seg = hw->seg;
...@@ -1915,20 +1941,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ...@@ -1915,20 +1941,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
if (!test_bit((u16)offset, bm)) if (!test_bit((u16)offset, bm))
continue; continue;
for (i = 0; i < ids_cnt; i++) { for (i = 0; i < lkups->n_val_words; i++) {
int j; int j;
/* This code assumes that if a switch field vector line
* has a matching protocol, then this line will contain
* the entries necessary to represent every field in
* that protocol header.
*/
for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
if (fv->ew[j].prot_id == prot_ids[i]) if (fv->ew[j].prot_id ==
lkups->fv_words[i].prot_id &&
fv->ew[j].off == lkups->fv_words[i].off)
break; break;
if (j >= hw->blk[ICE_BLK_SW].es.fvw) if (j >= hw->blk[ICE_BLK_SW].es.fvw)
break; break;
if (i + 1 == ids_cnt) { if (i + 1 == lkups->n_val_words) {
fvl = devm_kzalloc(ice_hw_to_dev(hw), fvl = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fvl), GFP_KERNEL); sizeof(*fvl), GFP_KERNEL);
if (!fvl) if (!fvl)
......
...@@ -87,7 +87,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, ...@@ -87,7 +87,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void void
ice_init_prof_result_bm(struct ice_hw *hw); ice_init_prof_result_bm(struct ice_hw *hw);
int int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list); unsigned long *bm, struct list_head *fv_list);
int int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
......
...@@ -417,6 +417,8 @@ enum ice_tunnel_type { ...@@ -417,6 +417,8 @@ enum ice_tunnel_type {
TNL_VXLAN = 0, TNL_VXLAN = 0,
TNL_GENEVE, TNL_GENEVE,
TNL_GRETAP, TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
__TNL_TYPE_CNT, __TNL_TYPE_CNT,
TNL_LAST = 0xFF, TNL_LAST = 0xFF,
TNL_ALL = 0xFF, TNL_ALL = 0xFF,
...@@ -673,7 +675,9 @@ enum ice_prof_type { ...@@ -673,7 +675,9 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1, ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4, ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_ALL = 0x6, ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF, ICE_PROF_ALL = 0xFF,
}; };
......
...@@ -41,6 +41,8 @@ enum ice_protocol_type { ...@@ -41,6 +41,8 @@ enum ice_protocol_type {
ICE_VXLAN, ICE_VXLAN,
ICE_GENEVE, ICE_GENEVE,
ICE_NVGRE, ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_VXLAN_GPE, ICE_VXLAN_GPE,
ICE_SCTP_IL, ICE_SCTP_IL,
ICE_PROTOCOL_LAST ICE_PROTOCOL_LAST
...@@ -52,6 +54,8 @@ enum ice_sw_tunnel_type { ...@@ -52,6 +54,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_VXLAN, ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE, ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE, ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
}; };
...@@ -182,6 +186,20 @@ struct ice_udp_tnl_hdr { ...@@ -182,6 +186,20 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */ __be32 vni; /* only use lower 24-bits */
}; };
/* GTP header layout used for switch rule dummy packets, covering both
 * GTP-C and GTP-U, including the trailing PDU session container fields
 * (pdu_type/qfi) matched for GTP-U. Fields prefixed "rsrvd"/"rsvrd" are
 * present for layout only and are not matched on by the driver.
 * NOTE(review): "rsvrd" vs "rsrvd" spelling is inconsistent — kept as-is,
 * the names are part of the existing field layout.
 */
struct ice_udp_gtp_hdr {
	u8 flags;               /* version / protocol type / E,S,PN flag bits */
	u8 msg_type;            /* GTP message type */
	__be16 rsrvd_len;       /* message length (not matched) */
	__be32 teid;            /* tunnel endpoint identifier */
	__be16 rsrvd_seq_nbr;   /* sequence number (not matched) */
	u8 rsrvd_n_pdu_nbr;     /* N-PDU number (not matched) */
	u8 rsrvd_next_ext;      /* next extension header type (not matched) */
	u8 rsvrd_ext_len;       /* extension header length (not matched) */
	u8 pdu_type;            /* PDU session container: PDU type nibble */
	u8 qfi;                 /* PDU session container: QoS flow identifier */
	u8 rsvrd;               /* padding/reserved (not matched) */
};
struct ice_nvgre_hdr { struct ice_nvgre_hdr {
__be16 flags; __be16 flags;
__be16 protocol; __be16 protocol;
...@@ -198,6 +216,7 @@ union ice_prot_hdr { ...@@ -198,6 +216,7 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr; struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr; struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr; struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
}; };
/* This is mapping table entry that maps every word within a given protocol /* This is mapping table entry that maps every word within a given protocol
......
This diff is collapsed.
...@@ -14,6 +14,15 @@ ...@@ -14,6 +14,15 @@
#define ICE_VSI_INVAL_ID 0xffff #define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF #define ICE_INVAL_Q_HANDLE 0xFFFF
/* Switch Profile IDs for Profile related switch rules */
#define ICE_PROFID_IPV4_GTPC_TEID 41
#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
#define ICE_PROFID_IPV4_GTPU_TEID 43
#define ICE_PROFID_IPV6_GTPC_TEID 44
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
......
...@@ -27,6 +27,9 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, ...@@ -27,6 +27,9 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++; lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 | ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 | ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
...@@ -102,6 +105,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type) ...@@ -102,6 +105,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GENEVE; return ICE_GENEVE;
case TNL_GRETAP: case TNL_GRETAP:
return ICE_NVGRE; return ICE_NVGRE;
case TNL_GTPU:
/* NO_PAY profiles will not work with GTP-U */
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
default: default:
return 0; return 0;
} }
...@@ -117,6 +125,10 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type) ...@@ -117,6 +125,10 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GENEVE; return ICE_SW_TUN_GENEVE;
case TNL_GRETAP: case TNL_GRETAP:
return ICE_SW_TUN_NVGRE; return ICE_SW_TUN_NVGRE;
case TNL_GTPU:
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
default: default:
return ICE_NON_TUN; return ICE_NON_TUN;
} }
...@@ -143,7 +155,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, ...@@ -143,7 +155,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
break; break;
case TNL_GRETAP: case TNL_GRETAP:
list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id; list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4); memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
"\xff\xff\xff\xff", 4);
i++;
break;
case TNL_GTPC:
case TNL_GTPU:
list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
memcpy(&list[i].m_u.gtp_hdr.teid,
"\xff\xff\xff\xff", 4);
i++; i++;
break; break;
default: default:
...@@ -160,6 +180,24 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, ...@@ -160,6 +180,24 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++; i++;
} }
if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
(fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
list[i].h_u.gtp_hdr.pdu_type =
fltr->gtp_pdu_info_keys.pdu_type << 4;
memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
}
if (fltr->gtp_pdu_info_masks.qfi) {
list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
}
i++;
}
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false); list[i].type = ice_proto_type_from_ipv4(false);
...@@ -361,6 +399,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev) ...@@ -361,6 +399,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
if (netif_is_gretap(tunnel_dev) || if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev)) netif_is_ip6gretap(tunnel_dev))
return TNL_GRETAP; return TNL_GRETAP;
/* Assume GTP-U by default in case of GTP netdev.
* GTP-C may be selected later, based on enc_dst_port.
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
return TNL_LAST; return TNL_LAST;
} }
...@@ -760,6 +804,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule) ...@@ -760,6 +804,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
return NULL; return NULL;
} }
/**
* ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
* @match: Flow match structure
* @fltr: Pointer to filter structure
*
* GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
* Before calling this funtcion, fltr->tunnel_type should be set to TNL_GTPU,
* therefore making GTP-U the default choice (when destination port number is
* not specified).
*/
/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when destination port number is
 * not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	/* No destination port in the match: keep the TNL_GTPU default. */
	if (!match.key->dst)
		return 0;

	dst_port = be16_to_cpu(match.key->dst);

	if (dst_port == 2123) {
		/* GTP-C control-plane port */
		fltr->tunnel_type = TNL_GTPC;
	} else if (dst_port != 2152) {
		/* Only the well-known GTP-U (2152) and GTP-C (2123)
		 * ports are supported.
		 */
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
		return -EINVAL;
	}

	return 0;
}
static int static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr) struct ice_tc_flower_fltr *fltr)
...@@ -815,8 +893,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, ...@@ -815,8 +893,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ports match; struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match); flow_rule_match_enc_ports(rule, &match);
if (fltr->tunnel_type != TNL_GTPU) {
if (ice_tc_set_port(match, fltr, headers, true)) if (ice_tc_set_port(match, fltr, headers, true))
return -EINVAL; return -EINVAL;
} else {
if (ice_parse_gtp_type(match, fltr))
return -EINVAL;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
sizeof(struct gtp_pdu_session_info));
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
} }
return 0; return 0;
...@@ -854,6 +952,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, ...@@ -854,6 +952,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) { BIT(FLOW_DISSECTOR_KEY_PORTS))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used"); NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) #define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF #define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
...@@ -119,6 +120,8 @@ struct ice_tc_flower_fltr { ...@@ -119,6 +120,8 @@ struct ice_tc_flower_fltr {
struct ice_tc_flower_lyr_2_4_hdrs inner_headers; struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
struct ice_vsi *src_vsi; struct ice_vsi *src_vsi;
__be32 tenant_id; __be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
u32 flags; u32 flags;
u8 tunnel_type; u8 tunnel_type;
struct ice_tc_flower_action action; struct ice_tc_flower_action action;
......
This diff is collapsed.
...@@ -7,8 +7,13 @@ ...@@ -7,8 +7,13 @@
#define GTP0_PORT 3386 #define GTP0_PORT 3386
#define GTP1U_PORT 2152 #define GTP1U_PORT 2152
/* GTP messages types */
#define GTP_ECHO_REQ 1 /* Echo Request */
#define GTP_ECHO_RSP 2 /* Echo Response */
#define GTP_TPDU 255 #define GTP_TPDU 255
#define GTPIE_RECOVERY 14
struct gtp0_header { /* According to GSM TS 09.60. */ struct gtp0_header { /* According to GSM TS 09.60. */
__u8 flags; __u8 flags;
__u8 type; __u8 type;
...@@ -27,6 +32,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */ ...@@ -27,6 +32,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */
__be32 tid; __be32 tid;
} __attribute__ ((packed)); } __attribute__ ((packed));
/* GTPv1 header with the optional sequence/N-PDU/next-extension fields
 * present (flags indicate which of them are valid).
 */
struct gtp1_header_long {	/* According to 3GPP TS 29.060. */
	__u8	flags;
	__u8	type;
	__be16	length;
	__be32	tid;
	__be16	seq;	/* sequence number */
	__u8	npdu;	/* N-PDU number */
	__u8	next;	/* next extension header type */
} __packed;

/* GTP Information Element: single tag/value pair (e.g. Recovery) */
struct gtp_ie {
	__u8	tag;
	__u8	val;
} __packed;

/* GTPv0 message carrying exactly one information element */
struct gtp0_packet {
	struct gtp0_header gtp0_h;
	struct gtp_ie ie;
} __packed;

/* GTPv1-U message (long header form) carrying one information element */
struct gtp1u_packet {
	struct gtp1_header_long gtp1u_h;
	struct gtp_ie ie;
} __packed;

/* PDU session container fields used as flower tunnel-key options */
struct gtp_pdu_session_info {	/* According to 3GPP TS 38.415. */
	u8 pdu_type;	/* PDU type */
	u8 qfi;		/* QoS flow identifier */
};
/* netif_is_gtp - check whether @dev was created by the "gtp" rtnl link ops */
static inline bool netif_is_gtp(const struct net_device *dev)
{
	if (!dev->rtnl_link_ops)
		return false;

	return strcmp(dev->rtnl_link_ops->kind, "gtp") == 0;
}
#define GTP1_F_NPDU 0x01 #define GTP1_F_NPDU 0x01
#define GTP1_F_SEQ 0x02 #define GTP1_F_SEQ 0x02
#define GTP1_F_EXTHDR 0x04 #define GTP1_F_EXTHDR 0x04
......
...@@ -8,6 +8,7 @@ enum gtp_genl_cmds { ...@@ -8,6 +8,7 @@ enum gtp_genl_cmds {
GTP_CMD_NEWPDP, GTP_CMD_NEWPDP,
GTP_CMD_DELPDP, GTP_CMD_DELPDP,
GTP_CMD_GETPDP, GTP_CMD_GETPDP,
GTP_CMD_ECHOREQ,
GTP_CMD_MAX, GTP_CMD_MAX,
}; };
......
...@@ -887,6 +887,8 @@ enum { ...@@ -887,6 +887,8 @@ enum {
IFLA_GTP_FD1, IFLA_GTP_FD1,
IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_PDP_HASHSIZE,
IFLA_GTP_ROLE, IFLA_GTP_ROLE,
IFLA_GTP_CREATE_SOCKETS,
IFLA_GTP_RESTART_COUNT,
__IFLA_GTP_MAX, __IFLA_GTP_MAX,
}; };
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
......
...@@ -176,8 +176,10 @@ enum { ...@@ -176,8 +176,10 @@ enum {
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) #define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
#define TUNNEL_GTP_OPT __cpu_to_be16(0x8000)
#define TUNNEL_OPTIONS_PRESENT \ #define TUNNEL_OPTIONS_PRESENT \
(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT) (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
TUNNEL_GTP_OPT)
#endif /* _UAPI_IF_TUNNEL_H_ */ #endif /* _UAPI_IF_TUNNEL_H_ */
...@@ -616,6 +616,10 @@ enum { ...@@ -616,6 +616,10 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_ERSPAN_ * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
* attributes * attributes
*/ */
TCA_FLOWER_KEY_ENC_OPTS_GTP, /* Nested
* TCA_FLOWER_KEY_ENC_OPT_GTP_
* attributes
*/
__TCA_FLOWER_KEY_ENC_OPTS_MAX, __TCA_FLOWER_KEY_ENC_OPTS_MAX,
}; };
...@@ -654,6 +658,17 @@ enum { ...@@ -654,6 +658,17 @@ enum {
#define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \ #define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
(__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
/* GTP tunnel-key option attributes, nested under
 * TCA_FLOWER_KEY_ENC_OPTS_GTP. Part of the uapi — values must not change.
 */
enum {
	TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC,
	TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,		/* u8 */
	TCA_FLOWER_KEY_ENC_OPT_GTP_QFI,			/* u8 */
	__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX,
};

#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \
		(__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1)
enum { enum {
TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
TCA_FLOWER_KEY_MPLS_OPTS_LSE, TCA_FLOWER_KEY_MPLS_OPTS_LSE,
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <net/geneve.h> #include <net/geneve.h>
#include <net/vxlan.h> #include <net/vxlan.h>
#include <net/erspan.h> #include <net/erspan.h>
#include <net/gtp.h>
#include <net/dst.h> #include <net/dst.h>
#include <net/dst_metadata.h> #include <net/dst_metadata.h>
...@@ -723,6 +724,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { ...@@ -723,6 +724,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
}; };
static const struct nla_policy static const struct nla_policy
...@@ -746,6 +748,12 @@ erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { ...@@ -746,6 +748,12 @@ erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
}; };
/* Netlink policy for GTP tunnel-key options: PDU type and QFI, both u8 */
static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	= { .type = NLA_U8 },
};
static const struct nla_policy static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
...@@ -1262,6 +1270,49 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, ...@@ -1262,6 +1270,49 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
return sizeof(*md); return sizeof(*md);
} }
/* fl_set_gtp_opt - parse one GTP tunnel-key option blob into @key
 * @nla: the TCA_FLOWER_KEY_ENC_OPTS_GTP attribute (key or mask pass)
 * @key: flow key (or mask) whose enc_opts.data receives the parsed struct
 * @depth: nesting depth; 0 on the mask pass when no mask was supplied,
 *         in which case an all-ones mask of @option_len bytes is used
 * @option_len: 0 on the key pass; the key's parsed length on the mask pass
 * @extack: extended ack for error reporting
 *
 * Returns the number of bytes appended (sizeof(struct gtp_pdu_session_info))
 * or a negative errno.
 */
static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *opts[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 off = key->enc_opts.len;
	int ret;

	/* Append at the current end of the options buffer; start from an
	 * all-ones pattern so a missing mask means "match everything".
	 */
	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[off];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	ret = nla_parse_nested(opts, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (ret < 0)
		return ret;

	/* On the key pass (option_len == 0) both attributes are mandatory */
	if (!option_len &&
	    (!opts[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !opts[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (opts[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(opts[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (opts[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(opts[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct fl_flow_key *mask, struct fl_flow_key *mask,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
...@@ -1386,6 +1437,38 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, ...@@ -1386,6 +1437,38 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL; return -EINVAL;
} }
break; break;
case TCA_FLOWER_KEY_ENC_OPTS_GTP:
if (key->enc_opts.dst_opt_type) {
NL_SET_ERR_MSG_MOD(extack,
"Duplicate type for gtp options");
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
option_len = fl_set_gtp_opt(nla_opt_key, key,
key_depth, option_len,
extack);
if (option_len < 0)
return option_len;
key->enc_opts.len += option_len;
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
option_len = fl_set_gtp_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
if (option_len < 0)
return option_len;
mask->enc_opts.len += option_len;
if (key->enc_opts.len != mask->enc_opts.len) {
NL_SET_ERR_MSG_MOD(extack,
"Key and mask miss aligned");
return -EINVAL;
}
break;
default: default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL; return -EINVAL;
...@@ -2761,6 +2844,34 @@ static int fl_dump_key_erspan_opt(struct sk_buff *skb, ...@@ -2761,6 +2844,34 @@ static int fl_dump_key_erspan_opt(struct sk_buff *skb,
return -EMSGSIZE; return -EMSGSIZE;
} }
/* fl_dump_key_gtp_opt - dump GTP tunnel-key options (PDU type, QFI) to @skb
 *
 * Emits a TCA_FLOWER_KEY_ENC_OPTS_GTP nest containing both u8 attributes.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */
static int fl_dump_key_gtp_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct gtp_pdu_session_info *info =
		(struct gtp_pdu_session_info *)&enc_opts->data[0];
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
		       info->pdu_type) ||
	    nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, info->qfi)) {
		/* Drop the partially-built nest on failure */
		nla_nest_cancel(skb, start);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, start);
	return 0;
}
static int fl_dump_key_ct(struct sk_buff *skb, static int fl_dump_key_ct(struct sk_buff *skb,
struct flow_dissector_key_ct *key, struct flow_dissector_key_ct *key,
struct flow_dissector_key_ct *mask) struct flow_dissector_key_ct *mask)
...@@ -2824,6 +2935,11 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, ...@@ -2824,6 +2935,11 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
if (err) if (err)
goto nla_put_failure; goto nla_put_failure;
break; break;
case TUNNEL_GTP_OPT:
err = fl_dump_key_gtp_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
default: default:
goto nla_put_failure; goto nla_put_failure;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment