Commit ef68de56 authored by David S. Miller

Merge branch 'Support-tunnels-over-VLAN-in-NFP'

John Hurley says:

====================
Support tunnels over VLAN in NFP

This patchset deals with tunnel encap and decap when the end-point IP
address is on an internal port (for example, an OvS VLAN port). Tunnel
encap without VLAN is already supported in the NFP driver. This patchset
extends that support to include a VLAN push along with the tunnel header
push.

Patches 1-4 extend the flow_offload IR API with actions that use skbedit
to set the ptype of an SKB, and actions from the act_mirred module that
redirect a packet to the ingress of a port. Such actions are used in
flower rules that forward tunnelled packets to internal ports, where they
can be decapsulated. OvS, via its TC API, is an example of a user-space
application that produces such rules.
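
A driver consuming the extended IR sees such a rule as a FLOW_ACTION_PTYPE
entry (carrying PACKET_HOST) followed by a FLOW_ACTION_REDIRECT_INGRESS
entry whose dev is the internal port. Below is a minimal, hedged sketch of
such a consumer; the function name and the hardware-programming step are
illustrative only and not part of this series.

/* Hypothetical driver callback walking a flow_action list and
 * recognising the two IR entries added by patches 1-4.
 */
#include <net/flow_offload.h>
#include <linux/if_packet.h>
#include <linux/errno.h>

static int example_offload_rule(struct flow_rule *rule)
{
    const struct flow_action_entry *act;
    bool pkt_host = false;
    int i;

    flow_action_for_each(i, act, &rule->action) {
        switch (act->id) {
        case FLOW_ACTION_PTYPE:
            /* skbedit ptype: only PACKET_HOST is meaningful here. */
            if (act->ptype != PACKET_HOST)
                return -EOPNOTSUPP;
            pkt_host = true;
            break;
        case FLOW_ACTION_REDIRECT_INGRESS:
            /* act_mirred redirect to the ingress of act->dev. */
            if (!pkt_host)
                return -EOPNOTSUPP;
            /* ...program HW to deliver the packet to act->dev... */
            break;
        default:
            return -EOPNOTSUPP;
        }
    }
    return 0;
}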

Patch 5 modifies the encap offload code to allow a VLAN header to be
pushed after the tunnel header push.
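
In firmware terms the post-tunnel VLAN push is not emitted as a separate
action; its TPID/TCI are folded into the tunnel-set action, whose
previously reserved word now carries the outer VLAN (see the
nfp_fl_set_ipv4_tun change below). A rough, hedged sketch of that folding
step, assuming only the structure fields added by this series (driver
context: both structures come from the flower cmsg.h header):

/* Sketch only: fold a VLAN push that follows a tunnel push into the
 * tunnel-set action, as patch 5 does for the NFP firmware action list.
 * Error handling is reduced to the minimum.
 */
static int example_fold_vlan_into_tunnel(struct nfp_fl_set_ipv4_tun *tun,
                                         const struct nfp_fl_push_vlan *vlan)
{
    if (!tun)
        return -EOPNOTSUPP;     /* no tunnel push to attach the VLAN to */

    tun->outer_vlan_tpid = vlan->vlan_tpid;
    tun->outer_vlan_tci = vlan->vlan_tci;
    return 0;
}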

Patches 6-10 deal with tunnel decap when the end-point is on an internal
port. They detect 'pre-tunnel rules', which do not handle tunnels
themselves but rather forward packets to internal ports where they can be
decapsulated if required. Such rules are offloaded to a table in HW,
along with an indication (based on destination MAC address) of whether a
given packet needs to be passed through this table or not. Matching
against this table prior to decapsulation in HW allows correct parsing
and handling of outer VLANs on tunnelled packets, and correct updating of
stats for these 'pre-tunnel' rules.
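
The table entry itself is small: the control message added later in the
series (struct nfp_tun_pre_tun_rule) carries the MAC index of the
internal port, the expected outer VLAN TCI and the stats context of the
rule. A hedged sketch of populating it follows; the mac_idx argument is a
stand-in for the offloaded-MAC lookup the driver performs.

/* Sketch of building the pre-tunnel rule message described above.
 * The struct layout follows this series; kernel byteorder helpers are
 * assumed to be available.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct nfp_tun_pre_tun_rule {
    __be32 flags;       /* e.g. NFP_TUN_PRE_TUN_RULE_DEL on delete */
    __be16 port_idx;    /* MAC index of the internal (pre-tunnel) port */
    __be16 vlan_tci;    /* expected outer VLAN, 0xffff when untagged */
    __be32 host_ctx_id; /* stats context so rule hits are accounted */
};

static void example_fill_pre_tun_rule(struct nfp_tun_pre_tun_rule *msg,
                                      u16 mac_idx, __be16 vlan_tci,
                                      __be32 host_ctx_id)
{
    memset(msg, 0, sizeof(*msg));
    msg->port_idx = cpu_to_be16(mac_idx);
    msg->vlan_tci = vlan_tci;
    msg->host_ctx_id = host_ctx_id;
}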
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 55a47dc2 2e0bc7f3
@@ -173,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 struct nfp_fl_payload *nfp_flow,
 bool last, struct net_device *in_dev,
 enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
-struct netlink_ext_ack *extack)
+bool pkt_host, struct netlink_ext_ack *extack)
 {
 size_t act_size = sizeof(struct nfp_fl_output);
 struct nfp_flower_priv *priv = app->priv;
@@ -218,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 return gid;
 }
 output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+return -EOPNOTSUPP;
+}
+if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+return -EOPNOTSUPP;
+}
+nfp_flow->pre_tun_rule.dev = out_dev;
+return 0;
 } else {
 /* Set action output parameters. */
 output->flags = cpu_to_be16(tmp_flags);
@@ -885,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app,
 struct nfp_fl_payload *nfp_fl, int *a_len,
 struct net_device *netdev, bool last,
 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
-int *out_cnt, u32 *csum_updated,
+int *out_cnt, u32 *csum_updated, bool pkt_host,
 struct netlink_ext_ack *extack)
 {
 struct nfp_flower_priv *priv = app->priv;
@@ -907,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app,
 output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
 err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
-tun_out_cnt, extack);
+tun_out_cnt, pkt_host, extack);
 if (err)
 return err;
@@ -939,7 +953,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 struct net_device *netdev,
 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
 int *out_cnt, u32 *csum_updated,
-struct nfp_flower_pedit_acts *set_act,
+struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
 struct netlink_ext_ack *extack, int act_idx)
 {
 struct nfp_fl_set_ipv4_tun *set_tun;
@@ -955,17 +969,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 case FLOW_ACTION_DROP:
 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
 break;
+case FLOW_ACTION_REDIRECT_INGRESS:
 case FLOW_ACTION_REDIRECT:
 err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
 true, tun_type, tun_out_cnt,
-out_cnt, csum_updated, extack);
+out_cnt, csum_updated, *pkt_host,
+extack);
 if (err)
 return err;
 break;
+case FLOW_ACTION_MIRRED_INGRESS:
 case FLOW_ACTION_MIRRED:
 err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
 false, tun_type, tun_out_cnt,
-out_cnt, csum_updated, extack);
+out_cnt, csum_updated, *pkt_host,
+extack);
 if (err)
 return err;
 break;
@@ -1095,6 +1113,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 nfp_fl_set_mpls(set_m, act);
 *a_len += sizeof(struct nfp_fl_set_mpls);
 break;
+case FLOW_ACTION_PTYPE:
+/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+if (act->ptype != PACKET_HOST)
+return -EOPNOTSUPP;
+*pkt_host = true;
+break;
 default:
 /* Currently we do not handle any other actions. */
 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
@@ -1150,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
 struct nfp_flower_pedit_acts set_act;
 enum nfp_flower_tun_type tun_type;
 struct flow_action_entry *act;
+bool pkt_host = false;
 u32 csum_updated = 0;
 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -1166,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
 err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
 netdev, &tun_type, &tun_out_cnt,
 &out_cnt, &csum_updated,
-&set_act, extack, i);
+&set_act, &pkt_host, extack, i);
 if (err)
 return err;
 act_cnt++;
......
@@ -220,7 +220,8 @@ struct nfp_fl_set_ipv4_tun {
 __be16 tun_flags;
 u8 ttl;
 u8 tos;
-__be32 extra;
+__be16 outer_vlan_tpid;
+__be16 outer_vlan_tci;
 u8 tun_len;
 u8 res2;
 __be16 tun_proto;
@@ -483,6 +484,7 @@ enum nfp_flower_cmsg_type_port {
 NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
 NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
 NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
 NFP_FLOWER_CMSG_TYPE_MAX = 32,
 };
......
@@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app)
 INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
 INIT_LIST_HEAD(&app_priv->non_repr_priv);
+app_priv->pre_tun_rule_cnt = 0;
 return 0;
......
@@ -42,6 +42,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_VLAN_PCP BIT(3)
 #define NFP_FL_FEATS_VF_RLIM BIT(4)
 #define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
 #define NFP_FL_FEATS_FLOW_MERGE BIT(30)
 #define NFP_FL_FEATS_LAG BIT(31)
@@ -162,6 +163,7 @@ struct nfp_fl_internal_ports {
 * @qos_stats_work: Workqueue for qos stats processing
 * @qos_rate_limiters: Current active qos rate limiters
 * @qos_stats_lock: Lock on qos stats updates
+ * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
 */
 struct nfp_flower_priv {
 struct nfp_app *app;
@@ -193,6 +195,7 @@ struct nfp_flower_priv {
 struct delayed_work qos_stats_work;
 unsigned int qos_rate_limiters;
 spinlock_t qos_stats_lock; /* Protect the qos stats */
+int pre_tun_rule_cnt;
 };
 /**
@@ -218,6 +221,7 @@ struct nfp_fl_qos {
 * @block_shared: Flag indicating if offload applies to shared blocks
 * @mac_list: List entry of reprs that share the same offloaded MAC
 * @qos_table: Stored info on filters implementing qos
+ * @on_bridge: Indicates if the repr is attached to a bridge
 */
 struct nfp_flower_repr_priv {
 struct nfp_repr *nfp_repr;
@@ -227,6 +231,7 @@ struct nfp_flower_repr_priv {
 bool block_shared;
 struct list_head mac_list;
 struct nfp_fl_qos qos_table;
+bool on_bridge;
 };
 /**
@@ -280,6 +285,11 @@ struct nfp_fl_payload {
 char *action_data;
 struct list_head linked_flows;
 bool in_hw;
+struct {
+struct net_device *dev;
+__be16 vlan_tci;
+__be16 port_idx;
+} pre_tun_rule;
 };
 struct nfp_fl_payload_link {
@@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
 return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
 }
+static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
+{
+return netif_is_ovs_master(netdev);
+}
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 unsigned int host_ctx_split);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
@@ -415,4 +430,8 @@ void
 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
 u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
 struct net_device *netdev);
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+struct nfp_fl_payload *flow);
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+struct nfp_fl_payload *flow);
 #endif
@@ -61,6 +61,11 @@
 NFP_FLOWER_LAYER_IPV4 | \
 NFP_FLOWER_LAYER_IPV6)
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+(NFP_FLOWER_LAYER_PORT | \
+NFP_FLOWER_LAYER_MAC | \
+NFP_FLOWER_LAYER_IPV4)
 struct nfp_flower_merge_check {
 union {
 struct {
@@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 flow_pay->meta.flags = 0;
 INIT_LIST_HEAD(&flow_pay->linked_flows);
 flow_pay->in_hw = false;
+flow_pay->pre_tun_rule.dev = NULL;
 return flow_pay;
@@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
 return act_off;
 }
-static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
 {
 struct nfp_fl_act_head *a;
 unsigned int act_off = 0;
 while (act_off < len) {
 a = (struct nfp_fl_act_head *)&acts[act_off];
-if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+*vlan = (struct nfp_fl_push_vlan *)a;
+else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
 return -EOPNOTSUPP;
 act_off += a->len_lw << NFP_FL_LW_SIZ;
 }
+/* Ensure any VLAN push also has an egress action. */
+if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+return -EOPNOTSUPP;
 return 0;
 }
+static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+struct nfp_fl_set_ipv4_tun *tun;
+struct nfp_fl_act_head *a;
+unsigned int act_off = 0;
+while (act_off < len) {
+a = (struct nfp_fl_act_head *)&acts[act_off];
+if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
+tun = (struct nfp_fl_set_ipv4_tun *)a;
+tun->outer_vlan_tpid = vlan->vlan_tpid;
+tun->outer_vlan_tci = vlan->vlan_tci;
+return 0;
+}
+act_off += a->len_lw << NFP_FL_LW_SIZ;
+}
+/* Return error if no tunnel action is found. */
+return -EOPNOTSUPP;
+}
 static int
 nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
 struct nfp_fl_payload *sub_flow2,
 struct nfp_fl_payload *merge_flow)
 {
 unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
 bool tunnel_act = false;
 char *merge_act;
 int err;
@@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
 sub2_act_len -= pre_off2;
 /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
- * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
+ * valid merge.
 */
 if (tunnel_act) {
 char *post_tun_acts = &sub_flow2->action_data[pre_off2];
-err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+&post_tun_push_vlan);
 if (err)
 return err;
+if (post_tun_push_vlan) {
+pre_off2 += sizeof(*post_tun_push_vlan);
+sub2_act_len -= sizeof(*post_tun_push_vlan);
+}
 }
 /* Copy remaining actions from sub_flows 1 and 2. */
 memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+if (post_tun_push_vlan) {
+/* Update tunnel action in merge to include VLAN push. */
+err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+post_tun_push_vlan);
+if (err)
+return err;
+merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+}
 merge_act += sub1_act_len;
 memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
@@ -944,6 +1002,106 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 return err;
 }
+/**
+ * nfp_flower_validate_pre_tun_rule()
+ * @app: Pointer to the APP handle
+ * @flow: Pointer to NFP flow representation of rule
+ * @extack: Netlink extended ACK report
+ *
+ * Verifies the flow as a pre-tunnel rule.
+ *
+ * Return: negative value on error, 0 if verified.
+ */
+static int
+nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+struct nfp_fl_payload *flow,
+struct netlink_ext_ack *extack)
+{
+struct nfp_flower_meta_tci *meta_tci;
+struct nfp_flower_mac_mpls *mac;
+struct nfp_fl_act_head *act;
+u8 *mask = flow->mask_data;
+bool vlan = false;
+int act_offset;
+u8 key_layer;
+meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+vlan = true;
+} else {
+flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+}
+key_layer = meta_tci->nfp_flow_key_layer;
+if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
+return -EOPNOTSUPP;
+}
+if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
+return -EOPNOTSUPP;
+}
+/* Skip fields known to exist. */
+mask += sizeof(struct nfp_flower_meta_tci);
+mask += sizeof(struct nfp_flower_in_port);
+/* Ensure destination MAC address is fully matched. */
+mac = (struct nfp_flower_mac_mpls *)mask;
+if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
+return -EOPNOTSUPP;
+}
+if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
+int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+int i;
+mask += sizeof(struct nfp_flower_mac_mpls);
+/* Ensure proto and flags are the only IP layer fields. */
+for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+if (mask[i] && i != ip_flags && i != ip_proto) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
+return -EOPNOTSUPP;
+}
+}
+/* Action must be a single egress or pop_vlan and egress. */
+act_offset = 0;
+act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+if (vlan) {
+if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
+return -EOPNOTSUPP;
+}
+act_offset += act->len_lw << NFP_FL_LW_SIZ;
+act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+}
+if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
+return -EOPNOTSUPP;
+}
+act_offset += act->len_lw << NFP_FL_LW_SIZ;
+/* Ensure there are no more actions after egress. */
+if (act_offset != flow->meta.act_len) {
+NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
+return -EOPNOTSUPP;
+}
+return 0;
+}
 /**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 if (err)
 goto err_destroy_flow;
+if (flow_pay->pre_tun_rule.dev) {
+err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+if (err)
+goto err_destroy_flow;
+}
 err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
 if (err)
 goto err_destroy_flow;
@@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 goto err_release_metadata;
 }
-err = nfp_flower_xmit_flow(app, flow_pay,
-NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+if (flow_pay->pre_tun_rule.dev)
+err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+else
+err = nfp_flower_xmit_flow(app, flow_pay,
+NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
 if (err)
 goto err_remove_rhash;
@@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 goto err_free_merge_flow;
 }
-err = nfp_flower_xmit_flow(app, nfp_flow,
-NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+if (nfp_flow->pre_tun_rule.dev)
+err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+else
+err = nfp_flower_xmit_flow(app, nfp_flow,
+NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
 /* Fall through on error. */
 err_free_merge_flow:
......
@@ -15,6 +15,24 @@
 #define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
+#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
+/**
+ * struct nfp_tun_pre_run_rule - rule matched before decap
+ * @flags: options for the rule offset
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+__be32 flags;
+__be16 port_idx;
+__be16 vlan_tci;
+__be32 host_ctx_id;
+};
 /**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq: sequence number of the message
@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
 /**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node: Hashtable entry
 * @addr: Offloaded MAC address
 * @index: Offloaded index for given MAC address
 * @ref_count: Number of devs using this MAC address
 * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
 */
 struct nfp_tun_offloaded_mac {
 struct rhash_head ht_node;
@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
 u16 index;
 int ref_count;
 struct list_head repr_list;
+int bridge_count;
 };
 static const struct rhashtable_params offloaded_macs_params = {
@@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
 list_del(&repr_priv->mac_list);
 list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+} else if (nfp_flower_is_supported_bridge(netdev)) {
+entry->bridge_count++;
 }
 entry->ref_count++;
@@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
 if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
-nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
-return 0;
+if (entry->bridge_count ||
+!nfp_flower_is_supported_bridge(netdev)) {
+nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+netdev, mod);
+return 0;
+}
+/* MAC is global but matches need to go to pre_tun table. */
+nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
 }
-/* Assign a global index if non-repr or MAC address is now shared. */
-if (entry || !port) {
-ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
-NFP_MAX_MAC_INDEX, GFP_KERNEL);
-if (ida_idx < 0)
-return ida_idx;
-nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
-} else {
-nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+if (!nfp_mac_idx) {
+/* Assign a global index if non-repr or MAC is now shared. */
+if (entry || !port) {
+ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+NFP_MAX_MAC_INDEX, GFP_KERNEL);
+if (ida_idx < 0)
+return ida_idx;
+nfp_mac_idx =
+nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+if (nfp_flower_is_supported_bridge(netdev))
+nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+} else {
+nfp_mac_idx =
+nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+}
 }
 if (!entry) {
@@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 list_del(&repr_priv->mac_list);
 }
+if (nfp_flower_is_supported_bridge(netdev)) {
+entry->bridge_count--;
+if (!entry->bridge_count && entry->ref_count) {
+u16 nfp_mac_idx;
+nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+false)) {
+nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+netdev_name(netdev));
+return 0;
+}
+entry->index = nfp_mac_idx;
+return 0;
+}
+}
 /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
 if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
 u16 nfp_mac_idx;
@@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
 return 0;
 repr_priv = repr->app_priv;
+if (repr_priv->on_bridge)
+return 0;
 mac_offloaded = &repr_priv->mac_offloaded;
 off_mac = &repr_priv->offloaded_mac_addr[0];
 port = nfp_repr_get_port_id(netdev);
@@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 if (err)
 nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
 netdev_name(netdev));
+} else if (event == NETDEV_CHANGEUPPER) {
+/* If a repr is attached to a bridge then tunnel packets
+ * entering the physical port are directed through the bridge
+ * datapath and cannot be directly detunneled. Therefore,
+ * associated offloaded MACs and indexes should not be used
+ * by fw for detunneling.
+ */
+struct netdev_notifier_changeupper_info *info = ptr;
+struct net_device *upper = info->upper_dev;
+struct nfp_flower_repr_priv *repr_priv;
+struct nfp_repr *repr;
+if (!nfp_netdev_is_nfp_repr(netdev) ||
+!nfp_flower_is_supported_bridge(upper))
+return NOTIFY_OK;
+repr = netdev_priv(netdev);
+if (repr->app != app)
+return NOTIFY_OK;
+repr_priv = repr->app_priv;
+if (info->linking) {
+if (nfp_tunnel_offload_mac(app, netdev,
+NFP_TUNNEL_MAC_OFFLOAD_DEL))
+nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
+netdev_name(netdev));
+repr_priv->on_bridge = true;
+} else {
+repr_priv->on_bridge = false;
+if (!(netdev->flags & IFF_UP))
+return NOTIFY_OK;
+if (nfp_tunnel_offload_mac(app, netdev,
+NFP_TUNNEL_MAC_OFFLOAD_ADD))
+nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+netdev_name(netdev));
+}
 }
 return NOTIFY_OK;
 }
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+struct nfp_fl_payload *flow)
+{
+struct nfp_flower_priv *app_priv = app->priv;
+struct nfp_tun_offloaded_mac *mac_entry;
+struct nfp_tun_pre_tun_rule payload;
+struct net_device *internal_dev;
+int err;
+if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+return -ENOSPC;
+memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+internal_dev = flow->pre_tun_rule.dev;
+payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+payload.host_ctx_id = flow->meta.host_ctx_id;
+/* Lookup MAC index for the pre-tunnel rule egress device.
+ * Note that because the device is always an internal port, it will
+ * have a constant global index so does not need to be tracked.
+ */
+mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+internal_dev->dev_addr);
+if (!mac_entry)
+return -ENOENT;
+payload.port_idx = cpu_to_be16(mac_entry->index);
+/* Copy mac id and vlan to flow - dev may not exist at delete time. */
+flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+flow->pre_tun_rule.port_idx = payload.port_idx;
+err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+sizeof(struct nfp_tun_pre_tun_rule),
+(unsigned char *)&payload, GFP_KERNEL);
+if (err)
+return err;
+app_priv->pre_tun_rule_cnt++;
+return 0;
+}
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+struct nfp_fl_payload *flow)
+{
+struct nfp_flower_priv *app_priv = app->priv;
+struct nfp_tun_pre_tun_rule payload;
+u32 tmp_flags = 0;
+int err;
+memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+payload.flags = cpu_to_be32(tmp_flags);
+payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+payload.port_idx = flow->pre_tun_rule.port_idx;
+err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+sizeof(struct nfp_tun_pre_tun_rule),
+(unsigned char *)&payload, GFP_KERNEL);
+if (err)
+return err;
+app_priv->pre_tun_rule_cnt--;
+return 0;
+}
 int nfp_tunnel_config_start(struct nfp_app *app)
 {
 struct nfp_flower_priv *priv = app->priv;
......
@@ -117,6 +117,8 @@ enum flow_action_id {
 FLOW_ACTION_GOTO,
 FLOW_ACTION_REDIRECT,
 FLOW_ACTION_MIRRED,
+FLOW_ACTION_REDIRECT_INGRESS,
+FLOW_ACTION_MIRRED_INGRESS,
 FLOW_ACTION_VLAN_PUSH,
 FLOW_ACTION_VLAN_POP,
 FLOW_ACTION_VLAN_MANGLE,
@@ -126,6 +128,7 @@ enum flow_action_id {
 FLOW_ACTION_ADD,
 FLOW_ACTION_CSUM,
 FLOW_ACTION_MARK,
+FLOW_ACTION_PTYPE,
 FLOW_ACTION_WAKE,
 FLOW_ACTION_QUEUE,
 FLOW_ACTION_SAMPLE,
@@ -168,6 +171,7 @@ struct flow_action_entry {
 const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
 u32 csum_flags; /* FLOW_ACTION_CSUM */
 u32 mark; /* FLOW_ACTION_MARK */
+u16 ptype; /* FLOW_ACTION_PTYPE */
 struct { /* FLOW_ACTION_QUEUE */
 u32 ctx;
 u32 index;
......
@@ -32,6 +32,24 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
 return false;
 }
+static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+if (a->ops && a->ops->id == TCA_ID_MIRRED)
+return to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR;
+#endif
+return false;
+}
+static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+if (a->ops && a->ops->id == TCA_ID_MIRRED)
+return to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR;
+#endif
+return false;
+}
 static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
 {
 return rtnl_dereference(to_mirred(a)->tcfm_dev);
......
@@ -54,4 +54,31 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a)
 return mark;
 }
+/* Return true iff action is ptype */
+static inline bool is_tcf_skbedit_ptype(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+u32 flags;
+if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
+rcu_read_lock();
+flags = rcu_dereference(to_skbedit(a)->params)->flags;
+rcu_read_unlock();
+return flags == SKBEDIT_F_PTYPE;
+}
+#endif
+return false;
+}
+static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
+{
+u16 ptype;
+rcu_read_lock();
+ptype = rcu_dereference(to_skbedit(a)->params)->ptype;
+rcu_read_unlock();
+return ptype;
+}
 #endif /* __NET_TC_SKBEDIT_H */
@@ -3205,6 +3205,12 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 } else if (is_tcf_mirred_egress_mirror(act)) {
 entry->id = FLOW_ACTION_MIRRED;
 entry->dev = tcf_mirred_dev(act);
+} else if (is_tcf_mirred_ingress_redirect(act)) {
+entry->id = FLOW_ACTION_REDIRECT_INGRESS;
+entry->dev = tcf_mirred_dev(act);
+} else if (is_tcf_mirred_ingress_mirror(act)) {
+entry->id = FLOW_ACTION_MIRRED_INGRESS;
+entry->dev = tcf_mirred_dev(act);
 } else if (is_tcf_vlan(act)) {
 switch (tcf_vlan_action(act)) {
 case TCA_VLAN_ACT_PUSH:
@@ -3294,6 +3300,9 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 default:
 goto err_out;
 }
+} else if (is_tcf_skbedit_ptype(act)) {
+entry->id = FLOW_ACTION_PTYPE;
+entry->ptype = tcf_skbedit_ptype(act);
 } else {
 goto err_out;
 }
......