Commit ef68de56 authored by David S. Miller's avatar David S. Miller

Merge branch 'Support-tunnels-over-VLAN-in-NFP'

John Hurley says:

====================
Support tunnels over VLAN in NFP

This patchset deals with tunnel encap and decap when the end-point IP
address is on an internal port (for example an OvS VLAN port). Tunnel
encap without VLAN is already supported in the NFP driver. This patchset
extends that to include a push VLAN along with tunnel header push.

Patches 1-4 extend the flow_offload IR API to include actions that use
skbedit to set the ptype of an SKB and that send a packet to port ingress
from the act_mirred module. Such actions are used in flower rules that
forward tunnel packets to internal ports where they can be decapsulated.
OvS and its TC API is an example of a user-space app that produces such
rules.

Patch 5 modifies the encap offload code to allow the pushing of a VLAN
header after a tunnel header push.

Patches 6-10 deal with tunnel decap when the end-point is on an internal
port. They detect 'pre-tunnel rules' which do not deal with tunnels
themselves but, rather, forward packets to internal ports where they
can be decapped if required. Such rules are offloaded to a table in HW
along with an indication of whether packets need to be passed to this
table or not (based on their destination MAC address). Matching against
this table prior to decapsulation in HW allows the correct parsing and
handling of outer VLANs on tunnelled packets and the correct updating of
stats for said 'pre-tunnel' rules.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 55a47dc2 2e0bc7f3
......@@ -173,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
struct netlink_ext_ack *extack)
bool pkt_host, struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
......@@ -218,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
return gid;
}
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
return -EOPNOTSUPP;
}
if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
return -EOPNOTSUPP;
}
nfp_flow->pre_tun_rule.dev = out_dev;
return 0;
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
......@@ -885,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
int *out_cnt, u32 *csum_updated, bool pkt_host,
struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
......@@ -907,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app,
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
tun_out_cnt, extack);
tun_out_cnt, pkt_host, extack);
if (err)
return err;
......@@ -939,7 +953,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
struct nfp_flower_pedit_acts *set_act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
struct nfp_fl_set_ipv4_tun *set_tun;
......@@ -955,17 +969,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
break;
case FLOW_ACTION_REDIRECT_INGRESS:
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
out_cnt, csum_updated, extack);
out_cnt, csum_updated, *pkt_host,
extack);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED_INGRESS:
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
out_cnt, csum_updated, extack);
out_cnt, csum_updated, *pkt_host,
extack);
if (err)
return err;
break;
......@@ -1095,6 +1113,13 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
nfp_fl_set_mpls(set_m, act);
*a_len += sizeof(struct nfp_fl_set_mpls);
break;
case FLOW_ACTION_PTYPE:
/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
if (act->ptype != PACKET_HOST)
return -EOPNOTSUPP;
*pkt_host = true;
break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
......@@ -1150,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
bool pkt_host = false;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
......@@ -1166,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
&set_act, extack, i);
&set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
......
......@@ -220,7 +220,8 @@ struct nfp_fl_set_ipv4_tun {
__be16 tun_flags;
u8 ttl;
u8 tos;
__be32 extra;
__be16 outer_vlan_tpid;
__be16 outer_vlan_tci;
u8 tun_len;
u8 res2;
__be16 tun_proto;
......@@ -483,6 +484,7 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
......
......@@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app)
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
app_priv->pre_tun_rule_cnt = 0;
return 0;
......
......@@ -42,6 +42,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
......@@ -162,6 +163,7 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
*/
struct nfp_flower_priv {
struct nfp_app *app;
......@@ -193,6 +195,7 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
};
/**
......@@ -218,6 +221,7 @@ struct nfp_fl_qos {
* @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
* @qos_table: Stored info on filters implementing qos
* @on_bridge: Indicates if the repr is attached to a bridge
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
......@@ -227,6 +231,7 @@ struct nfp_flower_repr_priv {
bool block_shared;
struct list_head mac_list;
struct nfp_fl_qos qos_table;
bool on_bridge;
};
/**
......@@ -280,6 +285,11 @@ struct nfp_fl_payload {
char *action_data;
struct list_head linked_flows;
bool in_hw;
struct {
struct net_device *dev;
__be16 vlan_tci;
__be16 port_idx;
} pre_tun_rule;
};
struct nfp_fl_payload_link {
......@@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}
/* Only OvS bridge masters (internal ports) are currently treated as
 * supported bridge devices for pre-tunnel rule offload.
 */
static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
{
	return netif_is_ovs_master(netdev);
}
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
......@@ -415,4 +430,8 @@ void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev);
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
struct nfp_fl_payload *flow);
#endif
......@@ -61,6 +61,11 @@
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_IPV4)
struct nfp_flower_merge_check {
union {
struct {
......@@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
......@@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
return act_off;
}
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
/* Verify the actions that follow a tunnel push in a merged flow.
 * Only output actions are permitted, optionally preceded by a single
 * VLAN push which, if found, is returned through @vlan so the caller
 * can fold it into the tunnel header push.
 *
 * Return: 0 on success, -EOPNOTSUPP if an unsupported action is found.
 */
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		/* A VLAN push is only accepted as the very first action
		 * (act_off == 0); anything else must be an output.
		 */
		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}
/* Fold a post-tunnel VLAN push into the tunnel set action of an action
 * list, so firmware pushes the outer VLAN along with the tunnel header.
 *
 * Return: 0 if a tunnel set action was patched, -EOPNOTSUPP otherwise.
 */
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_ipv4_tun *tun;
	struct nfp_fl_act_head *head;
	unsigned int offset;

	for (offset = 0; offset < len;
	     offset += head->len_lw << NFP_FL_LW_SIZ) {
		head = (struct nfp_fl_act_head *)&acts[offset];
		if (head->jump_id != NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL)
			continue;

		tun = (struct nfp_fl_set_ipv4_tun *)head;
		tun->outer_vlan_tpid = vlan->vlan_tpid;
		tun->outer_vlan_tci = vlan->vlan_tci;
		return 0;
	}

	/* No tunnel set action found - nothing to attach the VLAN to. */
	return -EOPNOTSUPP;
}
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2,
struct nfp_fl_payload *merge_flow)
{
unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
bool tunnel_act = false;
char *merge_act;
int err;
......@@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
* a tunnel, sub_flow 2 can only have output actions for a valid merge.
* a tunnel, there are restrictions on what sub_flow 2 actions lead to a
* valid merge.
*/
if (tunnel_act) {
char *post_tun_acts = &sub_flow2->action_data[pre_off2];
err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
&post_tun_push_vlan);
if (err)
return err;
if (post_tun_push_vlan) {
pre_off2 += sizeof(*post_tun_push_vlan);
sub2_act_len -= sizeof(*post_tun_push_vlan);
}
}
/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
if (post_tun_push_vlan) {
/* Update tunnel action in merge to include VLAN push. */
err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
post_tun_push_vlan);
if (err)
return err;
merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
}
merge_act += sub1_act_len;
memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
......@@ -944,6 +1002,106 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
return err;
}
/**
 * nfp_flower_validate_pre_tun_rule() - verify a flow as a pre-tunnel rule
 * @app: Pointer to the APP handle
 * @flow: Pointer to NFP flow representation of rule
 * @extack: Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct netlink_ext_ack *extack)
{
	struct nfp_flower_meta_tci *meta_tci;
	struct nfp_flower_mac_mpls *mac;
	struct nfp_fl_act_head *act;
	u8 *mask = flow->mask_data;
	bool vlan = false;
	int act_offset;
	u8 key_layer;

	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;

	/* Cache the matched VLAN TCI (minus the present bit) in the flow for
	 * the offload message; 0xffff marks "no VLAN matched".
	 */
	if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
		u16 vlan_tci = be16_to_cpu(meta_tci->tci);

		vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
		flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
		vlan = true;
	} else {
		flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
	}

	/* Only port, MAC and IPv4 layers may be matched by a pre-tun rule. */
	key_layer = meta_tci->nfp_flow_key_layer;
	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
		return -EOPNOTSUPP;
	}

	/* Skip fields known to exist. */
	mask += sizeof(struct nfp_flower_meta_tci);
	mask += sizeof(struct nfp_flower_in_port);

	/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
		return -EOPNOTSUPP;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int i;

		/* Advance past the MAC/MPLS section to the IPv4 mask. */
		mask += sizeof(struct nfp_flower_mac_mpls);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
			}
	}

	/* Action must be a single egress or pop_vlan and egress. */
	act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
			return -EOPNOTSUPP;
		}

		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	}

	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
		return -EOPNOTSUPP;
	}

	act_offset += act->len_lw << NFP_FL_LW_SIZ;

	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
		return -EOPNOTSUPP;
	}

	return 0;
}
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
......@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) {
err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
if (err)
goto err_destroy_flow;
}
err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
......@@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_release_metadata;
}
err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (flow_pay->pre_tun_rule.dev)
err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
else
err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
......@@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_merge_flow;
}
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (nfp_flow->pre_tun_rule.dev)
err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
else
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
/* Fall through on error. */
err_free_merge_flow:
......
......@@ -15,6 +15,24 @@
#define NFP_FL_MAX_ROUTES 32
#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags: options for the rule offload (e.g. NFP_TUN_PRE_TUN_RULE_DEL)
 * @port_idx: index of destination MAC address for the rule
 * @vlan_tci: VLAN info associated with MAC
 * @host_ctx_id: stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};
/**
* struct nfp_tun_active_tuns - periodic message of active tunnels
* @seq: sequence number of the message
......@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
/**
* struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
* @ht_node: Hashtable entry
* @addr: Offloaded MAC address
* @index: Offloaded index for given MAC address
* @ref_count: Number of devs using this MAC address
* @repr_list: List of reprs sharing this MAC address
* @ht_node: Hashtable entry
* @addr: Offloaded MAC address
* @index: Offloaded index for given MAC address
* @ref_count: Number of devs using this MAC address
* @repr_list: List of reprs sharing this MAC address
* @bridge_count: Number of bridge/internal devs with MAC
*/
struct nfp_tun_offloaded_mac {
struct rhash_head ht_node;
......@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
u16 index;
int ref_count;
struct list_head repr_list;
int bridge_count;
};
static const struct rhashtable_params offloaded_macs_params = {
......@@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
} else if (nfp_flower_is_supported_bridge(netdev)) {
entry->bridge_count++;
}
entry->ref_count++;
......@@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
return 0;
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
netdev, mod);
return 0;
}
/* MAC is global but matches need to go to pre_tun table. */
nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
}
/* Assign a global index if non-repr or MAC address is now shared. */
if (entry || !port) {
ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (ida_idx < 0)
return ida_idx;
if (!nfp_mac_idx) {
/* Assign a global index if non-repr or MAC is now shared. */
if (entry || !port) {
ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (ida_idx < 0)
return ida_idx;
nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
} else {
nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
nfp_mac_idx =
nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
if (nfp_flower_is_supported_bridge(netdev))
nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
} else {
nfp_mac_idx =
nfp_tunnel_get_mac_idx_from_phy_port_id(port);
}
}
if (!entry) {
......@@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
list_del(&repr_priv->mac_list);
}
if (nfp_flower_is_supported_bridge(netdev)) {
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
u16 nfp_mac_idx;
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
netdev_name(netdev));
return 0;
}
entry->index = nfp_mac_idx;
return 0;
}
}
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
......@@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
repr_priv = repr->app_priv;
if (repr_priv->on_bridge)
return 0;
mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
......@@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
} else if (event == NETDEV_CHANGEUPPER) {
/* If a repr is attached to a bridge then tunnel packets
* entering the physical port are directed through the bridge
* datapath and cannot be directly detunneled. Therefore,
* associated offloaded MACs and indexes should not be used
* by fw for detunneling.
*/
struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *upper = info->upper_dev;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_repr *repr;
if (!nfp_netdev_is_nfp_repr(netdev) ||
!nfp_flower_is_supported_bridge(upper))
return NOTIFY_OK;
repr = netdev_priv(netdev);
if (repr->app != app)
return NOTIFY_OK;
repr_priv = repr->app_priv;
if (info->linking) {
if (nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_DEL))
nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
netdev_name(netdev));
repr_priv->on_bridge = true;
} else {
repr_priv->on_bridge = false;
if (!(netdev->flags & IFF_UP))
return NOTIFY_OK;
if (nfp_tunnel_offload_mac(app, netdev,
NFP_TUNNEL_MAC_OFFLOAD_ADD))
nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
netdev_name(netdev));
}
}
return NOTIFY_OK;
}
/* Offload a pre-tunnel rule to firmware. Looks up the MAC index of the
 * rule's internal egress device, caches the index and VLAN in the flow
 * payload (needed at delete time), then sends the rule cmsg.
 *
 * Return: 0 on success, -ENOSPC if the rule limit is reached, -ENOENT if
 * the egress device MAC is not offloaded, or a cmsg xmit error.
 */
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	/* Firmware supports a bounded number of pre-tunnel rules. */
	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	/* Only count the rule once firmware has accepted it. */
	app_priv->pre_tun_rule_cnt++;

	return 0;
}
/* Remove an offloaded pre-tunnel rule from firmware. The port index and
 * VLAN were cached in the flow payload at add time, so the (possibly
 * already removed) egress netdev is not consulted here.
 *
 * Return: 0 on success or a cmsg xmit error code.
 */
int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	int err;

	memset(&payload, 0, sizeof(payload));

	/* Mark the message as a rule deletion. */
	payload.flags = cpu_to_be32(NFP_TUN_PRE_TUN_RULE_DEL);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(payload),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
......
......@@ -117,6 +117,8 @@ enum flow_action_id {
FLOW_ACTION_GOTO,
FLOW_ACTION_REDIRECT,
FLOW_ACTION_MIRRED,
FLOW_ACTION_REDIRECT_INGRESS,
FLOW_ACTION_MIRRED_INGRESS,
FLOW_ACTION_VLAN_PUSH,
FLOW_ACTION_VLAN_POP,
FLOW_ACTION_VLAN_MANGLE,
......@@ -126,6 +128,7 @@ enum flow_action_id {
FLOW_ACTION_ADD,
FLOW_ACTION_CSUM,
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
......@@ -168,6 +171,7 @@ struct flow_action_entry {
const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
u32 csum_flags; /* FLOW_ACTION_CSUM */
u32 mark; /* FLOW_ACTION_MARK */
u16 ptype; /* FLOW_ACTION_PTYPE */
struct { /* FLOW_ACTION_QUEUE */
u32 ctx;
u32 index;
......
......@@ -32,6 +32,24 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
return false;
}
/* Return true iff the action is a mirred redirect to port ingress. */
static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
	if (a->ops && a->ops->id == TCA_ID_MIRRED &&
	    to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR)
		return true;
#endif
	return false;
}
/* Return true iff the action is a mirred mirror to port ingress. */
static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
	if (a->ops && a->ops->id == TCA_ID_MIRRED &&
	    to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR)
		return true;
#endif
	return false;
}
static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
{
return rtnl_dereference(to_mirred(a)->tcfm_dev);
......
......@@ -54,4 +54,31 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a)
return mark;
}
/* Return true iff the action is a skbedit whose only edit is ptype. */
static inline bool is_tcf_skbedit_ptype(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
	bool is_ptype = false;

	if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
		rcu_read_lock();
		/* Flags must be exactly SKBEDIT_F_PTYPE - no other edits. */
		is_ptype = rcu_dereference(to_skbedit(a)->params)->flags ==
			   SKBEDIT_F_PTYPE;
		rcu_read_unlock();
	}

	return is_ptype;
#else
	return false;
#endif
}
/* Return the packet type configured by a skbedit ptype action. Reads the
 * RCU-protected action params; callers should gate on
 * is_tcf_skbedit_ptype() first.
 */
static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
{
	u16 ptype;

	rcu_read_lock();
	ptype = rcu_dereference(to_skbedit(a)->params)->ptype;
	rcu_read_unlock();

	return ptype;
}
#endif /* __NET_TC_SKBEDIT_H */
......@@ -3205,6 +3205,12 @@ int tc_setup_flow_action(struct flow_action *flow_action,
} else if (is_tcf_mirred_egress_mirror(act)) {
entry->id = FLOW_ACTION_MIRRED;
entry->dev = tcf_mirred_dev(act);
} else if (is_tcf_mirred_ingress_redirect(act)) {
entry->id = FLOW_ACTION_REDIRECT_INGRESS;
entry->dev = tcf_mirred_dev(act);
} else if (is_tcf_mirred_ingress_mirror(act)) {
entry->id = FLOW_ACTION_MIRRED_INGRESS;
entry->dev = tcf_mirred_dev(act);
} else if (is_tcf_vlan(act)) {
switch (tcf_vlan_action(act)) {
case TCA_VLAN_ACT_PUSH:
......@@ -3294,6 +3300,9 @@ int tc_setup_flow_action(struct flow_action *flow_action,
default:
goto err_out;
}
} else if (is_tcf_skbedit_ptype(act)) {
entry->id = FLOW_ACTION_PTYPE;
entry->ptype = tcf_skbedit_ptype(act);
} else {
goto err_out;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment