Commit 7885b4fc authored by John Hurley, committed by David S. Miller

nfp: flower: allow non repr netdev offload

Previously the offload functions in NFP assumed that the ingress (or
egress) netdev passed to them was an nfp repr.

Modify the driver to permit passing non-repr netdevs, such as tunnel
devices, as the ingress device for an offload rule candidate. The driver
should then base its offload decision on the combination of ingress device
and egress port for a rule.
Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7f76fa36
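
The crux of the change is a guard pattern: repr-specific data (the cmsg port
ID, the app pointer) is derived only when the netdev actually is an nfp repr,
and a non-repr ingress is accepted as long as the rule can be matched another
way (e.g. on a tunnel). Below is a minimal standalone sketch of that pattern;
the struct net_device fields, is_nfp_repr() and compile_port() here are
hypothetical stand-ins for illustration, not the driver's real types (the
driver uses nfp_netdev_is_nfp_repr() and nfp_repr_get_port_id()).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct net_device; the real driver checks
 * the netdev_ops pointer via nfp_netdev_is_nfp_repr().
 */
struct net_device {
	const char *name;
	bool is_repr;
	uint32_t port_id;	/* meaningful only when is_repr is set */
};

static bool is_nfp_repr(const struct net_device *dev)
{
	return dev->is_repr;
}

/* Mirrors the shape of the patched nfp_flower_compile_flow_match():
 * default the cmsg port to 0, read repr data only behind the guard,
 * and reject rules that have neither a port nor a tunnel to match on.
 */
static int compile_port(const struct net_device *in_dev, bool have_tun_match)
{
	uint32_t cmsg_port = 0;

	if (is_nfp_repr(in_dev))
		cmsg_port = in_dev->port_id;

	if (!have_tun_match && !cmsg_port)
		return -1;	/* -EOPNOTSUPP in the driver */

	printf("%s: offload, cmsg_port=0x%x, tunnel_match=%d\n",
	       in_dev->name, (unsigned int)cmsg_port, (int)have_tun_match);
	return 0;
}

int main(void)
{
	struct net_device repr = { "nfp_repr0", true, 0x40000 };
	struct net_device vxlan = { "vxlan0", false, 0 };

	compile_port(&repr, false);	/* repr ingress: match on port */
	compile_port(&vxlan, true);	/* tunnel ingress: match on tunnel */
	if (compile_port(&vxlan, false) != 0)	/* no port, no tunnel */
		printf("vxlan0: not offloadable\n");
	return 0;
}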
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -149,11 +149,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 	/* Set action output parameters. */
 	output->flags = cpu_to_be16(tmp_flags);
 
-	/* Only offload if egress ports are on the same device as the
-	 * ingress port.
-	 */
-	if (!switchdev_port_same_parent_id(in_dev, out_dev))
-		return -EOPNOTSUPP;
+	if (nfp_netdev_is_nfp_repr(in_dev)) {
+		/* Confirm ingress and egress are on same device. */
+		if (!switchdev_port_same_parent_id(in_dev, out_dev))
+			return -EOPNOTSUPP;
+	}
+
 	if (!nfp_netdev_is_nfp_repr(out_dev))
 		return -EOPNOTSUPP;
@@ -840,9 +841,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
 		*a_len += sizeof(struct nfp_fl_push_vlan);
 	} else if (is_tcf_tunnel_set(a)) {
 		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
-		struct nfp_repr *repr = netdev_priv(netdev);
 
-		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
 		if (*tun_type == NFP_FL_TUNNEL_NONE)
 			return -EOPNOTSUPP;
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -222,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 		return 0;
 	}
 
-	if (tun_type)
+	if (tun_type) {
 		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
-	else
+	} else {
+		if (!cmsg_port)
+			return -EOPNOTSUPP;
 		frame->in_port = cpu_to_be32(cmsg_port);
+	}
 
 	return 0;
 }
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
 	}
 }
 
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
 				  enum nfp_flower_tun_type tun_type)
 {
-	struct nfp_repr *netdev_repr;
+	u32 cmsg_port = 0;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
+	if (nfp_netdev_is_nfp_repr(netdev))
+		cmsg_port = nfp_repr_get_port_id(netdev);
+
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 
 	/* Populate Exact Port data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-				      nfp_repr_get_port_id(netdev),
-				      false, tun_type);
+				      cmsg_port, false, tun_type);
 	if (err)
 		return err;
 
 	/* Populate Mask Port Data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-				      nfp_repr_get_port_id(netdev),
-				      true, tun_type);
+				      cmsg_port, true, tun_type);
 	if (err)
 		return err;
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 		/* Configure tunnel end point MAC. */
-		if (nfp_netdev_is_nfp_repr(netdev)) {
-			netdev_repr = netdev_priv(netdev);
-			nfp_tunnel_write_macs(netdev_repr->app);
-
-			/* Store the tunnel destination in the rule data.
-			 * This must be present and be an exact match.
-			 */
-			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
-		}
+		nfp_tunnel_write_macs(app);
+
+		/* Store the tunnel destination in the rule data.
+		 * This must be present and be an exact match.
+		 */
+		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+		nfp_tunnel_add_ipv4_off(app, tun_dst);
 
 	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
 		err = nfp_flower_compile_geneve_opt(ext, flow, false);
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
 static int
-nfp_flower_xmit_flow(struct net_device *netdev,
-		     struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+		     u8 mtype)
 {
 	u32 meta_len, key_len, mask_len, act_len, tot_len;
-	struct nfp_repr *priv = netdev_priv(netdev);
 	struct sk_buff *skb;
 	unsigned char *msg;
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 
-	nfp_ctrl_tx(priv->app->ctrl, skb);
+	nfp_ctrl_tx(app->ctrl, skb);
 
 	return 0;
 }
@@ -427,13 +426,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		       struct tc_cls_flower_offload *flow, bool egress)
 {
 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *flow_pay;
 	struct nfp_fl_key_ls *key_layer;
+	struct nfp_port *port = NULL;
 	struct net_device *ingr_dev;
 	int err;
 
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
 	ingr_dev = egress ? NULL : netdev;
 	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
 					      NFP_FL_STATS_CTX_DONT_CARE);
@@ -462,8 +464,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 
 	flow_pay->ingress_dev = egress ? NULL : netdev;
 
-	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
-					    tun_type);
+	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+					    flow_pay, tun_type);
 	if (err)
 		goto err_destroy_flow;
@@ -476,7 +478,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_flower_xmit_flow(netdev, flow_pay,
+	err = nfp_flower_xmit_flow(app, flow_pay,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
 	if (err)
 		goto err_destroy_flow;
@@ -487,6 +489,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	port->tc_offload_cnt++;
+	if (port)
+		port->tc_offload_cnt++;
 
 	/* Deallocate flow payload when flower rule has been destroyed. */
@@ -520,12 +523,15 @@ static int
 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 		       struct tc_cls_flower_offload *flow, bool egress)
 {
-	struct nfp_port *port = nfp_port_from_netdev(netdev);
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *nfp_flow;
+	struct nfp_port *port = NULL;
 	struct net_device *ingr_dev;
 	int err;
 
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
 	ingr_dev = egress ? NULL : netdev;
 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
 					      NFP_FL_STATS_CTX_DONT_CARE);
@@ -539,12 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
 	if (nfp_flow->nfp_tun_ipv4_addr)
 		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
 
-	err = nfp_flower_xmit_flow(netdev, nfp_flow,
+	err = nfp_flower_xmit_flow(app, nfp_flow,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
 	if (err)
 		goto err_free_flow;
 
 err_free_flow:
-	port->tc_offload_cnt--;
+	if (port)
+		port->tc_offload_cnt--;
 	kfree(nfp_flow->action_data);
 	kfree(nfp_flow->mask_data);