Commit d367341b authored by David S. Miller

Merge tag 'mlx5-shared-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 E-Switch updates 2017-12-19

This series includes updates to the mlx5 E-Switch infrastructure,
to be merged into the net-next and rdma-next trees.

Mark's patches provide an E-Switch refactoring that generalizes the mlx5
E-Switch vf representor interfaces and data structures. The series is
mainly focused on moving ethernet (netdev) specific representor logic out
of the E-Switch core (eswitch.c) into the mlx5e representor module
(en_rep.c), which gives better separation and allows future support for
other types of vf representors (e.g. RDMA); a brief sketch of the
resulting interface follows.
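
A minimal sketch of the generalized interface (based on the structures
this series introduces; the my_* names below are illustrative, not code
from the patches): a representor type fills in a struct
mlx5_eswitch_rep_if with per-vport load/unload callbacks and an opaque
priv pointer, and registers it under a rep type such as REP_ETH:

	static int my_rep_load(struct mlx5_core_dev *dev,
			       struct mlx5_eswitch_rep *rep)
	{
		/* Allocate type-specific state and stash it in the
		 * per-type priv pointer; the ethernet code keeps its
		 * struct mlx5e_rep_priv here and retrieves it with
		 * mlx5e_rep_to_rep_priv().
		 */
		rep->rep_if[REP_ETH].priv = NULL; /* e.g. a kzalloc'ed priv */
		return 0;
	}

	static void my_rep_unload(struct mlx5_eswitch_rep *rep)
	{
		/* Tear down whatever load() created for this vport. */
	}

	static void my_register_reps(struct mlx5_eswitch *esw, int nvports)
	{
		struct mlx5_eswitch_rep_if rep_if = {
			.load   = my_rep_load,
			.unload = my_rep_unload,
			.priv   = NULL,
		};
		int vport;

		for (vport = 0; vport < nvports; vport++)
			mlx5_eswitch_register_vport_rep(esw, vport,
							&rep_if, REP_ETH);
	}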

Gal's patches at the end of this series provide a simple syntax fix and
two patches that rework the vport ingress/egress ACL steering namespaces
to be aligned with the Firmware/Hardware specs; a short usage sketch
follows.
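
A short usage sketch (illustrative; get_vport_egress_acl_ns is a made-up
helper, not part of the patches): the ACL namespaces become per-vport, so
the lookup takes the vport number and each vport gets its own
ingress/egress root namespace with a single priority:

	static struct mlx5_flow_namespace *
	get_vport_egress_acl_ns(struct mlx5_core_dev *dev, int vport)
	{
		/* Returns NULL if the device lacks ACL support or the
		 * vport number is out of range.
		 */
		return mlx5_get_flow_vport_acl_namespace(dev,
					MLX5_FLOW_NAMESPACE_ESW_EGRESS,
					vport);
	}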

V1->V2:
 - Addressed coding style comments in patches #1 and #7
 - The series is still based on rc4, as net-next is also at rc4 now.

V2->V3:
 - Fixed compilation warning, reported by Dave.

Please pull and let me know if there's any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8d1666fd 9b93ab98
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
        struct mlx5_eswitch_rep *rep;
        struct mlx5e_neigh_update_table neigh_update;
+       struct net_device      *netdev;
+       struct mlx5_flow_handle *vport_rx_rule;
+       struct list_head       vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+       return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
 struct mlx5e_neigh {
        struct net_device *dev;
        union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
        int encap_size;
 };
 
+struct mlx5e_rep_sq {
+       struct mlx5_flow_handle *send_to_vport_rule;
+       struct list_head         list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
...
@@ -617,7 +617,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
                struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-               struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+               struct net_device *up_dev = uplink_rpriv->netdev;
                struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
                /* Full udp dst port must be given */
@@ -1507,6 +1508,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   int *out_ttl)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct rtable *rt;
        struct neighbour *n = NULL;
 
@@ -1520,9 +1522,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
        return -EOPNOTSUPP;
 #endif
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               *out_dev = uplink_rpriv->netdev;
        else
                *out_dev = rt->dst.dev;
 
@@ -1543,6 +1546,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct neighbour **out_n,
                                   int *out_ttl)
 {
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct neighbour *n = NULL;
        struct dst_entry *dst;
 
@@ -1557,9 +1561,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
        *out_ttl = ip6_dst_hoplimit(dst);
 
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               *out_dev = uplink_rpriv->netdev;
        else
                *out_dev = dst->dev;
 #else
@@ -1859,7 +1864,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+                                                                          REP_ETH);
+       struct net_device *up_dev = uplink_rpriv->netdev;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct mlx5e_priv *up_priv = netdev_priv(up_dev);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
...
@@ -867,9 +867,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+                                                   vport->vport);
        if (!root_ns) {
-               esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+               esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
                return -EOPNOTSUPP;
        }
 
@@ -984,9 +985,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+                                                   vport->vport);
        if (!root_ns) {
-               esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+               esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
                return -EOPNOTSUPP;
        }
 
@@ -1290,7 +1292,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &tsar_ctx,
+                                                tsar_ctx,
                                                 &esw->qos.root_tsar_id);
        if (err) {
                esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1335,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
        if (vport->qos.enabled)
                return -EEXIST;
 
-       MLX5_SET(scheduling_context, &sched_ctx, element_type,
+       MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
                                  element_attributes);
        MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-       MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+       MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
                 esw->qos.root_tsar_id);
-       MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+       MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
                 initial_max_rate);
-       MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+       MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &sched_ctx,
+                                                sched_ctx,
                                                 &vport->qos.esw_tsar_ix);
        if (err) {
                esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1394,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
        if (!vport->qos.enabled)
                return -EIO;
 
-       MLX5_SET(scheduling_context, &sched_ctx, element_type,
+       MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
                                  element_attributes);
        MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-       MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+       MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
                 esw->qos.root_tsar_id);
-       MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+       MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
                 max_rate);
-       MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+       MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
        err = mlx5_modify_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &sched_ctx,
+                                                sched_ctx,
                                                 vport->qos.esw_tsar_ix,
                                                 bitmask);
        if (err) {
@@ -1644,13 +1646,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
                goto abort;
        }
 
-       esw->offloads.vport_reps =
-               kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-                       GFP_KERNEL);
-       if (!esw->offloads.vport_reps) {
-               err = -ENOMEM;
+       err = esw_offloads_init_reps(esw);
+       if (err)
                goto abort;
-       }
 
        hash_init(esw->offloads.encap_tbl);
        hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1679,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
+       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
-       kfree(esw->offloads.vport_reps);
        kfree(esw);
        return err;
 }
@@ -1696,7 +1694,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
-       kfree(esw->offloads.vport_reps);
+       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
        kfree(esw);
 }
...
@@ -45,6 +45,11 @@ enum {
        SRIOV_OFFLOADS
 };
 
+enum {
+       REP_ETH,
+       NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -133,25 +138,21 @@ struct mlx5_eswitch_fdb {
        };
 };
 
-struct mlx5_esw_sq {
-       struct mlx5_flow_handle *send_to_vport_rule;
-       struct list_head         list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+       int                    (*load)(struct mlx5_core_dev *dev,
+                                      struct mlx5_eswitch_rep *rep);
+       void                   (*unload)(struct mlx5_eswitch_rep *rep);
+       void                    *priv;
+       bool                   valid;
 };
 
 struct mlx5_eswitch_rep {
-       int                    (*load)(struct mlx5_eswitch *esw,
-                                      struct mlx5_eswitch_rep *rep);
-       void                   (*unload)(struct mlx5_eswitch *esw,
-                                        struct mlx5_eswitch_rep *rep);
+       struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
        u16                    vport;
        u8                     hw_id[ETH_ALEN];
-       struct net_device      *netdev;
-
-       struct mlx5_flow_handle *vport_rx_rule;
-       struct list_head       vport_sqs_list;
        u16                    vlan;
        u32                    vlan_refcount;
-       bool                   valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +198,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +224,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 int vport,
                                 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+                                   u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +264,6 @@ struct mlx5_esw_flow_attr {
        struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep,
-                                u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +273,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
-                                    struct mlx5_eswitch_rep *rep);
+                                    struct mlx5_eswitch_rep_if *rep_if,
+                                    u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-                                      int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+                                      int vport_index,
+                                      u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr);
...
...@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) ...@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport]; rep = &esw->offloads.vport_reps[vf_vport];
if (!rep->valid) if (!rep->rep_if[REP_ETH].valid)
continue; continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
...@@ -302,7 +302,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, ...@@ -302,7 +302,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
return err; return err;
} }
static struct mlx5_flow_handle * struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{ {
struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_act flow_act = {0};
...@@ -339,57 +339,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn ...@@ -339,57 +339,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
return flow_rule; return flow_rule;
} }
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
struct mlx5_eswitch_rep *rep)
{
struct mlx5_esw_sq *esw_sq, *tmp;
if (esw->mode != SRIOV_OFFLOADS)
return;
list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
list_del(&esw_sq->list);
kfree(esw_sq);
}
}
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num)
{ {
struct mlx5_flow_handle *flow_rule; mlx5_del_flow_rules(rule);
struct mlx5_esw_sq *esw_sq;
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
return 0;
for (i = 0; i < sqns_num; i++) {
esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
if (!esw_sq) {
err = -ENOMEM;
goto out_err;
}
/* Add re-inject rule to the PF/representor sqs */
flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
rep->vport,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
kfree(esw_sq);
goto out_err;
}
esw_sq->send_to_vport_rule = flow_rule;
list_add(&esw_sq->list, &rep->vport_sqs_list);
}
return 0;
out_err:
mlx5_eswitch_sqs2vport_stop(esw, rep);
return err;
} }
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
...@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) ...@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
return err; return err;
} }
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
kfree(esw->offloads.vport_reps);
}
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_esw_offload *offloads;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN];
int vport;
esw->offloads.vport_reps = kcalloc(total_vfs,
sizeof(struct mlx5_eswitch_rep),
GFP_KERNEL);
if (!esw->offloads.vport_reps)
return -ENOMEM;
offloads = &esw->offloads;
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
for (vport = 0; vport < total_vfs; vport++) {
rep = &offloads->vport_reps[vport];
rep->vport = vport;
ether_addr_copy(rep->hw_id, hw_id);
}
offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
return 0;
}
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->rep_if[rep_type].valid)
continue;
rep->rep_if[rep_type].unload(rep);
}
}
static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
esw_offloads_unload_reps_type(esw, nvports, rep_type);
}
static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
{ {
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int vport; int vport;
int err; int err;
for (vport = 0; vport < nvports; vport++) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->rep_if[rep_type].valid)
continue;
err = rep->rep_if[rep_type].load(esw->dev, rep);
if (err)
goto err_reps;
}
return 0;
err_reps:
esw_offloads_unload_reps_type(esw, vport, rep_type);
return err;
}
static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
err = esw_offloads_load_reps_type(esw, nvports, rep_type);
if (err)
goto err_reps;
}
return err;
err_reps:
while (rep_type-- > 0)
esw_offloads_unload_reps_type(esw, nvports, rep_type);
return err;
}
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
/* disable PF RoCE so missed packets don't go through RoCE steering */ /* disable PF RoCE so missed packets don't go through RoCE steering */
mlx5_dev_list_lock(); mlx5_dev_list_lock();
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
...@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) ...@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err) if (err)
goto create_fg_err; goto create_fg_err;
for (vport = 0; vport < nvports; vport++) { err = esw_offloads_load_reps(esw, nvports);
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
err = rep->load(esw, rep);
if (err) if (err)
goto err_reps; goto err_reps;
}
return 0; return 0;
err_reps: err_reps:
for (vport--; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
rep->unload(esw, rep);
}
esw_destroy_vport_rx_group(esw); esw_destroy_vport_rx_group(esw);
create_fg_err: create_fg_err:
...@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) ...@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{ {
struct mlx5_eswitch_rep *rep; esw_offloads_unload_reps(esw, nvports);
int vport;
for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
rep->unload(esw, rep);
}
esw_destroy_vport_rx_group(esw); esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw); esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw); esw_destroy_offloads_fdb_tables(esw);
...@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) ...@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index, int vport_index,
struct mlx5_eswitch_rep *__rep) struct mlx5_eswitch_rep_if *__rep_if,
u8 rep_type)
{ {
struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep_if *rep_if;
rep = &offloads->vport_reps[vport_index];
memset(rep, 0, sizeof(*rep)); rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
rep->load = __rep->load; rep_if->load = __rep_if->load;
rep->unload = __rep->unload; rep_if->unload = __rep_if->unload;
rep->vport = __rep->vport; rep_if->priv = __rep_if->priv;
rep->netdev = __rep->netdev;
ether_addr_copy(rep->hw_id, __rep->hw_id);
INIT_LIST_HEAD(&rep->vport_sqs_list); rep_if->valid = true;
rep->valid = true;
} }
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
int vport_index) int vport_index, u8 rep_type)
{ {
struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
...@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, ...@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep = &offloads->vport_reps[vport_index]; rep = &offloads->vport_reps[vport_index];
if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
rep->unload(esw, rep); rep->rep_if[rep_type].unload(rep);
rep->valid = false; rep->rep_if[rep_type].valid = false;
} }
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw) void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{ {
#define UPLINK_REP_INDEX 0 #define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX]; rep = &offloads->vport_reps[UPLINK_REP_INDEX];
return rep->netdev; return rep->rep_if[rep_type].priv;
} }
@@ -2026,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                        return &steering->fdb_root_ns->ns;
                else
                        return NULL;
-       case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-               if (steering->esw_egress_root_ns)
-                       return &steering->esw_egress_root_ns->ns;
-               else
-                       return NULL;
-       case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-               if (steering->esw_ingress_root_ns)
-                       return &steering->esw_ingress_root_ns->ns;
-               else
-                       return NULL;
        case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
                if (steering->sniffer_rx_root_ns)
                        return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+                                                              enum mlx5_flow_namespace_type type,
+                                                              int vport)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+
+       if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+               return NULL;
+
+       switch (type) {
+       case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+               if (steering->esw_egress_root_ns &&
+                   steering->esw_egress_root_ns[vport])
+                       return &steering->esw_egress_root_ns[vport]->ns;
+               else
+                       return NULL;
+       case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+               if (steering->esw_ingress_root_ns &&
+                   steering->esw_ingress_root_ns[vport])
+                       return &steering->esw_ingress_root_ns[vport]->ns;
+               else
+                       return NULL;
+       default:
+               return NULL;
+       }
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                                      unsigned int prio, int num_levels)
 {
@@ -2343,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
        clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_egress_root_ns)
+               return;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+       kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_ingress_root_ns)
+               return;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+       kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
        cleanup_root_ns(steering->root_ns);
-       cleanup_root_ns(steering->esw_egress_root_ns);
-       cleanup_root_ns(steering->esw_ingress_root_ns);
+       cleanup_egress_acls_root_ns(dev);
+       cleanup_ingress_acls_root_ns(dev);
        cleanup_root_ns(steering->fdb_root_ns);
        cleanup_root_ns(steering->sniffer_rx_root_ns);
        cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2463,86 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
        return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
        struct fs_prio *prio;
 
-       steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-       if (!steering->esw_egress_root_ns)
+       steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+       if (!steering->esw_egress_root_ns[vport])
                return -ENOMEM;
 
        /* create 1 prio*/
-       prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-                             MLX5_TOTAL_VPORTS(steering->dev));
+       prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
        struct fs_prio *prio;
 
-       steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-       if (!steering->esw_ingress_root_ns)
+       steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+       if (!steering->esw_ingress_root_ns[vport])
                return -ENOMEM;
 
        /* create 1 prio*/
-       prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-                             MLX5_TOTAL_VPORTS(steering->dev));
+       prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int err;
+       int i;
+
+       steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+                                              sizeof(*steering->esw_egress_root_ns),
+                                              GFP_KERNEL);
+       if (!steering->esw_egress_root_ns)
+               return -ENOMEM;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+               err = init_egress_acl_root_ns(steering, i);
+               if (err)
+                       goto cleanup_root_ns;
+       }
+
+       return 0;
+
+cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+       kfree(steering->esw_egress_root_ns);
+       return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int err;
+       int i;
+
+       steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+                                               sizeof(*steering->esw_ingress_root_ns),
+                                               GFP_KERNEL);
+       if (!steering->esw_ingress_root_ns)
+               return -ENOMEM;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+               err = init_ingress_acl_root_ns(steering, i);
+               if (err)
+                       goto cleanup_root_ns;
+       }
+
+       return 0;
+
+cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+       kfree(steering->esw_ingress_root_ns);
+       return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering;
@@ -2488,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
        if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-               err = init_egress_acl_root_ns(steering);
+               err = init_egress_acls_root_ns(dev);
                if (err)
                        goto err;
        }
        if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-               err = init_ingress_acl_root_ns(steering);
+               err = init_ingress_acls_root_ns(dev);
                if (err)
                        goto err;
        }
...
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
        struct kmem_cache               *ftes_cache;
        struct mlx5_flow_root_namespace *root_ns;
        struct mlx5_flow_root_namespace *fdb_root_ns;
-       struct mlx5_flow_root_namespace *esw_egress_root_ns;
-       struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+       struct mlx5_flow_root_namespace **esw_egress_root_ns;
+       struct mlx5_flow_root_namespace **esw_ingress_root_ns;
        struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
        struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
 };
...
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                        enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+                                 enum mlx5_flow_namespace_type type,
+                                 int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
...