Commit 6c018b73 authored by David S. Miller

Merge tag 'mlx5-updates-2019-05-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-05-31

This series provides some updates to the mlx5 core and netdevice driver.

1) Use __netdev_tx_sent_queue() to improve performance under GSO workloads.
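   For illustration, the core of the change (this mirrors the en_tx.c
   hunk further down; only __netdev_tx_sent_queue(), an existing core
   helper, is assumed):

	/* Folds BQL accounting and the doorbell decision into one call;
	 * under a GSO burst with xmit_more set, no doorbell is rung and
	 * the BQL limit cacheline is not dirtied per segment.
	 */
	send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
					       xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);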

2) Allow matching only on enc_key_id/enc_dst_port for the decapsulation action.

3) Geneve support:
This patchset adds support for offloading GENEVE tunnel encap/decap flows:
encapsulating layer 2 Ethernet frames within layer 4 UDP datagrams.
The driver supports UDP destination port 6081, the default IANA-assigned
Geneve port.

Encap:
  ConnectX-5 inserts the header (with or without Geneve TLV options)
  provided by the mlx5 driver into the outgoing packet.

Decap:
  The Geneve header is matched and the packet is decapsulated.
  Notes about decap flows with Geneve TLV options:
   - Only 32-bit option data can be offloaded
   - At any given time, only one combination of class/type parameters
     can be offloaded, but the same class/type combination can have
     many different flows offloaded with different 32-bit option data
   - Options with a value of 0 can't be offloaded
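
  As a sketch, the 32-bit option data of a decap flow is matched through
  misc_parameters_3 (this mirrors mlx5_flow_has_geneve_opt() in the
  en_tc.c hunk below; "opt_data" is a hypothetical variable holding the
  option's data dword in network order):

	void *misc3_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				     misc_parameters_3);

	/* single 32-bit data dword of the one offloaded class/type option */
	MLX5_SET(fte_match_set_misc3, misc3_v, geneve_tlv_option_0_data,
		 be32_to_cpu(opt_data));
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;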

Managing Geneve TLV options:
  Matching (on receive) is done by the ConnectX-5 flex parser.
  Geneve TLV options are managed using a General Object of type
  "Geneve TLV Options".

  When the first flow with a certain class/type combination is requested
  to be offloaded, the driver creates a FW object with the FW command
  (Geneve TLV Options general object) and starts counting the number
  of flows using this object.

  While such an object exists, any request with different class/type
  values will fail to be offloaded.
  Once the refcount reaches 0, the driver destroys the TLV options
  general object and can again offload a flow with any class/type
  parameters.

  A Geneve TLV Options object is added to the core device.
  It is currently used only to manage the allocation of the Geneve TLV
  options general object in FW and its reference counting.

  In the future it will also be used for managing Geneve ports
  by registering callbacks for ndo_udp_tunnel_add/del.
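
  The lifetime rule above can be summarized with a small sketch
  (hypothetical names; the real implementation lives in lib/geneve.c,
  added by this series, and is serialized by a mutex):

	struct tlv_opt_obj {
		u16 opt_class;
		u8  opt_type;
		int refcount;	/* flows using this class/type */
		u32 obj_id;	/* FW general object id */
	};

	static int tlv_opt_get(struct tlv_opt_obj *o, u16 opt_class, u8 opt_type)
	{
		if (o->refcount == 0) {
			/* first flow: create the FW general object */
			o->obj_id = fw_create_geneve_tlv_opt(opt_class, opt_type);
			o->opt_class = opt_class;
			o->opt_type = opt_type;
		} else if (o->opt_class != opt_class || o->opt_type != opt_type) {
			return -EOPNOTSUPP;	/* one class/type at a time */
		}
		o->refcount++;
		return 0;
	}

	static void tlv_opt_put(struct tlv_opt_obj *o)
	{
		if (--o->refcount == 0)
			fw_destroy_geneve_tlv_opt(o->obj_id);
	}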

TC tunnel code refactoring:
  As a preparation for the Geneve code, the TC tunnel code in mlx5
  was rearranged in a modular way, so that it is easier to add
  future tunnels:
   - Define a tc tunnel object with the fields and callbacks that
     any tunnel must implement.
   - Define a tc UDP tunnel object for UDP tunnels, such as VXLAN.
   - Move each tunnel's code (GRE, VXLAN) to its own separate file.
   - Rewrite the tc tunnel implementation in a generic way, using
     only the objects and their callbacks; see the dispatch sketch
     below.
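
  For example, generic code can now dispatch through the per-tunnel
  object instead of switching on a tunnel type; a sketch built on the
  struct mlx5e_tc_tunnel callbacks added in en/tc_tun.h below:

	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);

	if (!tunnel || !tunnel->can_offload(priv))
		return -EOPNOTSUPP;
	/* UDP tunnels (VXLAN, Geneve) set parse_udp_ports; GRE leaves it NULL */
	if (tunnel->parse_udp_ports) {
		err = tunnel->parse_udp_ports(priv, spec, f, headers_c, headers_v);
		if (err)
			return err;
	}
	err = tunnel->parse_tunnel(priv, spec, f, headers_c, headers_v);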

4) Termination tables:
Actions in tables set with the termination flag are guaranteed to terminate
the action list. Thus, potentially looping functionality (e.g. hairpin) can
safely be executed without creating loops.
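
The FDB insertion path reflects this directly; the eswitch_offloads.c
hunk below chooses between a plain rule and a termination-table rule:

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);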
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e2821fc8 ca6c7df0
@@ -60,7 +60,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 if (!__mlx5_ib_add(ibdev, profile))
 return -EINVAL;
-rep->rep_if[REP_IB].priv = ibdev;
+rep->rep_data[REP_IB].priv = ibdev;
 return 0;
 }
@@ -70,13 +70,13 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
 struct mlx5_ib_dev *dev;
-if (!rep->rep_if[REP_IB].priv ||
+if (!rep->rep_data[REP_IB].priv ||
 rep->vport != MLX5_VPORT_UPLINK)
 return;
 dev = mlx5_ib_rep_to_dev(rep);
 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-rep->rep_if[REP_IB].priv = NULL;
+rep->rep_data[REP_IB].priv = NULL;
 }
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +84,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 return mlx5_ib_rep_to_dev(rep);
 }
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+.load = mlx5_ib_vport_rep_load,
+.unload = mlx5_ib_vport_rep_unload,
+.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 struct mlx5_eswitch *esw = mdev->priv.eswitch;
-struct mlx5_eswitch_rep_if rep_if = {};
-rep_if.load = mlx5_ib_vport_rep_load;
-rep_if.unload = mlx5_ib_vport_rep_unload;
-rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
......
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 static inline
 struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 {
-return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+return rep->rep_data[REP_IB].priv;
 }
 #endif /* __MLX5_IB_REP_H__ */
@@ -31,12 +31,15 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
 mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
+lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+en/tc_tun_geneve.o
 #
 # Core extra
 #
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
+ecpf.o rdma.o
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
 mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
 mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
......
@@ -316,7 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
 case MLX5_CMD_OP_DEALLOC_MEMIC:
 case MLX5_CMD_OP_PAGE_FAULT_RESUME:
-case MLX5_CMD_OP_QUERY_HOST_PARAMS:
+case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 return MLX5_CMD_STAT_OK;
 case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -628,7 +628,7 @@ const char *mlx5_command_str(int command)
 MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
 MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
 MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
-MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
 default: return "unknown command opcode";
 }
 }
......
@@ -83,30 +83,3 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 mlx5_peer_pf_cleanup(dev);
 }
-static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
-u32 *out, int outlen)
-{
-u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
-MLX5_SET(query_host_params_in, in, opcode,
-MLX5_CMD_OP_QUERY_HOST_PARAMS);
-return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{
-u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
-int err;
-err = mlx5_query_host_params_context(dev, out, sizeof(out));
-if (err)
-return err;
-*num_vf = MLX5_GET(query_host_params_out, out,
-host_params_context.host_num_of_vfs);
-mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
-return 0;
-}
@@ -16,7 +16,6 @@ enum {
 bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
 int mlx5_ec_init(struct mlx5_core_dev *dev);
 void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
 #else /* CONFIG_MLX5_ESWITCH */
@@ -24,9 +23,6 @@ static inline bool
 mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
 static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
-static inline int
-mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{ return -EOPNOTSUPP; }
 #endif /* CONFIG_MLX5_ESWITCH */
......
@@ -14,9 +14,41 @@
 enum {
 MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
 MLX5E_TC_TUNNEL_TYPE_VXLAN,
-MLX5E_TC_TUNNEL_TYPE_GRETAP
+MLX5E_TC_TUNNEL_TYPE_GENEVE,
+MLX5E_TC_TUNNEL_TYPE_GRETAP,
 };
+struct mlx5e_tc_tunnel {
+int tunnel_type;
+enum mlx5_flow_match_level match_level;
+bool (*can_offload)(struct mlx5e_priv *priv);
+int (*calc_hlen)(struct mlx5e_encap_entry *e);
+int (*init_encap_attr)(struct net_device *tunnel_dev,
+struct mlx5e_priv *priv,
+struct mlx5e_encap_entry *e,
+struct netlink_ext_ack *extack);
+int (*generate_ip_tun_hdr)(char buf[],
+__u8 *ip_proto,
+struct mlx5e_encap_entry *e);
+int (*parse_udp_ports)(struct mlx5e_priv *priv,
+struct mlx5_flow_spec *spec,
+struct tc_cls_flower_offload *f,
+void *headers_c,
+void *headers_v);
+int (*parse_tunnel)(struct mlx5e_priv *priv,
+struct mlx5_flow_spec *spec,
+struct tc_cls_flower_offload *f,
+void *headers_c,
+void *headers_v);
+};
+extern struct mlx5e_tc_tunnel vxlan_tunnel;
+extern struct mlx5e_tc_tunnel geneve_tunnel;
+extern struct mlx5e_tc_tunnel gre_tunnel;
+struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
 int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
 struct mlx5e_priv *priv,
 struct mlx5e_encap_entry *e,
@@ -30,7 +62,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 struct net_device *mirred_dev,
 struct mlx5e_encap_entry *e);
-int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
 bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
 struct net_device *netdev);
@@ -41,4 +72,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
 void *headers_c,
 void *headers_v, u8 *match_level);
+int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
+struct mlx5_flow_spec *spec,
+struct tc_cls_flower_offload *f,
+void *headers_c,
+void *headers_v);
 #endif //__MLX5_EN_TC_TUNNEL_H__
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
#include <net/gre.h>
#include "en/tc_tun.h"
static bool mlx5e_tc_tun_can_offload_gretap(struct mlx5e_priv *priv)
{
return !!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap);
}
static int mlx5e_tc_tun_calc_hlen_gretap(struct mlx5e_encap_entry *e)
{
return gre_calc_hlen(e->tun_info->key.tun_flags);
}
static int mlx5e_tc_tun_init_encap_attr_gretap(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
e->tunnel = &gre_tunnel;
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
return 0;
}
static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
if (tun_key->tun_flags & TUNNEL_KEY) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}
return 0;
}
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* gre protocol */
MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
/* gre key */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid enc_keyid;
flow_rule_match_enc_keyid(rule, &enc_keyid);
MLX5_SET(fte_match_set_misc, misc_c,
gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
}
struct mlx5e_tc_tunnel gre_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_GRETAP,
.match_level = MLX5_MATCH_L3,
.can_offload = mlx5e_tc_tun_can_offload_gretap,
.calc_hlen = mlx5e_tc_tun_calc_hlen_gretap,
.init_encap_attr = mlx5e_tc_tun_init_encap_attr_gretap,
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
.parse_udp_ports = NULL,
.parse_tunnel = mlx5e_tc_tun_parse_gretap,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
#include <net/vxlan.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"
static bool mlx5e_tc_tun_can_offload_vxlan(struct mlx5e_priv *priv)
{
return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
}
static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
{
return VXLAN_HLEN;
}
static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct flow_match_ports enc_ports;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
return -EOPNOTSUPP;
flow_rule_match_enc_ports(rule, &enc_ports);
/* check the UDP destination port validity */
if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
be16_to_cpu(enc_ports.key->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"Matched UDP dst port is not registered as a VXLAN port");
netdev_warn(priv->netdev,
"UDP port %d is not registered as a VXLAN port\n",
be16_to_cpu(enc_ports.key->dst));
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
int err = 0;
err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
if (err)
return err;
return mlx5e_tc_tun_check_udp_dport_vxlan(priv, f);
}
static int mlx5e_tc_tun_init_encap_attr_vxlan(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
e->tunnel = &vxlan_tunnel;
if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
NL_SET_ERR_MSG_MOD(extack,
"vxlan udp dport was not registered with the HW");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n",
dst_port);
return -EOPNOTSUPP;
}
e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
return 0;
}
static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
__u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
struct udphdr *udp = (struct udphdr *)(buf);
struct vxlanhdr *vxh;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
return 0;
}
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
{
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
flow_rule_match_enc_keyid(rule, &enc_keyid);
if (!enc_keyid.mask->keyid)
return 0;
/* match on VNI is required */
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_vxlan_vni)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on VXLAN VNI is not supported");
netdev_warn(priv->netdev,
"Matching on VXLAN VNI is not supported\n");
return -EOPNOTSUPP;
}
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
return 0;
}
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
.can_offload = mlx5e_tc_tun_can_offload_vxlan,
.calc_hlen = mlx5e_tc_tun_calc_hlen_vxlan,
.init_encap_attr = mlx5e_tc_tun_init_encap_attr_vxlan,
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
};
@@ -128,7 +128,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
 }
 }
-static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
 {
 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -166,17 +166,6 @@ static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
 vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
 }
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
-struct mlx5e_rep_priv *rpriv = priv->ppriv;
-struct mlx5_eswitch_rep *rep = rpriv->rep;
-if (rep->vport == MLX5_VPORT_UPLINK)
-mlx5e_uplink_rep_update_hw_counters(priv);
-else
-mlx5e_vf_rep_update_hw_counters(priv);
-}
 static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 {
 struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -203,7 +192,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 mutex_lock(&priv->state_lock);
 mlx5e_rep_update_sw_counters(priv);
-mlx5e_rep_update_hw_counters(priv);
+priv->profile->update_stats(priv);
 mutex_unlock(&priv->state_lock);
 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
@@ -363,7 +352,7 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
 return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
 }
-static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 .get_drvinfo = mlx5e_rep_get_drvinfo,
 .get_link = ethtool_op_get_link,
 .get_strings = mlx5e_rep_get_strings,
@@ -1101,7 +1090,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
 }
-static int mlx5e_vf_rep_open(struct net_device *dev)
+static int mlx5e_rep_open(struct net_device *dev)
 {
 struct mlx5e_priv *priv = netdev_priv(dev);
 struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1124,7 +1113,7 @@ static int mlx5e_rep_open(struct net_device *dev)
 return err;
 }
-static int mlx5e_vf_rep_close(struct net_device *dev)
+static int mlx5e_rep_close(struct net_device *dev)
 {
 struct mlx5e_priv *priv = netdev_priv(dev);
 struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1276,7 +1265,7 @@ static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev
 }
 static void
-mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1285,7 +1274,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
 }
-static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
+static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
 {
 return mlx5e_change_mtu(netdev, new_mtu, NULL);
 }
@@ -1318,16 +1307,16 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
 return 0;
 }
-static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
-.ndo_open = mlx5e_vf_rep_open,
-.ndo_stop = mlx5e_vf_rep_close,
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+.ndo_open = mlx5e_rep_open,
+.ndo_stop = mlx5e_rep_close,
 .ndo_start_xmit = mlx5e_xmit,
 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
 .ndo_setup_tc = mlx5e_rep_setup_tc,
-.ndo_get_stats64 = mlx5e_vf_rep_get_stats,
+.ndo_get_stats64 = mlx5e_rep_get_stats,
 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
-.ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+.ndo_change_mtu = mlx5e_rep_change_mtu,
 .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
 };
@@ -1355,7 +1344,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
 bool mlx5e_eswitch_rep(struct net_device *netdev)
 {
-if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
+if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
 netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
 return true;
@@ -1418,9 +1407,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
 #endif
 } else {
-netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
+netdev->netdev_ops = &mlx5e_netdev_ops_rep;
 eth_hw_addr_random(netdev);
-netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
+netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
 }
 netdev->watchdog_timeo = 15 * HZ;
@@ -1640,7 +1629,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 }
 }
-static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
+static void mlx5e_rep_enable(struct mlx5e_priv *priv)
 {
 mlx5e_set_netdev_mtu_boundaries(priv);
 }
@@ -1712,15 +1701,15 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
 mlx5_lag_remove(mdev);
 }
-static const struct mlx5e_profile mlx5e_vf_rep_profile = {
+static const struct mlx5e_profile mlx5e_rep_profile = {
 .init = mlx5e_init_rep,
 .cleanup = mlx5e_cleanup_rep,
 .init_rx = mlx5e_init_rep_rx,
 .cleanup_rx = mlx5e_cleanup_rep_rx,
 .init_tx = mlx5e_init_rep_tx,
 .cleanup_tx = mlx5e_cleanup_rep_tx,
-.enable = mlx5e_vf_rep_enable,
-.update_stats = mlx5e_vf_rep_update_hw_counters,
+.enable = mlx5e_rep_enable,
+.update_stats = mlx5e_rep_update_hw_counters,
 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
 .max_tc = 1,
@@ -1759,7 +1748,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 rpriv->rep = rep;
 nch = mlx5e_get_max_num_channels(dev);
-profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+profile = (rep->vport == MLX5_VPORT_UPLINK) ?
+&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
 if (!netdev) {
 pr_warn("Failed to create representor netdev for vport %d\n",
@@ -1769,7 +1759,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 }
 rpriv->netdev = netdev;
-rep->rep_if[REP_ETH].priv = rpriv;
+rep->rep_data[REP_ETH].priv = rpriv;
 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1843,16 +1833,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 return rpriv->netdev;
 }
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+.load = mlx5e_vport_rep_load,
+.unload = mlx5e_vport_rep_unload,
+.get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 struct mlx5_eswitch *esw = mdev->priv.eswitch;
-struct mlx5_eswitch_rep_if rep_if = {};
-rep_if.load = mlx5e_vport_rep_load;
-rep_if.unload = mlx5e_vport_rep_unload;
-rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
-mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 }
 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
......
@@ -91,7 +91,7 @@ struct mlx5e_rep_priv {
 static inline
 struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
 {
-return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+return rep->rep_data[REP_ETH].priv;
 }
 struct mlx5e_neigh {
@@ -150,13 +150,12 @@ struct mlx5e_encap_entry {
 struct hlist_node encap_hlist;
 struct list_head flows;
 u32 encap_id;
-struct ip_tunnel_info tun_info;
+const struct ip_tunnel_info *tun_info;
 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
 struct net_device *out_dev;
 struct net_device *route_dev;
-int tunnel_type;
-int tunnel_hlen;
+struct mlx5e_tc_tunnel *tunnel;
 int reformat_type;
 u8 flags;
 char *encap_header;
......
@@ -53,6 +53,7 @@
 #include "en/port.h"
 #include "en/tc_tun.h"
 #include "lib/devcom.h"
+#include "lib/geneve.h"
 struct mlx5_nic_flow_attr {
 u32 action;
@@ -126,7 +127,7 @@ struct mlx5e_tc_flow {
 };
 struct mlx5e_tc_flow_parse_attr {
-struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
 struct net_device *filter_dev;
 struct mlx5_flow_spec spec;
 int num_mod_hdr_actions;
@@ -799,7 +800,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 }
 if (attr->match_level != MLX5_MATCH_NONE)
-parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
 &flow_act, dest, dest_ix);
@@ -1063,6 +1064,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 return err;
 }
+static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+{
+struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+void *headers_v = MLX5_ADDR_OF(fte_match_param,
+spec->match_value,
+misc_parameters_3);
+u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
+headers_v,
+geneve_tlv_option_0_data);
+return !!geneve_tlv_opt_0_data;
+}
 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 struct mlx5e_tc_flow *flow)
 {
@@ -1084,6 +1098,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
 }
+if (mlx5_flow_has_geneve_opt(flow))
+mlx5_geneve_tlv_option_del(priv->mdev->geneve);
 mlx5_eswitch_del_vlan_action(esw, attr);
 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
@@ -1339,7 +1356,6 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 outer_headers);
 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
-struct flow_match_control enc_control;
 int err;
 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
@@ -1350,9 +1366,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 return err;
 }
-flow_rule_match_enc_control(rule, &enc_control);
-if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
 struct flow_match_ipv4_addrs match;
 flow_rule_match_enc_ipv4_addrs(rule, &match);
@@ -1372,7 +1386,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
-} else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
 struct flow_match_ipv6_addrs match;
 flow_rule_match_enc_ipv6_addrs(rule, &match);
@@ -1497,29 +1511,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
 BIT(FLOW_DISSECTOR_KEY_TCP) |
 BIT(FLOW_DISSECTOR_KEY_IP) |
-BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
 dissector->used_keys);
 return -EOPNOTSUPP;
 }
-if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
-flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
-flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
-flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
-struct flow_match_control match;
-flow_rule_match_enc_control(rule, &match);
-switch (match.key->addr_type) {
-case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
-return -EOPNOTSUPP;
-break;
-default:
-return -EOPNOTSUPP;
-}
+if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+return -EOPNOTSUPP;
 /* In decap flow, header pointers should point to the inner
 * headers, outer header were already set by parse_tunnel_attr
@@ -2581,21 +2587,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
 }
 struct encap_key {
-struct ip_tunnel_key *ip_tun_key;
-int tunnel_type;
+const struct ip_tunnel_key *ip_tun_key;
+struct mlx5e_tc_tunnel *tc_tunnel;
 };
 static inline int cmp_encap_info(struct encap_key *a,
 struct encap_key *b)
 {
 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-a->tunnel_type != b->tunnel_type;
+a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
 }
 static inline int hash_encap_info(struct encap_key *key)
 {
 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
-key->tunnel_type);
+key->tc_tunnel->tunnel_type);
 }
@@ -2625,7 +2631,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 struct mlx5e_tc_flow_parse_attr *parse_attr;
-struct ip_tunnel_info *tun_info;
+const struct ip_tunnel_info *tun_info;
 struct encap_key key, e_key;
 struct mlx5e_encap_entry *e;
 unsigned short family;
@@ -2634,17 +2640,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 int err = 0;
 parse_attr = attr->parse_attr;
-tun_info = &parse_attr->tun_info[out_index];
+tun_info = parse_attr->tun_info[out_index];
 family = ip_tunnel_info_af(tun_info);
 key.ip_tun_key = &tun_info->key;
-key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
+key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
 hash_key = hash_encap_info(&key);
 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
 encap_hlist, hash_key) {
-e_key.ip_tun_key = &e->tun_info.key;
-e_key.tunnel_type = e->tunnel_type;
+e_key.ip_tun_key = &e->tun_info->key;
+e_key.tc_tunnel = e->tunnel;
 if (!cmp_encap_info(&e_key, &key)) {
 found = true;
 break;
@@ -2659,7 +2665,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 if (!e)
 return -ENOMEM;
-e->tun_info = *tun_info;
+e->tun_info = tun_info;
 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
 if (err)
 goto out_err;
@@ -2898,7 +2904,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 } else if (encap) {
 parse_attr->mirred_ifindex[attr->out_count] =
 out_dev->ifindex;
-parse_attr->tun_info[attr->out_count] = *info;
+parse_attr->tun_info[attr->out_count] = info;
 encap = false;
 attr->dests[attr->out_count].flags |=
 MLX5_ESW_DEST_ENCAP;
......
@@ -301,6 +301,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 bool xmit_more)
 {
 struct mlx5_wq_cyc *wq = &sq->wq;
+bool send_doorbell;
 wi->num_bytes = num_bytes;
 wi->num_dma = num_dma;
@@ -310,8 +311,6 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-netdev_tx_sent_queue(sq->txq, num_bytes);
 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -321,7 +320,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 sq->stats->stopped++;
 }
-if (!xmit_more || netif_xmit_stopped(sq->txq))
+send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
+xmit_more);
+if (send_doorbell)
 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
......
@@ -533,8 +533,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
 if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
 async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
-if (mlx5_core_is_ecpf_esw_manager(dev))
-async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
+if (mlx5_eswitch_is_funcs_handler(dev))
+async_event_mask |=
+(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
 return async_event_mask;
 }
......
@@ -1686,13 +1686,41 @@ static int eswitch_vport_event(struct notifier_block *nb,
 return NOTIFY_OK;
 }
+static int query_esw_functions(struct mlx5_core_dev *dev,
+u32 *out, int outlen)
+{
+u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {0};
+MLX5_SET(query_esw_functions_in, in, opcode,
+MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs)
+{
+u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {0};
+int err;
+err = query_esw_functions(dev, out, sizeof(out));
+if (err)
+return err;
+*num_vfs = MLX5_GET(query_esw_functions_out, out,
+host_params_context.host_num_of_vfs);
+esw_debug(dev, "host_num_of_vfs=%d\n", *num_vfs);
+return 0;
+}
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
-int vf_nvports = 0, total_nvports = 0;
 struct mlx5_vport *vport;
+int total_nvports = 0;
+u16 vf_nvports = 0;
 int err;
 int i, enabled_events;
@@ -1712,7 +1740,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 if (mode == SRIOV_OFFLOADS) {
 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
+err = mlx5_esw_query_functions(esw->dev, &vf_nvports);
 if (err)
 return err;
 total_nvports = esw->total_vports;
......
@@ -173,6 +173,9 @@ struct mlx5_esw_offload {
 struct mutex peer_mutex;
 DECLARE_HASHTABLE(encap_tbl, 8);
 DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+DECLARE_HASHTABLE(termtbl_tbl, 8);
+struct mutex termtbl_mutex; /* protects termtbl hash */
+const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
 u8 inline_mode;
 u64 num_flows;
 u8 encap;
@@ -190,7 +193,7 @@ struct mlx5_host_work {
 struct mlx5_eswitch *esw;
 };
-struct mlx5_host_info {
+struct mlx5_esw_functions {
 struct mlx5_nb nb;
 u16 num_vfs;
 };
@@ -219,7 +222,7 @@ struct mlx5_eswitch {
 int mode;
 int nvports;
 u16 manager_vport;
-struct mlx5_host_info host_info;
+struct mlx5_esw_functions esw_funcs;
 };
 void esw_offloads_cleanup(struct mlx5_eswitch *esw);
@@ -268,6 +271,25 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
+struct mlx5_termtbl_handle;
+bool
+mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+struct mlx5_flow_act *flow_act,
+struct mlx5_flow_spec *spec);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+struct mlx5_flow_table *ft,
+struct mlx5_flow_spec *spec,
+struct mlx5_esw_flow_attr *attr,
+struct mlx5_flow_act *flow_act,
+struct mlx5_flow_destination *dest,
+int num_dest);
+void
+mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+struct mlx5_termtbl_handle *tt);
 struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
@@ -338,6 +360,7 @@ struct mlx5_esw_flow_attr {
 struct mlx5_eswitch_rep *rep;
 struct mlx5_core_dev *mdev;
 u32 encap_id;
+struct mlx5_termtbl_handle *termtbl;
 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
 u32 mod_hdr_id;
 u8 match_level;
@@ -386,6 +409,8 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
 struct mlx5_core_dev *dev1);
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u16 *num_vfs);
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
 #define esw_info(__dev, format, ...) \
@@ -404,6 +429,18 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
 MLX5_VPORT_ECPF : MLX5_VPORT_PF;
 }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+{
+/* Ideally device should have the functions changed supported
+ * capability regardless of it being ECPF or PF wherever such
+ * event should be processed such as on eswitch manager device.
+ * However, some ECPF based device might not have this capability
+ * set. Hence OR for ECPF check to cover such device.
+ */
+return MLX5_CAP_ESW(dev, esw_functions_changed) ||
+mlx5_core_is_ecpf_esw_manager(dev);
+}
 static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
 {
 /* Uplink always locate at the last element of the array.*/
@@ -498,6 +535,7 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
......
...@@ -41,7 +41,6 @@ ...@@ -41,7 +41,6 @@
#include "en.h" #include "en.h"
#include "fs_core.h" #include "fs_core.h"
#include "lib/devcom.h" #include "lib/devcom.h"
#include "ecpf.h"
#include "lib/eq.h" #include "lib/eq.h"
/* There are two match-all miss flows, one for unicast dst mac and /* There are two match-all miss flows, one for unicast dst mac and
...@@ -174,7 +173,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, ...@@ -174,7 +173,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_set_misc, misc, MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id); source_eswitch_owner_vhca_id);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (attr->tunnel_match_level != MLX5_MATCH_NONE) if (attr->tunnel_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
...@@ -193,7 +192,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, ...@@ -193,7 +192,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get; goto err_esw_get;
} }
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i); if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule)) if (IS_ERR(rule))
goto err_add_rule; goto err_add_rule;
else else
...@@ -267,10 +270,10 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, ...@@ -267,10 +270,10 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
source_eswitch_owner_vhca_id); source_eswitch_owner_vhca_id);
if (attr->match_level == MLX5_MATCH_NONE) if (attr->match_level == MLX5_MATCH_NONE)
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
else else
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS; MLX5_MATCH_MISC_PARAMETERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
...@@ -295,8 +298,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, ...@@ -295,8 +298,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
bool fwd_rule) bool fwd_rule)
{ {
bool split = (attr->split_count > 0); bool split = (attr->split_count > 0);
int i;
mlx5_del_flow_rules(rule); mlx5_del_flow_rules(rule);
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (attr->dests[i].termtbl)
mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
}
esw->offloads.num_flows--; esw->offloads.num_flows--;
if (fwd_rule) { if (fwd_rule) {
...@@ -333,7 +344,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) ...@@ -333,7 +344,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport]; rep = &esw->offloads.vport_reps[vf_vport];
if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED) if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue; continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
...@@ -1278,7 +1289,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) ...@@ -1278,7 +1289,7 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id); ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_if[rep_type].state, atomic_set(&rep->rep_data[rep_type].state,
REP_UNREGISTERED); REP_UNREGISTERED);
} }
...@@ -1288,9 +1299,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) ...@@ -1288,9 +1299,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type) struct mlx5_eswitch_rep *rep, u8 rep_type)
{ {
if (atomic_cmpxchg(&rep->rep_if[rep_type].state, if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED) REP_LOADED, REP_REGISTERED) == REP_LOADED)
rep->rep_if[rep_type].unload(rep); esw->offloads.rep_ops[rep_type]->unload(rep);
} }
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
...@@ -1351,11 +1362,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, ...@@ -1351,11 +1362,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{ {
int err = 0; int err = 0;
if (atomic_cmpxchg(&rep->rep_if[rep_type].state, if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = rep->rep_if[rep_type].load(esw->dev, rep); err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err) if (err)
atomic_set(&rep->rep_if[rep_type].state, atomic_set(&rep->rep_data[rep_type].state,
REP_REGISTERED); REP_REGISTERED);
} }
@@ -1784,57 +1795,79 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 	esw_prio_tag_acls_cleanup(esw);
 }
 
-static void esw_host_params_event_handler(struct work_struct *work)
+static void esw_functions_changed_event_handler(struct work_struct *work)
 {
 	struct mlx5_host_work *host_work;
 	struct mlx5_eswitch *esw;
-	int err, num_vf = 0;
+	u16 num_vfs = 0;
+	int err;
 
 	host_work = container_of(work, struct mlx5_host_work, work);
 	esw = host_work->esw;
 
-	err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
-	if (err || num_vf == esw->host_info.num_vfs)
+	err = mlx5_esw_query_functions(esw->dev, &num_vfs);
+	if (err || num_vfs == esw->esw_funcs.num_vfs)
 		goto out;
 
 	/* Number of VFs can only change from "0 to x" or "x to 0". */
-	if (esw->host_info.num_vfs > 0) {
-		esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
+	if (esw->esw_funcs.num_vfs > 0) {
+		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
 	} else {
-		err = esw_offloads_load_vf_reps(esw, num_vf);
+		err = esw_offloads_load_vf_reps(esw, num_vfs);
 		if (err)
 			goto out;
 	}
 
-	esw->host_info.num_vfs = num_vf;
+	esw->esw_funcs.num_vfs = num_vfs;
 
 out:
 	kfree(host_work);
 }
 
-static int esw_host_params_event(struct notifier_block *nb,
-				 unsigned long type, void *data)
+static int esw_functions_changed_event(struct notifier_block *nb,
+				       unsigned long type, void *data)
 {
+	struct mlx5_esw_functions *esw_funcs;
 	struct mlx5_host_work *host_work;
-	struct mlx5_host_info *host_info;
 	struct mlx5_eswitch *esw;
 
 	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
 	if (!host_work)
 		return NOTIFY_DONE;
 
-	host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
-	esw = container_of(host_info, struct mlx5_eswitch, host_info);
+	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
+	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
 
 	host_work->esw = esw;
 
-	INIT_WORK(&host_work->work, esw_host_params_event_handler);
+	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
 	queue_work(esw->work_queue, &host_work->work);
 
 	return NOTIFY_OK;
 }
 
+static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
+					     u16 vf_nvports)
+{
+	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+		return;
+
+	MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
+		     ESW_FUNCTIONS_CHANGED);
+	mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+	esw->esw_funcs.num_vfs = vf_nvports;
+}
+
+static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
+{
+	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+		return;
+
+	mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+	flush_workqueue(esw->work_queue);
+}
+
 int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 		      int total_nvports)
 {
@@ -1849,13 +1882,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 		goto err_reps;
 
 	esw_offloads_devcom_init(esw);
+	mutex_init(&esw->offloads.termtbl_mutex);
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
-			     HOST_PARAMS_CHANGE);
-		mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
-		esw->host_info.num_vfs = vf_nvports;
-	}
+	esw_functions_changed_event_init(esw, vf_nvports);
 
 	mlx5_rdma_enable_roce(esw->dev);
@@ -1889,13 +1918,12 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 {
 	u16 num_vfs;
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
-		flush_workqueue(esw->work_queue);
-		num_vfs = esw->host_info.num_vfs;
-	} else {
+	esw_functions_changed_event_cleanup(esw);
+
+	if (mlx5_eswitch_is_funcs_handler(esw->dev))
+		num_vfs = esw->esw_funcs.num_vfs;
+	else
 		num_vfs = esw->dev->priv.sriov.num_vfs;
-	}
 
 	mlx5_rdma_disable_roce(esw->dev);
 	esw_offloads_devcom_cleanup(esw);
@@ -2203,21 +2231,17 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 }
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *__rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type)
 {
-	struct mlx5_eswitch_rep_if *rep_if;
+	struct mlx5_eswitch_rep_data *rep_data;
 	struct mlx5_eswitch_rep *rep;
 	int i;
 
+	esw->offloads.rep_ops[rep_type] = ops;
 	mlx5_esw_for_all_reps(esw, i, rep) {
-		rep_if = &rep->rep_if[rep_type];
-		rep_if->load   = __rep_if->load;
-		rep_if->unload = __rep_if->unload;
-		rep_if->get_proto_dev = __rep_if->get_proto_dev;
-		rep_if->priv = __rep_if->priv;
-
-		atomic_set(&rep_if->state, REP_REGISTERED);
+		rep_data = &rep->rep_data[rep_type];
+		atomic_set(&rep_data->state, REP_REGISTERED);
 	}
 }
 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
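
Note: the registration entry point above now takes one shared const ops table instead of copying per-rep callbacks. A minimal sketch of a caller under the new contract; the my_rep_* helpers are hypothetical placeholders, only the mlx5_eswitch_* API and REP_ETH come from this series:

/* Illustrative sketch of a rep consumer; my_alloc_priv()/my_free_priv()
 * are hypothetical helpers standing in for the consumer's own setup.
 */
static int my_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	/* allocate the per-rep private object and stash it in rep_data */
	rep->rep_data[REP_ETH].priv = my_alloc_priv(dev, rep);
	return rep->rep_data[REP_ETH].priv ? 0 : -ENOMEM;
}

static void my_rep_unload(struct mlx5_eswitch_rep *rep)
{
	my_free_priv(rep->rep_data[REP_ETH].priv);
	rep->rep_data[REP_ETH].priv = NULL;
}

static void *my_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_data[REP_ETH].priv;
}

/* one read-only ops table shared by every rep of this type */
static const struct mlx5_eswitch_rep_ops my_rep_ops = {
	.load		= my_rep_load,
	.unload		= my_rep_unload,
	.get_proto_dev	= my_rep_get_proto_dev,
};

	mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);

Splitting the callbacks out of struct mlx5_eswitch_rep also lets the ops table be const, since all mutable per-rep state is now confined to rep_data.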
@@ -2232,7 +2256,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
 	__unload_reps_all_vport(esw, max_vf, rep_type);
 
 	mlx5_esw_for_all_reps(esw, i, rep)
-		atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
 }
 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2241,7 +2265,7 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 	struct mlx5_eswitch_rep *rep;
 
 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
-	return rep->rep_if[rep_type].priv;
+	return rep->rep_data[rep_type].priv;
 }
 
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -2252,9 +2276,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
 	rep = mlx5_eswitch_get_rep(esw, vport);
 
-	if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
-	    rep->rep_if[rep_type].get_proto_dev)
-		return rep->rep_if[rep_type].get_proto_dev(rep);
+	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
+		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
 	return NULL;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include <linux/mlx5/fs.h>
#include "eswitch.h"
struct mlx5_termtbl_handle {
struct hlist_node termtbl_hlist;
struct mlx5_flow_table *termtbl;
struct mlx5_flow_act flow_act;
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *rule;
int ref_count;
};
static u32
mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest)
{
u32 hash;
hash = jhash_1word(flow_act->action, 0);
hash = jhash((const void *)&flow_act->vlan,
sizeof(flow_act->vlan), hash);
hash = jhash((const void *)&dest->vport.num,
sizeof(dest->vport.num), hash);
	hash = jhash((const void *)&dest->vport.vhca_id,
		     sizeof(dest->vport.vhca_id), hash);
return hash;
}
static int
mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
struct mlx5_flow_destination *dest1,
struct mlx5_flow_act *flow_act2,
struct mlx5_flow_destination *dest2)
{
return flow_act1->action != flow_act2->action ||
dest1->vport.num != dest2->vport.num ||
dest1->vport.vhca_id != dest2->vport.vhca_id ||
memcmp(&flow_act1->vlan, &flow_act2->vlan,
sizeof(flow_act1->vlan));
}
static int
mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_termtbl_handle *tt,
struct mlx5_flow_act *flow_act)
{
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_spec spec = {};
int prio, flags;
int err;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
return -EOPNOTSUPP;
}
	/* As this is the terminating action, the termination table uses the
	 * same prio as the slow path
	 */
prio = FDB_SLOW_PATH;
flags = MLX5_FLOW_TABLE_TERMINATION;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
0, flags);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n");
return -EOPNOTSUPP;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
esw_warn(dev, "Failed to create termination table rule\n");
goto add_flow_err;
}
return 0;
add_flow_err:
err = mlx5_destroy_flow_table(tt->termtbl);
if (err)
esw_warn(dev, "Failed to destroy termination table\n");
return -EOPNOTSUPP;
}
static struct mlx5_termtbl_handle *
mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest)
{
struct mlx5_termtbl_handle *tt;
bool found = false;
u32 hash_key;
int err;
mutex_lock(&esw->offloads.termtbl_mutex);
hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
termtbl_hlist, hash_key) {
if (!mlx5_eswitch_termtbl_cmp(&tt->flow_act, &tt->dest,
flow_act, dest)) {
found = true;
break;
}
}
if (found)
goto tt_add_ref;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
if (!tt) {
err = -ENOMEM;
goto tt_create_err;
}
tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
tt->dest.vport.num = dest->vport.num;
tt->dest.vport.vhca_id = dest->vport.vhca_id;
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
if (err) {
esw_warn(esw->dev, "Failed to create termination table\n");
goto tt_create_err;
}
hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
tt_add_ref:
tt->ref_count++;
mutex_unlock(&esw->offloads.termtbl_mutex);
return tt;
tt_create_err:
kfree(tt);
mutex_unlock(&esw->offloads.termtbl_mutex);
return ERR_PTR(err);
}
void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_termtbl_handle *tt)
{
mutex_lock(&esw->offloads.termtbl_mutex);
if (--tt->ref_count == 0)
hash_del(&tt->termtbl_hlist);
mutex_unlock(&esw->offloads.termtbl_mutex);
if (!tt->ref_count) {
mlx5_del_flow_rules(tt->rule);
mlx5_destroy_flow_table(tt->termtbl);
kfree(tt);
}
}
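
Note: the handle returned by mlx5_eswitch_termtbl_get_create() is reference counted, so every successful get must be balanced by a put. A minimal sketch of that contract, assuming a caller sitting in this file next to the helpers:

static int termtbl_get_put_example(struct mlx5_eswitch *esw,
				   struct mlx5_flow_act *flow_act,
				   struct mlx5_flow_destination *dest)
{
	struct mlx5_termtbl_handle *tt;

	tt = mlx5_eswitch_termtbl_get_create(esw, flow_act, dest);
	if (IS_ERR(tt))
		return PTR_ERR(tt);	/* no reference taken on failure */

	/* ... point a rule at tt->termtbl while the reference is held ... */

	mlx5_eswitch_termtbl_put(esw, tt);	/* last put frees the table */
	return 0;
}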
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
{
if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
return;
src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return;
src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
misc_parameters.source_port);
u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.source_port);
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
return false;
/* push vlan on RX */
return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
((port_mask & port_value) == MLX5_VPORT_UPLINK);
}
struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int num_dest)
{
struct mlx5_flow_act term_tbl_act = {};
struct mlx5_flow_handle *rule = NULL;
bool term_table_created = false;
int num_vport_dests = 0;
int i, curr_dest;
mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < num_dest; i++) {
struct mlx5_termtbl_handle *tt;
/* only vport destinations can be terminated */
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i]);
if (IS_ERR(tt)) {
esw_warn(esw->dev, "Failed to create termination table\n");
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
num_vport_dests++;
/* link the destination with the termination table */
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = tt->termtbl;
term_table_created = true;
}
/* at least one destination should reference a termination table */
if (!term_table_created)
goto revert_changes;
/* create the FTE */
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
goto out;
revert_changes:
	/* revert the changes that were made to the original flow_act
	 * and fall back to the original rule actions
	 */
mlx5_eswitch_termtbl_actions_move(&term_tbl_act, flow_act);
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
/* search for the destination associated with the
* current term table
*/
for (i = 0; i < num_dest; i++) {
if (dest[i].ft != tt->termtbl)
continue;
memset(&dest[i], 0, sizeof(dest[i]));
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[i].vport.num = tt->dest.vport.num;
dest[i].vport.vhca_id = tt->dest.vport.vhca_id;
mlx5_eswitch_termtbl_put(esw, tt);
break;
}
}
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
out:
return rule;
}
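
For context, the expected call site pairs mlx5_eswitch_termtbl_required() with mlx5_eswitch_add_termtbl_rule(). A condensed sketch of that rule-add path; the surrounding offloads code is not part of this hunk:

static struct mlx5_flow_handle *
add_rule_maybe_terminated(struct mlx5_eswitch *esw,
			  struct mlx5_flow_table *fdb,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_destination *dest, int num_dest)
{
	/* steer through per-destination termination tables only when the
	 * action list requires it (e.g. VLAN push on uplink RX)
	 */
	if (mlx5_eswitch_termtbl_required(esw, flow_act, spec))
		return mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     flow_act, dest, num_dest);

	return mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
}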
@@ -108,8 +108,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_STALL_EVENT";
 	case MLX5_EVENT_TYPE_CMD:
 		return "MLX5_EVENT_TYPE_CMD";
-	case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
-		return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
+	case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
+		return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_PAGE_FAULT:
......
@@ -147,6 +147,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 {
 	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
 	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
 	struct mlx5_core_dev *dev = ns->dev;
@@ -167,6 +168,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 			 en_decap);
 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
 		 en_encap);
+	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+		 term);
 
 	switch (ft->op_mod) {
 	case FS_FT_OP_MOD_NORMAL:
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/kernel.h>
#include "mlx5_core.h"
#include "geneve.h"
struct mlx5_geneve {
struct mlx5_core_dev *mdev;
__be16 opt_class;
u8 opt_type;
u32 obj_id;
struct mutex sync_lock; /* protect GENEVE obj operations */
u32 refcount;
};
static int mlx5_geneve_tlv_option_create(struct mlx5_core_dev *mdev,
__be16 class,
u8 type,
u8 len)
{
u32 in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u64 general_obj_types;
void *hdr, *opt;
u16 obj_id;
int err;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT))
return -EINVAL;
hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, geneve_tlv_opt);
MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
MLX5_SET(geneve_tlv_option, opt, option_class, be16_to_cpu(class));
MLX5_SET(geneve_tlv_option, opt, option_type, type);
MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return obj_id;
}
static void mlx5_geneve_tlv_option_destroy(struct mlx5_core_dev *mdev, u16 obj_id)
{
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt)
{
int res = 0;
if (IS_ERR_OR_NULL(geneve))
return -EOPNOTSUPP;
mutex_lock(&geneve->sync_lock);
if (geneve->refcount) {
if (geneve->opt_class == opt->opt_class &&
geneve->opt_type == opt->type) {
/* We already have TLV options obj allocated */
geneve->refcount++;
} else {
/* TLV options obj allocated, but its params
* do not match the new request.
* We support only one such object.
*/
mlx5_core_warn(geneve->mdev,
"Won't create Geneve TLV opt object with class:type:len = 0x%x:0x%x:%d (another class:type already exists)\n",
be16_to_cpu(opt->opt_class),
opt->type,
opt->length);
res = -EOPNOTSUPP;
goto unlock;
}
} else {
/* We don't have any TLV options obj allocated */
res = mlx5_geneve_tlv_option_create(geneve->mdev,
opt->opt_class,
opt->type,
opt->length);
if (res < 0) {
mlx5_core_warn(geneve->mdev,
"Failed creating Geneve TLV opt object class:type:len = 0x%x:0x%x:%d (err=%d)\n",
be16_to_cpu(opt->opt_class),
opt->type, opt->length, res);
goto unlock;
}
geneve->opt_class = opt->opt_class;
geneve->opt_type = opt->type;
geneve->obj_id = res;
geneve->refcount++;
}
unlock:
mutex_unlock(&geneve->sync_lock);
return res;
}
void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve)
{
if (IS_ERR_OR_NULL(geneve))
return;
mutex_lock(&geneve->sync_lock);
if (--geneve->refcount == 0) {
		/* We've just removed the last user of the Geneve option.
		 * Now delete the object in FW.
		 */
mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
geneve->opt_class = 0;
geneve->opt_type = 0;
geneve->obj_id = 0;
}
mutex_unlock(&geneve->sync_lock);
}
struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev)
{
struct mlx5_geneve *geneve =
kzalloc(sizeof(*geneve), GFP_KERNEL);
if (!geneve)
return ERR_PTR(-ENOMEM);
geneve->mdev = mdev;
mutex_init(&geneve->sync_lock);
return geneve;
}
void mlx5_geneve_destroy(struct mlx5_geneve *geneve)
{
if (IS_ERR_OR_NULL(geneve))
return;
/* Lockless since we are unloading */
if (geneve->refcount)
mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
kfree(geneve);
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef __MLX5_GENEVE_H__
#define __MLX5_GENEVE_H__
#include <net/geneve.h>
#include <linux/mlx5/driver.h>
struct mlx5_geneve;
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev);
void mlx5_geneve_destroy(struct mlx5_geneve *geneve);
int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt);
void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve);
#else /* CONFIG_MLX5_ESWITCH */
static inline struct mlx5_geneve
*mlx5_geneve_create(struct mlx5_core_dev *mdev) { return NULL; }
static inline void
mlx5_geneve_destroy(struct mlx5_geneve *geneve) {}
static inline int
mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt) { return 0; }
static inline void
mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_GENEVE_H__ */
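
A minimal sketch of the intended consumer flow for this API; in this series the TC tunnel offload code is the real caller, and offload_geneve_opt() here is an illustrative wrapper only:

/* Illustrative only: in practice the add and del calls are split
 * across the flow-offload add and delete handlers.
 */
static int offload_geneve_opt(struct mlx5_core_dev *mdev,
			      struct geneve_opt *opt)
{
	int err;

	/* Takes a reference; fails with -EOPNOTSUPP if a different
	 * class/type combination is already offloaded.
	 */
	err = mlx5_geneve_tlv_option_add(mdev->geneve, opt);
	if (err < 0)
		return err;

	/* ... offload the flow matching this TLV option ... */

	/* On flow removal: drop the reference. The FW object is only
	 * destroyed once the last user is gone.
	 */
	mlx5_geneve_tlv_option_del(mdev->geneve);
	return 0;
}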
@@ -63,6 +63,7 @@
 #include "accel/tls.h"
 #include "lib/clock.h"
 #include "lib/vxlan.h"
+#include "lib/geneve.h"
 #include "lib/devcom.h"
 #include "diag/fw_tracer.h"
 #include "ecpf.h"
@@ -821,6 +822,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 	mlx5_init_clock(dev);
 
 	dev->vxlan = mlx5_vxlan_create(dev);
+	dev->geneve = mlx5_geneve_create(dev);
 
 	err = mlx5_init_rl_table(dev);
 	if (err) {
@@ -865,6 +867,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 err_rl_cleanup:
 	mlx5_cleanup_rl_table(dev);
 err_tables_cleanup:
+	mlx5_geneve_destroy(dev->geneve);
 	mlx5_vxlan_destroy(dev->vxlan);
 	mlx5_cleanup_mkey_table(dev);
 	mlx5_cleanup_qp_table(dev);
@@ -887,6 +890,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
 	mlx5_mpfs_cleanup(dev);
 	mlx5_cleanup_rl_table(dev);
+	mlx5_geneve_destroy(dev->geneve);
 	mlx5_vxlan_destroy(dev->vxlan);
 	mlx5_cleanup_clock(dev);
 	mlx5_cleanup_reserved_gids(dev);
......
@@ -106,10 +106,10 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 
 	return 0;
 
-destroy_flow_table:
-	mlx5_destroy_flow_table(ft);
 destroy_flow_group:
 	mlx5_destroy_flow_group(fg);
+destroy_flow_table:
+	mlx5_destroy_flow_table(ft);
 free:
 	kvfree(spec);
 	kvfree(flow_group_in);
......
@@ -342,7 +342,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
-	MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
+	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
 	MLX5_EVENT_TYPE_DCT_DRAINED	   = 0x1c,
......
@@ -107,6 +107,7 @@ enum {
 	MLX5_REG_FPGA_CAP	 = 0x4022,
 	MLX5_REG_FPGA_CTRL	 = 0x4023,
 	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+	MLX5_REG_CORE_DUMP	 = 0x402e,
 	MLX5_REG_PCAP		 = 0x5001,
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
@@ -646,6 +647,7 @@ struct mlx5_clock {
 
 struct mlx5_fw_tracer;
 struct mlx5_vxlan;
+struct mlx5_geneve;
 
 struct mlx5_core_dev {
 	struct device *device;
@@ -680,6 +682,7 @@ struct mlx5_core_dev {
 	u32 issi;
 	struct mlx5e_resources mlx5e_res;
 	struct mlx5_vxlan *vxlan;
+	struct mlx5_geneve *geneve;
 	struct {
 		struct mlx5_rsvd_gids reserved_gids;
 		u32 roce_en;
......
@@ -29,17 +29,19 @@ enum {
 };
 
 struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
-	int (*load)(struct mlx5_core_dev *dev,
-		    struct mlx5_eswitch_rep *rep);
-	void (*unload)(struct mlx5_eswitch_rep *rep);
-	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
-	void *priv;
-	atomic_t state;
+struct mlx5_eswitch_rep_ops {
+	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
+	void (*unload)(struct mlx5_eswitch_rep *rep);
+	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+};
+
+struct mlx5_eswitch_rep_data {
+	void *priv;
+	atomic_t state;
 };
 
 struct mlx5_eswitch_rep {
-	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
 	u16 vport;
 	u8 hw_id[ETH_ALEN];
 	u16 vlan;
@@ -47,7 +49,7 @@ struct mlx5_eswitch_rep {
 };
 
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type);
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
......
@@ -47,6 +47,7 @@ enum {
 enum {
 	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
 	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
 };
 
 #define LEFTOVERS_RULE_NUM	 2
......
@@ -155,7 +155,7 @@ enum {
 	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY     = 0x725,
 	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY       = 0x726,
 	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS        = 0x727,
-	MLX5_CMD_OP_QUERY_HOST_PARAMS             = 0x740,
+	MLX5_CMD_OP_QUERY_ESW_FUNCTIONS           = 0x740,
 	MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
 	MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
 	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
@@ -382,7 +382,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         reformat_and_modify_action[0x1];
 	u8         reserved_at_15[0x2];
 	u8         table_miss_action_domain[0x1];
-	u8         reserved_at_18[0x8];
+	u8         termination_table[0x1];
+	u8         reserved_at_19[0x7];
 	u8         reserved_at_20[0x2];
 	u8         log_max_ft_size[0x6];
 	u8         log_max_modify_header_context[0x8];
@@ -664,7 +665,9 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x16];
+	u8         reserved_at_5[0x14];
+	u8         esw_functions_changed[0x1];
+	u8         reserved_at_1a[0x1];
 	u8         ecpf_vport_exists[0x1];
 	u8         counter_eswitch_affinity[0x1];
 	u8         merged_eswitch[0x1];
@@ -715,7 +718,9 @@ struct mlx5_ifc_qos_cap_bits {
 };
 
 struct mlx5_ifc_debug_cap_bits {
-	u8         reserved_at_0[0x20];
+	u8         core_dump_general[0x1];
+	u8         core_dump_qp[0x1];
+	u8         reserved_at_2[0x1e];
 
 	u8         reserved_at_20[0x2];
 	u8         stall_detect[0x1];
@@ -2531,6 +2536,7 @@ union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
 	struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
 	struct mlx5_ifc_qos_cap_bits qos_cap;
+	struct mlx5_ifc_debug_cap_bits debug_cap;
 	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	u8         reserved_at_0[0x8000];
 };
@@ -7236,7 +7242,8 @@ struct mlx5_ifc_create_flow_table_out_bits {
 struct mlx5_ifc_flow_table_context_bits {
 	u8         reformat_en[0x1];
 	u8         decap_en[0x1];
-	u8         reserved_at_2[0x2];
+	u8         reserved_at_2[0x1];
+	u8         termination_table[0x1];
 	u8         table_miss_action[0x4];
 	u8         level[0x8];
 	u8         reserved_at_10[0x8];
@@ -8546,6 +8553,18 @@ struct mlx5_ifc_qcam_reg_bits {
 	u8         reserved_at_1c0[0x80];
 };
 
+struct mlx5_ifc_core_dump_reg_bits {
+	u8         reserved_at_0[0x18];
+	u8         core_dump_type[0x8];
+
+	u8         reserved_at_20[0x30];
+	u8         vhca_id[0x10];
+
+	u8         reserved_at_60[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_80[0x180];
+};
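
As an aside, the core_dump_reg layout above pairs with the new MLX5_REG_CORE_DUMP register id. A hedged sketch of how a general dump might be triggered; the dump-type value and capability gating are assumptions, since this series only introduces the layouts:

static int trigger_core_dump(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(core_dump_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(core_dump_reg)] = {};

	if (!MLX5_CAP_DEBUG(dev, core_dump_general))
		return -EOPNOTSUPP;	/* capability bit added above */

	MLX5_SET(core_dump_reg, in, core_dump_type, 0); /* assumed: general */
	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_CORE_DUMP, 0, 1);
}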
 struct mlx5_ifc_pcap_reg_bits {
 	u8         reserved_at_0[0x8];
 	u8         local_port[0x8];
@@ -9704,7 +9723,7 @@ struct mlx5_ifc_host_params_context_bits {
 	u8         reserved_at_80[0x180];
 };
 
-struct mlx5_ifc_query_host_params_in_bits {
+struct mlx5_ifc_query_esw_functions_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
 
@@ -9714,7 +9733,7 @@ struct mlx5_ifc_query_host_params_in_bits {
 	u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_query_host_params_out_bits {
+struct mlx5_ifc_query_esw_functions_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
......
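
The renamed query is driven like any other mlx5 command, mirroring the MLX5_SET/mlx5_cmd_exec pattern used in geneve.c above. A minimal sketch; the host_params_context field path is an assumption based on the context struct that this rename retains:

static int query_esw_functions_example(struct mlx5_core_dev *dev, u16 *num_vfs)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	int err;

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* assumed field path: the VF count lives in the retained
	 * host_params_context
	 */
	*num_vfs = MLX5_GET(query_esw_functions_out, out,
			    host_params_context.host_num_of_vfs);
	return 0;
}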