Commit c80edd8d authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-12-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-12-08

1) Support range match action in SW steering

Yevgeny Kliteynik says:
=======================

The following patch series adds support for a range match action in
SW Steering.

SW steering has so far been able to match only on exact values of the packet
fields, as requested by the user: the user provides a mask for the fields of
interest and the exact values to be matched on when the traffic is handled.

This series adds a new type of action - Range Match - in which the user
provides a field to be matched on and a range of values (min to max) that
will be considered a hit.

There are several new notions that were implemented in order to support
Range Match:
 - MATCH_RANGES Steering Table Entry (STE): a new STE type that allows
   matching the packet's fields on a range of values instead of a specific
   value.
 - Match Definer: a general FW object that defines which fields in the
   packet will be referenced by the mask and tag of each STE.
   The match definer ID is part of the STE fields, and it defines how the
   HW needs to interpret the STE's mask/tag values.
   Until now, SW steering used the definers that were managed by FW and
   implemented the STE layout as described by the HW spec.
   Now that we're adding a new type of STE, SW steering needs to also be
   able to define this new STE's layout, and this is done through the new
   match definer object. (A minimal sketch of the resulting range
   destination API follows below.)
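As an illustration of how the new destination is expressed through the flow
steering API (a minimal sketch; the two flow tables and the 1500 bound are
placeholder assumptions, not code from this series):

    struct mlx5_flow_destination dest = {};

    dest.type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
    dest.range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
    dest.range.min = 0;           /* lower bound of the range */
    dest.range.max = 1500;        /* upper bound, e.g. the MTU */
    dest.range.hit_ft = hit_ft;   /* taken when the packet length is in range */
    dest.range.miss_ft = miss_ft; /* taken otherwise */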

=======================

2) From Oz, add support for meter mtu offload
   2.1: Refactor the code to allow both metering and range post actions as a
        pre-step for adding police mtu offload support.
   2.2: Instantiate mtu green/red flow tables with a single match-all rule.
        Add the green/red actions to the hit/miss table accordingly.
   2.3: Initialize the meter object with the TC police mtu parameter.
        Use the hardware range match action feature.

3) From MaorD, support routes with more than 2 nexthops in multipath

4) From Michael and Or, improve and extend vport representor counters.

* tag 'mlx5-updates-2022-12-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Expose steering dropped packets counter
  net/mlx5: Refactor and expand rep vport stat group
  net/mlx5e: multipath, support routes with more than 2 nexthops
  net/mlx5e: TC, add support for meter mtu offload
  net/mlx5e: meter, add mtu post meter tables
  net/mlx5e: meter, refactor to allow multiple post meter tables
  net/mlx5: DR, Add support for range match action
  net/mlx5: DR, Add function that tells if STE miss addr has been initialized
  net/mlx5: DR, Some refactoring of miss address handling
  net/mlx5: DR, Manage definers with refcounts
  net/mlx5: DR, Handle FT action in a separate function
  net/mlx5: DR, Rework is_fw_table function
  net/mlx5: DR, Add functions to create/destroy MATCH_DEFINER general object
  net/mlx5: fs, add match on ranges API
  net/mlx5: mlx5_ifc updates for MATCH_DEFINER general object
====================

Link: https://lore.kernel.org/r/20221209001420.142794-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 043cd1e2 4fe1b3a5
@@ -111,6 +111,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
steering/dr_definer.o \
steering/dr_dbg.o lib/smfs.o
#
# SF device
@@ -228,6 +228,17 @@ const char *parse_fs_hdrs(struct trace_seq *p,
return ret;
}
static const char
*fs_dest_range_field_to_str(enum mlx5_flow_dest_range_field field)
{
switch (field) {
case MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN:
return "packet len";
default:
return "unknown dest range field";
}
}
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id)
@@ -259,6 +270,11 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
case MLX5_FLOW_DESTINATION_TYPE_RANGE:
trace_seq_printf(p, "field=%s min=%d max=%d\n",
fs_dest_range_field_to_str(dst->range.field),
dst->range.min, dst->range.max);
break;
case MLX5_FLOW_DESTINATION_TYPE_NONE:
trace_seq_printf(p, "none\n");
break;
@@ -3,6 +3,7 @@
#include "act.h"
#include "en/tc_priv.h"
#include "fs_core.h"
static bool police_act_validate_control(enum flow_action_id act_id,
struct netlink_ext_ack *extack)
@@ -71,6 +72,8 @@ fill_meter_params_from_act(const struct flow_action_entry *act,
params->mode = MLX5_RATE_LIMIT_PPS;
params->rate = act->police.rate_pkt_ps;
params->burst = act->police.burst_pkt;
} else if (act->police.mtu) {
params->mtu = act->police.mtu;
} else {
return -EOPNOTSUPP;
}
@@ -84,14 +87,25 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
enum mlx5_flow_namespace_type ns = mlx5e_get_flow_namespace(parse_state->flow);
struct mlx5e_flow_meter_params *params = &attr->meter_attr.params;
int err;
err = fill_meter_params_from_act(act, &attr->meter_attr.params);
err = fill_meter_params_from_act(act, params);
if (err)
return err;
if (params->mtu) {
if (!(mlx5_fs_get_capabilities(priv->mdev, ns) &
MLX5_FLOW_STEERING_CAP_MATCH_RANGES))
return -EOPNOTSUPP;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_MTU;
} else {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
}
return 0;
}
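The result is two distinct offload strategies behind the same TC action: a
police rule carrying an mtu parameter is offloaded as a plain forward
destination whose target is resolved by a hardware range match (hence the
MLX5_FLOW_STEERING_CAP_MATCH_RANGES gate above), while rate/pps policing
continues to go through the ASO flow meter execution path.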
@@ -241,7 +241,7 @@ mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
}
static struct mlx5e_flow_meter_handle *
__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters, bool alloc_aso)
{
struct mlx5_core_dev *mdev = flow_meters->mdev;
struct mlx5e_flow_meter_aso_obj *meters_obj;
@@ -268,6 +268,9 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
}
meter->act_counter = counter;
if (!alloc_aso)
goto no_aso;
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
entry);
@@ -300,11 +303,12 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
}
bitmap_set(meters_obj->meters_map, pos, 1);
meter->flow_meters = flow_meters;
meter->meters_obj = meters_obj;
meter->obj_id = meters_obj->base_id + pos / 2;
meter->idx = pos % 2;
no_aso:
meter->flow_meters = flow_meters;
mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
@@ -332,6 +336,9 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
mlx5_fc_destroy(mdev, meter->act_counter);
mlx5_fc_destroy(mdev, meter->drop_counter);
if (meter->params.mtu)
goto out_no_aso;
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
bitmap_clear(meters_obj->meters_map, pos, 1);
@@ -345,6 +352,7 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
list_add(&meters_obj->entry, &flow_meters->partial_list);
}
out_no_aso:
mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
kfree(meter);
@@ -409,12 +417,13 @@ mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
{
struct mlx5e_flow_meter_handle *meter;
meter = __mlx5e_flow_meter_alloc(flow_meters);
meter = __mlx5e_flow_meter_alloc(flow_meters, !params->mtu);
if (IS_ERR(meter))
return meter;
hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
meter->params.index = params->index;
meter->params.mtu = params->mtu;
meter->refcnt++;
return meter;
@@ -20,6 +20,7 @@ struct mlx5e_flow_meter_params {
u32 index;
u64 rate;
u64 burst;
u32 mtu;
};
struct mlx5e_flow_meter_handle {
@@ -14,18 +14,49 @@
struct mlx5e_post_meter_priv;
enum mlx5e_post_meter_type {
MLX5E_POST_METER_RATE = 0,
MLX5E_POST_METER_MTU
};
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct mlx5_flow_table *
mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
struct mlx5_flow_table *
mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter);
struct mlx5_flow_table *
mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter);
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
enum mlx5e_post_meter_type type,
struct mlx5_fc *act_counter,
struct mlx5_fc *drop_counter,
struct mlx5_flow_attr *branch_true,
struct mlx5_flow_attr *branch_false);
void
mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5_flow_table *
mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter)
{
return NULL;
}
static inline struct mlx5_flow_table *
mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter)
{
return NULL;
}
#endif
#endif /* __MLX5_EN_POST_METER_H__ */
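To make the selection concrete, here is a reduced sketch (error handling
elided; it mirrors mlx5e_tc_add_flow_meter further below) of how a caller
chooses the post-meter type and instantiates it:

    enum mlx5e_post_meter_type type;
    struct mlx5e_post_meter_priv *post_meter;

    type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
    post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, type,
                                       meter->act_counter, meter->drop_counter,
                                       attr->branch_true, attr->branch_false);

In the MTU case, mlx5e_post_meter_get_mtu_true_ft() and
mlx5e_post_meter_get_mtu_false_ft() then supply the hit/miss tables that the
range destination points at.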
@@ -85,18 +85,25 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
struct vport_stats {
u64 vport_rx_packets;
u64 vport_tx_packets;
u64 vport_rx_bytes;
u64 vport_tx_bytes;
};
static const struct counter_desc vport_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
rx_vport_rdma_unicast_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
tx_vport_rdma_unicast_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
rx_vport_rdma_multicast_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
rx_vport_rdma_multicast_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
tx_vport_rdma_multicast_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
tx_vport_rdma_multicast_bytes) },
};
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
@@ -161,33 +168,80 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct rtnl_link_stats64 *vport_stats;
struct ifla_vf_stats vf_stats;
u32 *out;
int err;
err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return;
err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
if (err) {
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
return;
}
vport_stats = &priv->stats.vf_vport;
#define MLX5_GET_CTR(p, x) \
MLX5_GET64(query_vport_counter_out, p, x)
/* flip tx/rx as we are reporting the counters for the switch vport */
vport_stats->rx_packets = vf_stats.tx_packets;
vport_stats->rx_bytes = vf_stats.tx_bytes;
vport_stats->tx_packets = vf_stats.rx_packets;
vport_stats->tx_bytes = vf_stats.rx_bytes;
rep_stats->vport_rx_packets =
MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
rep_stats->vport_tx_packets =
MLX5_GET_CTR(out, received_ib_unicast.packets) +
MLX5_GET_CTR(out, received_eth_unicast.packets) +
MLX5_GET_CTR(out, received_ib_multicast.packets) +
MLX5_GET_CTR(out, received_eth_multicast.packets) +
MLX5_GET_CTR(out, received_eth_broadcast.packets);
rep_stats->vport_rx_bytes =
MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
rep_stats->vport_tx_bytes =
MLX5_GET_CTR(out, received_ib_unicast.octets) +
MLX5_GET_CTR(out, received_eth_unicast.octets) +
MLX5_GET_CTR(out, received_ib_multicast.octets) +
MLX5_GET_CTR(out, received_eth_multicast.octets) +
MLX5_GET_CTR(out, received_eth_broadcast.octets);
rep_stats->rx_vport_rdma_unicast_packets =
MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
rep_stats->tx_vport_rdma_unicast_packets =
MLX5_GET_CTR(out, received_ib_unicast.packets);
rep_stats->rx_vport_rdma_unicast_bytes =
MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
rep_stats->tx_vport_rdma_unicast_bytes =
MLX5_GET_CTR(out, received_ib_unicast.octets);
rep_stats->rx_vport_rdma_multicast_packets =
MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
rep_stats->tx_vport_rdma_multicast_packets =
MLX5_GET_CTR(out, received_ib_multicast.packets);
rep_stats->rx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);
kvfree(out);
}
static void mlx5e_rep_get_strings(struct net_device *dev,
@@ -463,6 +463,21 @@ struct mlx5e_ptp_cq_stats {
u64 resync_event;
};
struct mlx5e_rep_stats {
u64 vport_rx_packets;
u64 vport_tx_packets;
u64 vport_rx_bytes;
u64 vport_tx_bytes;
u64 rx_vport_rdma_unicast_packets;
u64 tx_vport_rdma_unicast_packets;
u64 rx_vport_rdma_unicast_bytes;
u64 tx_vport_rdma_unicast_bytes;
u64 rx_vport_rdma_multicast_packets;
u64 tx_vport_rdma_multicast_packets;
u64 rx_vport_rdma_multicast_bytes;
u64 tx_vport_rdma_multicast_bytes;
};
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -471,6 +486,7 @@ struct mlx5e_stats {
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
struct mlx5e_rep_stats rep_stats;
};
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
@@ -402,8 +402,9 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
@@ -414,6 +415,7 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
@@ -422,7 +424,9 @@ }
}
ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
type,
meter->act_counter, meter->drop_counter,
attr->branch_true, attr->branch_false);
if (IS_ERR(post_meter)) {
@@ -114,6 +114,7 @@ enum {
MLX5_ATTR_FLAG_ACCEPT = BIT(5),
MLX5_ATTR_FLAG_CT = BIT(6),
MLX5_ATTR_FLAG_TERMINATING = BIT(7),
MLX5_ATTR_FLAG_MTU = BIT(8),
};
/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
@@ -12,10 +12,11 @@ enum vnic_diag_counter {
MLX5_VNIC_DIAG_CQ_OVERRUN,
MLX5_VNIC_DIAG_INVALID_COMMAND,
MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
MLX5_VNIC_DIAG_RX_STEERING_DISCARD,
};
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
u32 *val)
u64 *val)
{
u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
@@ -57,6 +58,10 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
break;
case MLX5_VNIC_DIAG_RX_STEERING_DISCARD:
*val = MLX5_GET64(vnic_diagnostic_statistics, vnic_diag_out,
nic_receive_steering_discard);
break;
}
return 0;
@@ -65,14 +70,14 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
enum vnic_diag_counter type)
{
u32 val = 0;
u64 val = 0;
int ret;
ret = mlx5_esw_query_vnic_diag(vport, type, &val);
if (ret)
return ret;
seq_printf(file, "%d\n", val);
seq_printf(file, "%llu\n", val);
return 0;
}
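Widening val to u64 is what lets the new counter be reported losslessly:
nic_receive_steering_discard is a 64-bit field (read with MLX5_GET64 above),
while the preexisting 32-bit counters are simply zero-extended.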
@@ -112,6 +117,11 @@ static int quota_exceeded_command_show(struct seq_file *file, void *priv)
return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
}
static int rx_steering_discard_show(struct seq_file *file, void *priv)
{
return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_RX_STEERING_DISCARD);
}
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
@@ -119,6 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
DEFINE_SHOW_ATTRIBUTE(rx_steering_discard);
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -179,4 +190,9 @@ void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool
if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
&quota_exceeded_command_fops);
if (MLX5_CAP_GEN(esw->dev, nic_receive_steering_discard))
debugfs_create_file("rx_steering_discard", 0444, vnic_diag, vport,
&rx_steering_discard_fops);
}
@@ -50,6 +50,7 @@
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -201,6 +202,21 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
true);
}
static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
struct mlx5e_meter_attr *meter,
int i)
{
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
dest[i].range.min = 0;
dest[i].range.max = meter->params.mtu;
dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
return 0;
}
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -491,6 +507,9 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
@@ -448,7 +448,8 @@ static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
type == MLX5_FLOW_DESTINATION_TYPE_TIR;
type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
}
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -1578,7 +1579,13 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
d1->ft_num == d2->ft_num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
d1->sampler_id == d2->sampler_id))
d1->sampler_id == d2->sampler_id) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
d1->range.field == d2->range.field &&
d1->range.hit_ft == d2->range.hit_ft &&
d1->range.miss_ft == d2->range.miss_ft &&
d1->range.min == d2->range.min &&
d1->range.max == d2->range.max))
return true;
}
@@ -123,6 +123,7 @@ enum mlx5_flow_steering_mode {
enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
};
struct mlx5_flow_steering {
@@ -118,13 +118,41 @@ struct mlx5_fib_event_work {
};
};
static struct net_device*
mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
struct fib_info *fi,
struct net_device *current_dev)
{
struct net_device *fib_dev;
int i, ldev_idx, nhs;
nhs = fib_info_num_path(fi);
i = 0;
if (current_dev) {
for (; i < nhs; i++) {
fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
if (fib_dev == current_dev) {
i++;
break;
}
}
}
for (; i < nhs; i++) {
fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
ldev_idx = mlx5_lag_dev_get_netdev_idx(ldev, fib_dev);
if (ldev_idx >= 0)
return ldev->pf[ldev_idx].netdev;
}
return NULL;
}
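The helper acts as a cursor over the route's nexthops: passing NULL starts
the walk, and passing the previously returned device resumes right after it,
skipping nexthops that are not lag members. The event handler below uses
exactly two calls to find the first two member devices, regardless of how
many nexthops the route has:

    nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
    nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);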
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
struct fib_nh *fib_nh0, *fib_nh1;
unsigned int nhs;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -140,16 +168,25 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
fi->fib_priority >= mp->fib.priority)
return;
nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);
/* Handle add/replace event */
nhs = fib_info_num_path(fi);
if (nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
struct fib_nh *nh = fib_info_nh(fi, 0);
struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
if (!nh_dev0) {
if (mp->fib.dst == fen_info->dst && mp->fib.dst_len == fen_info->dst_len)
mp->fib.mfi = NULL;
return;
}
if (i < 0)
if (nh_dev0 == nh_dev1) {
mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
i++;
mlx5_lag_set_port_affinity(ldev, i);
@@ -159,21 +196,6 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
return;
}
if (nhs != 2)
return;
/* Verify next hops are ports of the same hca */
fib_nh0 = fib_info_nh(fi, 0);
fib_nh1 = fib_info_nh(fi, 1);
if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
"Multipath offload require two ports of the same HCA\n");
return;
}
/* First time we see multipath route */
if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
struct lag_tracker tracker;
@@ -268,7 +290,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct mlx5_fib_event_work *fib_work;
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
struct net_device *fib_dev;
struct fib_info *fi;
if (info->family != AF_INET)
@@ -285,11 +306,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fi = fen_info->fi;
if (fi->nh)
return NOTIFY_DONE;
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
if (!fib_work)
return NOTIFY_DONE;
@@ -564,6 +564,83 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
u8 *dw_selectors,
u8 *byte_selectors)
{
if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
return;
MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
}
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
u16 format_id,
u8 *dw_selectors,
u8 *byte_selectors,
u8 *match_mask,
u32 *definer_id)
{
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
void *ptr;
int err;
ptr = MLX5_ADDR_OF(create_match_definer_in, in,
general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
MLX5_OBJ_TYPE_MATCH_DEFINER);
ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
MLX5_SET(match_definer, ptr, format_id, format_id);
dr_cmd_set_definer_format(ptr, format_id,
dw_selectors, byte_selectors);
ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return 0;
}
void
mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
@@ -49,7 +49,8 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421,
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
@@ -107,6 +108,8 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
@@ -198,6 +201,30 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
action->sampler->rx_icm_addr,
action->sampler->tx_icm_addr);
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
hit_tbl_ptr = 0;
} else {
hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
hit_tbl_ptr =
DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
}
if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
miss_tbl_ptr = 0;
} else {
miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
miss_tbl_ptr =
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
action->range->definer_id);
break;
default:
return 0;
}
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "dr_types.h"
#include "dr_ste.h"
struct dr_definer_object {
u32 id;
u16 format_id;
u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
u8 match_mask[DR_STE_SIZE_MATCH_TAG];
refcount_t refcount;
};
static bool dr_definer_compare(struct dr_definer_object *definer,
u16 format_id, u8 *dw_selectors,
u8 *byte_selectors, u8 *match_mask)
{
int i;
if (definer->format_id != format_id)
return false;
for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
if (definer->dw_selectors[i] != dw_selectors[i])
return false;
for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
if (definer->byte_selectors[i] != byte_selectors[i])
return false;
if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
return false;
return true;
}
static struct dr_definer_object *
dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
{
struct dr_definer_object *definer_obj;
unsigned long id;
xa_for_each(&dmn->definers_xa, id, definer_obj) {
if (dr_definer_compare(definer_obj, format_id,
dw_selectors, byte_selectors,
match_mask))
return definer_obj;
}
return NULL;
}
static struct dr_definer_object *
dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
{
struct dr_definer_object *definer_obj;
int ret = 0;
definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
if (!definer_obj)
return NULL;
ret = mlx5dr_cmd_create_definer(dmn->mdev,
format_id,
dw_selectors,
byte_selectors,
match_mask,
&definer_obj->id);
if (ret)
goto err_free_definer_obj;
/* Definer ID can have 32 bits, but STE format
* supports only definers with 8 bit IDs.
*/
if (definer_obj->id > 0xff) {
mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
goto err_destroy_definer;
}
definer_obj->format_id = format_id;
memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
refcount_set(&definer_obj->refcount, 1);
ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
goto err_destroy_definer;
}
return definer_obj;
err_destroy_definer:
mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
err_free_definer_obj:
kfree(definer_obj);
return NULL;
}
static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
struct dr_definer_object *definer_obj)
{
mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
xa_erase(&dmn->definers_xa, definer_obj->id);
kfree(definer_obj);
}
int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors,
u8 *match_mask, u32 *definer_id)
{
struct dr_definer_object *definer_obj;
int ret = 0;
definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
byte_selectors, match_mask);
if (!definer_obj) {
definer_obj = dr_definer_create_obj(dmn, format_id,
dw_selectors, byte_selectors,
match_mask);
if (!definer_obj)
return -ENOMEM;
} else {
refcount_inc(&definer_obj->refcount);
}
*definer_id = definer_obj->id;
return ret;
}
void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
{
struct dr_definer_object *definer_obj;
definer_obj = xa_load(&dmn->definers_xa, definer_id);
if (!definer_obj) {
mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
return;
}
if (refcount_dec_and_test(&definer_obj->refcount))
dr_definer_destroy_obj(dmn, definer_obj);
}
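Taken together, the expected calling convention is get/put pairing (a
minimal sketch; the format ID, selector arrays, and mask are assumed to be
prepared by the caller). Identical (format_id, selectors, mask) tuples share
a single FW object whose refcount tracks the users:

    u32 definer_id;
    int err;

    err = mlx5dr_definer_get(dmn, format_id, dw_selectors, byte_selectors,
                             match_mask, &definer_id);
    if (err)
            return err;

    /* ... reference definer_id from a MATCH_RANGES STE ... */

    mlx5dr_definer_put(dmn, definer_id);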
@@ -425,10 +425,11 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
refcount_set(&dmn->refcount, 1);
mutex_init(&dmn->info.rx.mutex);
mutex_init(&dmn->info.tx.mutex);
xa_init(&dmn->definers_xa);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
goto free_domain;
goto def_xa_destroy;
}
dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
@@ -453,7 +454,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
uninit_caps:
dr_domain_caps_uninit(dmn);
free_domain:
def_xa_destroy:
xa_destroy(&dmn->definers_xa);
kfree(dmn);
return NULL;
}
@@ -493,6 +495,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
xa_destroy(&dmn->definers_xa);
mutex_destroy(&dmn->info.tx.mutex);
mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
@@ -35,16 +35,28 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
return 0;
}
static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 *hw_ste)
{
struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
u64 icm_addr;
if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
return;
icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
}
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -58,8 +70,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->chunk->ste_arr;
icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -241,7 +252,6 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -250,9 +260,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->chunk->ste_arr[new_idx];
@@ -773,7 +782,6 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -781,8 +789,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
ste->ste_chain_location = ste_location;
@@ -90,6 +90,16 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p)
{
if (!ste_ctx->is_miss_addr_set)
return false;
/* check if miss address is already set for this type of STE */
return ste_ctx->is_miss_addr_set(hw_ste_p);
}
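Contexts that do not supply the new callback keep reporting false here,
preserving the old behavior of unconditionally writing the miss address; in
this series only the v1 and v2 STE contexts wire up .is_miss_addr_set (see
below), so STEv0 is unaffected.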
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u64 miss_addr)
{
@@ -151,6 +151,7 @@ struct mlx5dr_ste_ctx {
bool is_rx, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
bool (*is_miss_addr_set)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
u64 (*get_miss_addr)(u8 *hw_ste_p);
void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
@@ -13,6 +13,7 @@ enum dr_ste_v1_entry_format {
DR_STE_V1_TYPE_BWC_BYTE = 0x0,
DR_STE_V1_TYPE_BWC_DW = 0x1,
DR_STE_V1_TYPE_MATCH = 0x2,
DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
};
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
@@ -267,6 +268,16 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
{
u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
/* unlike a MATCH STE, a MATCH_RANGES STE carries both hit and miss
 * addresses as part of the action, so both are already set at STE init
 */
return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
}
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -520,6 +531,27 @@ static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
init_color);
}
static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
u32 min, u32 max)
{
MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);
/* When the STE is sent, its mask and tag are swapped in
 * dr_ste_v1_prepare_for_postsend(). This, however, is a match-range STE:
 * it has no mask, and its fields shouldn't be mask/tag swapped.
 * Since we use the common utility functions to send the STE, we have to
 * compensate for that swapping - place the values in the locations that
 * will flip into the correct ones when the STE is written to ICM.
 *
 * min/max_value_2 corresponds to match_dw_0 in its definer.
 * To survive the mask/tag swap, the min/max_2 values are written to the
 * min/max_0 fields.
 *
 * Pkt len is 2 bytes, stored in the higher section of the DW.
 */
MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
}
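A worked example of the shifted encoding (numbers are illustrative): for an
MTU of 1500 the length field occupies the high 16 bits of its DW, so the STE
ends up holding

    min_value_0 = 0    << 16 = 0x00000000
    max_value_0 = 1500 << 16 = 0x05dc0000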
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -535,6 +567,14 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
{
dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
@@ -670,6 +710,20 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_RANGE]) {
/* match ranges requires a new STE of its own type */
dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
/* we do not support setting any action on the match ranges STE */
action_sz = 0;
dr_ste_v1_set_match_range_pkt_len(last_ste,
attr->range.definer_id,
attr->range.min,
attr->range.max);
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -858,6 +912,20 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_RANGE]) {
/* match ranges requires a new STE of its own type */
dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
/* we do not support setting any action on the match ranges STE */
action_sz = 0;
dr_ste_v1_set_match_range_pkt_len(last_ste,
attr->range.definer_id,
attr->range.min,
attr->range.max);
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -2144,6 +2212,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
.is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
@@ -7,6 +7,7 @@
#include "dr_types.h"
#include "dr_ste.h"
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
@@ -202,6 +202,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
.is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
@@ -81,6 +81,7 @@ mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
enum {
DR_STE_SIZE = 64,
DR_STE_SIZE_CTRL = 32,
DR_STE_SIZE_MATCH_TAG = 32,
DR_STE_SIZE_TAG = 16,
DR_STE_SIZE_MASK = 16,
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
@@ -128,6 +129,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_RANGE,
DR_ACTION_TYP_MAX,
};
@@ -237,6 +239,7 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -281,6 +284,13 @@ struct mlx5dr_ste_actions_attr {
u8 dest_reg_id;
u8 init_color;
} aso_flow_meter;
struct {
u64 miss_icm_addr;
u32 definer_id;
u32 min;
u32 max;
} range;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -924,6 +934,7 @@ struct mlx5dr_domain {
struct mlx5dr_ste_ctx *ste_ctx;
struct list_head dbg_tbl_list;
struct mlx5dr_dbg_dump_info dump_info;
struct xarray definers_xa;
};
struct mlx5dr_table_rx_tx {
@@ -1026,6 +1037,15 @@ struct mlx5dr_action_dest_tbl {
};
};
struct mlx5dr_action_range {
struct mlx5dr_domain *dmn;
struct mlx5dr_action *hit_tbl_action;
struct mlx5dr_action *miss_tbl_action;
u32 definer_id;
u32 min;
u32 max;
};
struct mlx5dr_action_ctr {
u32 ctr_id;
u32 offset;
@@ -1072,6 +1092,7 @@ struct mlx5dr_action {
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
struct mlx5dr_action_aso_flow_meter *aso;
struct mlx5dr_action_range *range;
};
};
@@ -1295,6 +1316,14 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id);
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
u16 format_id,
u8 *dw_selectors,
u8 *byte_selectors,
u8 *match_mask,
u32 *definer_id);
void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
u32 definer_id);
struct mlx5dr_cmd_gid_attr {
u8 gid[16];
@@ -1483,4 +1512,18 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
{
return !ft->fs_dr_table.dr_table;
}
static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
{
return (MLX5_CAP_GEN(dev, steering_format_version) >=
MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
(MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
(1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
}
#endif /* _DR_TYPES_H_ */
@@ -7,10 +7,11 @@
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"
#include "dr_types.h"
static bool mlx5_dr_is_fw_table(u32 flags)
static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
{
if (flags & MLX5_FLOW_TABLE_TERMINATION)
if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
@@ -69,7 +70,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -109,7 +110,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -134,7 +135,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -153,7 +154,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -178,7 +179,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -209,11 +210,22 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
return mlx5dr_action_create_dest_match_range(domain,
dst->dest_attr.range.field,
dst->dest_attr.range.hit_ft,
dst->dest_attr.range.miss_ft,
dst->dest_attr.range.min,
dst->dest_attr.range.max);
}
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
struct mlx5_fs_vlan *vlan)
{
@@ -260,7 +272,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -467,6 +479,15 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_RANGE:
tmp_action = create_range_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -702,7 +723,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -727,7 +748,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
if (mlx5_dr_is_fw_table(ft->flags))
if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -780,11 +801,19 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
u32 steering_caps = 0;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
return 0;
return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
if (mlx5dr_supp_match_ranges(ns->dev))
steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
return steering_caps;
}
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
@@ -165,6 +165,41 @@ struct mlx5_ifc_ste_mask_and_match_v1_bits {
u8 action[0x60];
};
struct mlx5_ifc_ste_match_ranges_v1_bits {
u8 entry_format[0x8];
u8 counter_id[0x18];
u8 miss_address_63_48[0x10];
u8 match_definer_ctx_idx[0x8];
u8 miss_address_39_32[0x8];
u8 miss_address_31_6[0x1a];
u8 reserved_at_5a[0x1];
u8 match_polarity[0x1];
u8 reparse[0x1];
u8 reserved_at_5d[0x3];
u8 next_table_base_63_48[0x10];
u8 hash_definer_ctx_idx[0x8];
u8 next_table_base_39_32_size[0x8];
u8 next_table_base_31_5_size[0x1b];
u8 hash_type[0x2];
u8 hash_after_actions[0x1];
u8 reserved_at_9e[0x2];
u8 action[0x60];
u8 max_value_0[0x20];
u8 min_value_0[0x20];
u8 max_value_1[0x20];
u8 min_value_1[0x20];
u8 max_value_2[0x20];
u8 min_value_2[0x20];
u8 max_value_3[0x20];
u8 min_value_3[0x20];
};
struct mlx5_ifc_ste_eth_l2_src_v1_bits {
u8 reserved_at_0[0x1];
u8 sx_sniffer[0x1];
@@ -140,8 +140,21 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
u8 init_color,
u8 meter_id);
struct mlx5dr_action *
mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
u32 field,
struct mlx5_flow_table *hit_ft,
struct mlx5_flow_table *miss_ft,
u32 min,
u32 max);
int mlx5dr_action_destroy(struct mlx5dr_action *action);
int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors,
u8 *match_mask, u32 *definer_id);
void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
@@ -50,6 +50,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_PORT,
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
MLX5_FLOW_DESTINATION_TYPE_RANGE,
};
enum {
@@ -143,6 +144,10 @@
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
enum mlx5_flow_dest_range_field {
MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -156,6 +161,13 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
struct {
struct mlx5_flow_table *hit_ft;
struct mlx5_flow_table *miss_ft;
enum mlx5_flow_dest_range_field field;
u32 min;
u32 max;
} range;
u32 sampler_id;
};
};
@@ -6108,6 +6108,38 @@ struct mlx5_ifc_match_definer_format_32_bits {
u8 inner_dmac_15_0[0x10];
};
enum {
MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
};
#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
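A plausible way these selectors get programmed for the packet-length range
match (an illustrative assumption: the selector-filling code lives in the
action-creation path, outside this excerpt, and DW selectors are taken here
to index 32-bit words):

    u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};     /* all UNUSED (0x0) */
    u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {}; /* all UNUSED (0x0) */
    u8 match_mask[MLX5_FLD_SZ_BYTES(match_definer, match_mask)] = {};

    /* select the DW holding the outer packet length (byte offset 0x48 above) */
    dw_selectors[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
    /* mask the two length bytes, which sit in the high half of that DW */
    MLX5_SET(match_definer_match_mask, match_mask, match_dw_0, 0xffff0000);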
struct mlx5_ifc_match_definer_match_mask_bits {
u8 reserved_at_1c0[5][0x20];
u8 match_dw_8[0x20];
u8 match_dw_7[0x20];
u8 match_dw_6[0x20];
u8 match_dw_5[0x20];
u8 match_dw_4[0x20];
u8 match_dw_3[0x20];
u8 match_dw_2[0x20];
u8 match_dw_1[0x20];
u8 match_dw_0[0x20];
u8 match_byte_7[0x8];
u8 match_byte_6[0x8];
u8 match_byte_5[0x8];
u8 match_byte_4[0x8];
u8 match_byte_3[0x8];
u8 match_byte_2[0x8];
u8 match_byte_1[0x8];
u8 match_byte_0[0x8];
};
struct mlx5_ifc_match_definer_bits {
u8 modify_field_select[0x40];
@@ -6116,9 +6148,41 @@ struct mlx5_ifc_match_definer_bits {
u8 reserved_at_80[0x10];
u8 format_id[0x10];
u8 reserved_at_a0[0x160];
u8 reserved_at_a0[0x60];
u8 format_select_dw3[0x8];
u8 format_select_dw2[0x8];
u8 format_select_dw1[0x8];
u8 format_select_dw0[0x8];
u8 format_select_dw7[0x8];
u8 format_select_dw6[0x8];
u8 format_select_dw5[0x8];
u8 format_select_dw4[0x8];
u8 reserved_at_100[0x18];
u8 format_select_dw8[0x8];
u8 reserved_at_120[0x20];
u8 format_select_byte3[0x8];
u8 format_select_byte2[0x8];
u8 format_select_byte1[0x8];
u8 format_select_byte0[0x8];
u8 format_select_byte7[0x8];
u8 format_select_byte6[0x8];
u8 format_select_byte5[0x8];
u8 format_select_byte4[0x8];
u8 reserved_at_180[0x40];
union {
struct {
u8 match_mask[16][0x20];
};
struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
};
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {