Commit 12e9e0d0 authored by Saeed Mahameed

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

This merge syncs with the latest mlx5-next HW bits and layout updates for upcoming
features, along with one patch that improves the
mlx5_create_auto_grouped_flow_table() API across all mlx5 users (sketched below).

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Refactor mlx5_create_auto_grouped_flow_table
  net/mlx5e: Add discard counters per priority
  net/mlx5e: Expose FEC fields and related capability bit
  net/mlx5: Add mlx5_ifc definitions for connection tracking support
  net/mlx5: Add copy header action struct layout
  net/mlx5: Expose resource dump register mapping
  net/mlx5: Add structures and defines for MIRC register
  net/mlx5: Read MCAM register groups 1 and 2
  net/mlx5: Add structures layout for new MCAM access reg groups
  net/mlx5: Expose vDPA emulation device capabilities
  net/mlx5: Add Virtio Emulation related device capabilities
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 6bc80380 61dc7b01
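To make the API change concrete, here is a minimal sketch of how a call site converts from the old positional-argument form to the new attribute-struct form. The struct fields are the ones added to include/linux/mlx5/fs.h in this series; the variable names and the error-handling tail are illustrative only, not taken from any one call site.

    /*
     * Before this series:
     *
     *	ft = mlx5_create_auto_grouped_flow_table(ns, prio, num_entries,
     *						 num_groups, level, flags);
     */

    /* After: callers describe the table through mlx5_flow_table_attr. */
    struct mlx5_flow_table_attr ft_attr = {};
    struct mlx5_flow_table *ft;

    ft_attr.prio = prio;
    ft_attr.max_fte = num_entries;		/* table size */
    ft_attr.level = level;
    ft_attr.flags = flags;
    ft_attr.autogroup.max_num_groups = num_groups;

    ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
    if (IS_ERR(ft))
    	return PTR_ERR(ft);	/* or ERR_CAST()/NULL, as the call site requires */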
@@ -3276,12 +3276,14 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
int num_entries, int num_groups,
u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
0, flags);
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
ft_attr.autogroup.max_num_groups = num_groups;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ERR_CAST(ft);
......
@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
ft = mlx5_create_auto_grouped_flow_table(ns, prio,
table_size,
MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
ft_attr.prio = prio;
ft_attr.max_fte = table_size;
ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return (void *)ft;
......
@@ -1133,6 +1133,7 @@ static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *pr
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
......
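The descriptors above are format strings that get expanded once per priority when the ethtool stats strings are built, so the added entry surfaces one new counter per priority (rx_prio0_discards, rx_prio1_discards, and so on). A minimal sketch of that expansion, assuming eight priorities; this loop is illustrative and not the driver's actual fill routine:

    char name[ETH_GSTRING_LEN];
    int prio, i;

    /* e.g. i == 2 ("rx_prio%d_discards") expands to rx_prio0_discards..rx_prio7_discards */
    for (prio = 0; prio < 8; prio++)
    	for (i = 0; i < ARRAY_SIZE(pport_per_prio_traffic_stats_desc); i++)
    		snprintf(name, sizeof(name),
    			 pport_per_prio_traffic_stats_desc[i].format, prio);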
@@ -960,7 +960,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
int tc_grp_size, tc_tbl_size;
struct mlx5_flow_table_attr ft_attr = {};
int tc_grp_size, tc_tbl_size, tc_num_grps;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
@@ -970,13 +971,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
ft_attr.prio = MLX5E_TC_PRIO;
ft_attr.max_fte = tc_tbl_size;
ft_attr.level = MLX5E_TC_FT_LEVEL;
ft_attr.autogroup.max_num_groups = tc_num_grps;
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_PRIO,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
&ft_attr);
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
......
@@ -277,6 +277,7 @@ enum {
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
}
/* num FTE 2, num FG 2 */
fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
2, 2, 0, 0);
ft_attr.prio = LEGACY_VEPA_PRIO;
ft_attr.max_fte = 2;
ft_attr.autogroup.max_num_groups = 2;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
......
@@ -904,6 +904,7 @@ create_next_size_table(struct mlx5_eswitch *esw,
int level,
u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *fdb;
int sz;
@@ -911,12 +912,12 @@ create_next_size_table(struct mlx5_eswitch *esw,
if (!sz)
return ERR_PTR(-ENOSPC);
fdb = mlx5_create_auto_grouped_flow_table(ns,
table_prio,
sz,
ESW_OFFLOADS_NUM_GROUPS,
level,
flags);
ft_attr.max_fte = sz;
ft_attr.prio = table_prio;
ft_attr.level = level;
ft_attr.flags = flags;
ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
(int)PTR_ERR(fdb), table_prio, level, sz);
......
@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_flow_act *flow_act)
{
static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
int prio, flags;
int err;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
prio = FDB_SLOW_PATH;
flags = MLX5_FLOW_TABLE_TERMINATION;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
0, flags);
ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
ft_attr.prio = FDB_SLOW_PATH;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n");
return -EOPNOTSUPP;
......
@@ -1103,31 +1103,22 @@ EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
u32 level,
u32 flags)
struct mlx5_flow_table_attr *ft_attr)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
if (ft_attr->autogroup.max_num_groups > ft_attr->max_fte)
return ERR_PTR(-EINVAL);
ft_attr.max_fte = num_flow_table_entries;
ft_attr.prio = prio;
ft_attr.level = level;
ft_attr.flags = flags;
ft = mlx5_create_flow_table(ns, &ft_attr);
ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
ft->autogroup.required_groups = ft_attr->autogroup.max_num_groups;
/* We save place for flow groups in addition to max types */
ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
ft->autogroup.group_size = ft->max_fte /
(ft->autogroup.required_groups + 1);
return ft;
}
......
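The refactored helper keeps the original autogrouping behaviour, just sourced from the attribute struct: it rejects attributes whose max_num_groups exceeds max_fte, and it sizes each auto-created group as max_fte divided by (max_num_groups + 1), reserving one extra slot for the groups themselves. A worked example with made-up numbers:

    /* Illustrative values only. */
    struct mlx5_flow_table_attr ft_attr = {
    	.prio = 0,
    	.max_fte = 1024,
    	.autogroup.max_num_groups = 3,
    };

    /*
     * Inside mlx5_create_auto_grouped_flow_table():
     *   required_groups = 3
     *   group_size      = max_fte / (required_groups + 1) = 1024 / 4 = 256
     * Passing .autogroup.max_num_groups = 2048 (> max_fte) would instead
     * fail with ERR_PTR(-EINVAL).
     */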
@@ -131,11 +131,11 @@ static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
MLX5_PCAM_REGS_5000_TO_507F);
}
static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
enum mlx5_mcam_reg_groups group)
{
return mlx5_query_mcam_reg(dev, dev->caps.mcam,
MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
MLX5_MCAM_REGS_FIRST_128);
return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
@@ -221,8 +221,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
}
if (MLX5_CAP_GEN(dev, qcam_reg))
mlx5_get_qcam_reg(dev);
@@ -245,6 +248,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
if (err)
return err;
}
return 0;
}
......
@@ -1105,6 +1105,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
/* NUM OF CAP Types */
MLX5_CAP_NUM
@@ -1120,6 +1121,9 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
MLX5_MCAM_REGS_NUM = 0x3,
};
enum mlx5_mcam_feature_groups {
@@ -1268,7 +1272,16 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
#define MLX5_CAP_MCAM_REG(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
#define MLX5_CAP_MCAM_REG1(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
mng_access_reg_cap_mask.access_regs1.reg)
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
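Taken together, each MCAM access-register group now lands in its own row of caps.mcam, and the matching MLX5_CAP_MCAM_REG* macro selects that row. A hedged sketch of the mapping; the capability bit passed to MLX5_CAP_MCAM_REG1() below is a placeholder, since the real bit names live in the mcam_reg layout, which is not shown in this excerpt:

    /*
     * Group index                    macro used to read it
     * MLX5_MCAM_REGS_FIRST_128       MLX5_CAP_MCAM_REG(mdev, <bit>)
     * MLX5_MCAM_REGS_0x9080_0x90FF   MLX5_CAP_MCAM_REG1(mdev, <bit>)
     * MLX5_MCAM_REGS_0x9100_0x917F   MLX5_CAP_MCAM_REG2(mdev, <bit>)
     */
    if (MLX5_CAP_MCAM_REG1(mdev, some_access_reg_cap)) {	/* placeholder bit name */
    	/* the access register advertised by this bit may be queried */
    }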
@@ -1297,6 +1310,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(device_virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(device_virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
......
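Finally, a sketch of how the new vDPA emulation capability accessors are intended to be used once MLX5_CAP_VDPA_EMULATION has been queried; the field names passed to the macros are placeholders, as the device_virtio_emulation_cap layout is added in mlx5_ifc.h, whose diff is not shown here:

    if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
        MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
    	/* 32-bit fields use MLX5_CAP_DEV_VDPA_EMULATION(),
    	 * 64-bit fields use MLX5_CAP64_DEV_VDPA_EMULATION();
    	 * "some_field"/"some_field64" are placeholders.
    	 */
    	u32 val = MLX5_CAP_DEV_VDPA_EMULATION(mdev, some_field);
    	u64 val64 = MLX5_CAP64_DEV_VDPA_EMULATION(mdev, some_field64);
    }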
@@ -145,6 +145,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_RESOURCE_DUMP = 0xC000,
};
enum mlx5_qpts_trust_state {
@@ -684,7 +686,7 @@ struct mlx5_core_dev {
u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
......
@@ -145,25 +145,25 @@ mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type,
int vport);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
int max_num_groups,
u32 level,
u32 flags);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
struct {
int max_num_groups;
} autogroup;
};
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
......
This diff is collapsed.