Commit c8fda7d2 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-07-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-07-13

1) Support 802.1ad for bridge offloads

Vlad Buslov Says:
=================

Current mlx5 bridge VLAN offload implementation only supports 802.1Q VLAN
Ethernet protocol. That protocol type is assumed by default and
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL notification is ignored.

In order to support dynamically setting VLAN protocol handle
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL notification by flushing FDB and
re-creating VLAN modify header actions with a new protocol. Implement support
for 802.1ad protocol by saving the current VLAN protocol to per-bridge variable
and re-create the necessary flow groups according to its current value (either
use cvlan or svlan flow fields).
=================

2) debugfs to count ongoing FW commands

3) debugfs to query eswitch vport firmware diagnostic counters

4) Add missing meter configuration in flow action

5) Some misc cleanup

* tag 'mlx5-updates-2022-07-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Remove the duplicating check for striding RQ when enabling LRO
  net/mlx5e: Move the LRO-XSK check to mlx5e_fix_features
  net/mlx5e: Extend flower police validation
  net/mlx5e: configure meter in flow action
  net/mlx5e: Removed useless code in function
  net/mlx5: Bridge, implement QinQ support
  net/mlx5: Bridge, implement infrastructure for VLAN protocol change
  net/mlx5: Bridge, extract VLAN push/pop actions creation
  net/mlx5: Bridge, rename filter fg to vlan_filter
  net/mlx5: Bridge, refactor groups sizes and indices
  net/mlx5: debugfs, Add num of in-use FW command interface slots
  net/mlx5: Expose vnic diagnostic counters for eswitch managed vports
  net/mlx5: Use software VHCA id when it's supported
  net/mlx5: Introduce ifc bits for using software vhca id
  net/mlx5: Use the bitmap API to allocate bitmaps
====================

Link: https://lore.kernel.org/r/20220713225859.401241-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 6e6fbb72 1a550486
...@@ -68,7 +68,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o ...@@ -68,7 +68,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
# #
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \ ecpf.o rdma.o esw/legacy.o \
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/debugfs.o esw/devlink_port.o esw/vporttbl.o esw/qos.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
......
...@@ -166,6 +166,28 @@ static const struct file_operations stats_fops = { ...@@ -166,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write, .write = average_write,
}; };
/* Report how many FW command interface slots are currently in use.
 *
 * Each set bit in cmd->bitmask marks a free slot, so the in-use count is
 * the slot capacity (max_reg_cmds) minus the bitmap weight.  The value is
 * formatted as a decimal line and copied out via simple_read_from_buffer(),
 * which takes care of partial reads and *pos bookkeeping.
 */
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cmd *cmd = filp->private_data;
	char tbuf[6];
	int in_use;
	int len;

	/* free slots = set bits in the mask; in-use = capacity - free */
	in_use = cmd->max_reg_cmds -
		 bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
	len = snprintf(tbuf, sizeof(tbuf), "%d\n", in_use);

	return simple_read_from_buffer(buf, count, pos, tbuf, len);
}
/* Read-only debugfs attribute exposing the in-use command-slot count
 * computed by slots_read(); simple_open() stashes the inode's private
 * data (the mlx5_cmd pointer) into filp->private_data.
 */
static const struct file_operations slots_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = slots_read,
};
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{ {
struct mlx5_cmd_stats *stats; struct mlx5_cmd_stats *stats;
...@@ -176,6 +198,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) ...@@ -176,6 +198,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.dbg.cmdif_debugfs; cmd = &dev->priv.dbg.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root); *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) { for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i]; stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i); namep = mlx5_command_str(i);
......
...@@ -269,6 +269,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, ...@@ -269,6 +269,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id, err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
attr->u.vlan_filtering, br_offloads); attr->u.vlan_filtering, br_offloads);
break; break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
err = mlx5_esw_bridge_vlan_proto_set(vport_num,
esw_owner_vhca_id,
attr->u.vlan_protocol,
br_offloads);
break;
default: default:
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
} }
......
...@@ -10,6 +10,12 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state, ...@@ -10,6 +10,12 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
int act_index, int act_index,
struct mlx5_flow_attr *attr) struct mlx5_flow_attr *attr)
{ {
if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
"Offload not supported when conform action is not pipe or ok");
return false;
}
if (mlx5e_policer_validate(parse_state->flow_action, act, if (mlx5e_policer_validate(parse_state->flow_action, act,
parse_state->extack)) parse_state->extack))
return false; return false;
......
...@@ -742,10 +742,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, ...@@ -742,10 +742,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
eth_rule->flow_spec = *fs; eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft; eth_rule->eth_ft = eth_ft;
if (!eth_ft->ft) {
err = -EINVAL;
goto del_ethtool_rule;
}
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context); rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
......
...@@ -3594,20 +3594,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable) ...@@ -3594,20 +3594,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock); mutex_lock(&priv->state_lock);
if (enable && priv->xsk.refcnt) {
netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
err = -EINVAL;
goto out;
}
cur_params = &priv->channels.params; cur_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
}
new_params = *cur_params; new_params = *cur_params;
if (enable) if (enable)
...@@ -3916,6 +3903,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, ...@@ -3916,6 +3903,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
} }
if (priv->xsk.refcnt) { if (priv->xsk.refcnt) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) { if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n", netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt); priv->xsk.refcnt);
......
...@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads { ...@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_flow_table *ingress_ft; struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg; struct mlx5_flow_group *ingress_vlan_fg;
struct mlx5_flow_group *ingress_filter_fg; struct mlx5_flow_group *ingress_vlan_filter_fg;
struct mlx5_flow_group *ingress_qinq_fg;
struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg; struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft; struct mlx5_flow_table *skip_ft;
...@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign ...@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign
struct mlx5_esw_bridge_offloads *br_offloads); struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable, int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads); struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags, int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads, struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack); struct netlink_ext_ack *extack);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/debugfs.h>
#include "eswitch.h"
/* Identifiers for the per-vport vNIC diagnostic counters exposed through
 * debugfs.  Each value selects one field of the QUERY_VNIC_ENV command
 * output.  File-local only; not part of any external interface.
 */
enum vnic_diag_counter {
	MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
	MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
	MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
	MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
	MLX5_VNIC_DIAG_CQ_OVERRUN,
	MLX5_VNIC_DIAG_INVALID_COMMAND,
	/* renamed from the misspelled MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND */
	MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND,
};

/* Query a single vNIC diagnostic counter for @vport.
 *
 * Issues a QUERY_VNIC_ENV FW command and extracts the field selected by
 * @counter into @val.
 *
 * Return: 0 on success, negative errno from mlx5_cmd_exec() on failure.
 */
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
				    u32 *val)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *dev = vport->dev;
	u16 vport_num = vport->vport;
	void *vnic_diag_out;
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
	/* querying a vport other than the manager's own needs other_vport */
	if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
	/* no default case on purpose: the compiler then warns if a new
	 * vnic_diag_counter value is left unhandled
	 */
	switch (counter) {
	case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
		break;
	case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
				send_queue_priority_update_flow);
		break;
	case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
		break;
	case MLX5_VNIC_DIAG_CQ_OVERRUN:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
		break;
	case MLX5_VNIC_DIAG_INVALID_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
		break;
	case MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND:
		*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
		break;
	}

	return 0;
}

/* Fetch one counter and print it as a single decimal line to @file. */
static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
			    enum vnic_diag_counter type)
{
	u32 val = 0;
	int ret;

	ret = mlx5_esw_query_vnic_diag(vport, type, &val);
	if (ret)
		return ret;

	/* %u, not %d: the counter is u32 and may exceed INT_MAX */
	seq_printf(file, "%u\n", val);
	return 0;
}

/* One thin show() wrapper per counter; file->private is the mlx5_vport
 * passed to debugfs_create_file().
 */
static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
}

static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private,
				MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
}

static int comp_eq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
}

static int async_eq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
}

static int cq_overrun_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
}

static int invalid_command_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
}

static int quota_exceeded_command_show(struct seq_file *file, void *priv)
{
	return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND);
}
/* Generate <name>_fops structs (open/read/release) for each show()
 * callback above, for use with debugfs_create_file().
 */
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
/* Tear down the per-vport debugfs tree created by
 * mlx5_esw_vport_debugfs_create().  debugfs_remove_recursive(NULL) is a
 * no-op, so this is safe even if create was skipped.
 */
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	debugfs_remove_recursive(vport->dbgfs);
	/* clear the dentry so a later destroy cannot double-remove */
	vport->dbgfs = NULL;
}
/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxxx".  Sized so the
 * longest possible name ("sf_65535" or "vf_65535": 8 chars + NUL = 9
 * bytes) fits without truncation; the previous value of 8 truncated
 * 5-digit vf/sf numbers.
 */
#define VNIC_DIAG_DIR_NAME_MAX_LEN 16

/* Create the per-vport debugfs directory and populate the "vnic_diag"
 * subtree with one read-only file per diagnostic counter, gated on the
 * corresponding device capability bits.
 *
 * @esw:       eswitch owning the vport
 * @vport_num: vport to expose
 * @is_sf:     true when the vport backs a sub-function
 * @sf_num:    sub-function number, used for the dir name when @is_sf
 */
void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	struct dentry *vnic_diag;
	char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return;

	if (vport_num == MLX5_VPORT_PF) {
		strcpy(dir_name, "pf");
	} else if (vport_num == MLX5_VPORT_ECPF) {
		strcpy(dir_name, "ecpf");
	} else {
		err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
			       is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
		/* catch encoding errors AND truncation (snprintf returns the
		 * would-be length, which err < 0 alone does not detect)
		 */
		if (WARN_ON(err < 0 || err >= VNIC_DIAG_DIR_NAME_MAX_LEN))
			return;
	}

	vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
	vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);

	if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
		debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
				    &total_q_under_processor_handle_fops);
		debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
				    &send_queue_priority_update_flow_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
		debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
				    &comp_eq_overrun_fops);
		debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
				    &async_eq_overrun_fops);
	}

	if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
		debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);

	if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
		debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
				    &invalid_command_fops);

	if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
		debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
				    &quota_exceeded_command_fops);
}
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h> #include <linux/mlx5/mpfs.h>
#include <linux/debugfs.h>
#include "esw/acl/lgcy.h" #include "esw/acl/lgcy.h"
#include "esw/legacy.h" #include "esw/legacy.h"
#include "esw/qos.h" #include "esw/qos.h"
...@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, ...@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err) if (err)
return err; return err;
mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
err = esw_offloads_load_rep(esw, vport_num); err = esw_offloads_load_rep(esw, vport_num);
if (err) if (err)
goto err_rep; goto err_rep;
...@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, ...@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err; return err;
err_rep: err_rep:
mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num); mlx5_esw_vport_disable(esw, vport_num);
return err; return err;
} }
...@@ -1016,6 +1019,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, ...@@ -1016,6 +1019,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{ {
esw_offloads_unload_rep(esw, vport_num); esw_offloads_unload_rep(esw, vport_num);
mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num); mlx5_esw_vport_disable(esw, vport_num);
} }
...@@ -1622,6 +1626,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1622,6 +1626,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw; dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
esw_info(dev, esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n", "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports, esw->total_vports,
...@@ -1645,6 +1650,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) ...@@ -1645,6 +1650,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n"); esw_info(esw->dev, "cleanup\n");
debugfs_remove_recursive(esw->dbgfs);
esw->dev->priv.eswitch = NULL; esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue); destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt)); WARN_ON(refcount_read(&esw->qos.refcnt));
......
...@@ -191,6 +191,7 @@ struct mlx5_vport { ...@@ -191,6 +191,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events; enum mlx5_eswitch_vport_event enabled_events;
int index; int index;
struct devlink_port *dl_port; struct devlink_port *dl_port;
struct dentry *dbgfs;
}; };
struct mlx5_esw_indir_table; struct mlx5_esw_indir_table;
...@@ -336,6 +337,7 @@ struct mlx5_eswitch { ...@@ -336,6 +337,7 @@ struct mlx5_eswitch {
u32 large_group_num; u32 large_group_num;
} params; } params;
struct blocking_notifier_head n_head; struct blocking_notifier_head n_head;
struct dentry *dbgfs;
}; };
void esw_offloads_disable(struct mlx5_eswitch *esw); void esw_offloads_disable(struct mlx5_eswitch *esw);
...@@ -684,6 +686,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_ ...@@ -684,6 +686,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum); u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
......
...@@ -512,6 +512,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw, ...@@ -512,6 +512,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
} }
} }
static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
struct mlx5e_flow_meter_handle *meter;
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
flow_act->exe_aso.return_reg_id = 5;
}
struct mlx5_flow_handle * struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
...@@ -579,6 +593,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, ...@@ -579,6 +593,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr; flow_act.modify_hdr = attr->modify_hdr;
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
esw_setup_meter(attr, &flow_act);
if (split) { if (split) {
fwd_attr.chain = attr->chain; fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio; fwd_attr.prio = attr->prio;
...@@ -3704,12 +3722,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p ...@@ -3704,12 +3722,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err) if (err)
goto devlink_err; goto devlink_err;
mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
err = mlx5_esw_offloads_rep_load(esw, vport_num); err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err) if (err)
goto rep_err; goto rep_err;
return 0; return 0;
rep_err: rep_err:
mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num); mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err: devlink_err:
mlx5_esw_vport_disable(esw, vport_num); mlx5_esw_vport_disable(esw, vport_num);
...@@ -3719,6 +3739,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p ...@@ -3719,6 +3739,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{ {
mlx5_esw_offloads_rep_unload(esw, vport_num); mlx5_esw_offloads_rep_unload(esw, vport_num);
mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num); mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num); mlx5_esw_vport_disable(esw, vport_num);
} }
......
...@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) ...@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]); sw_owner_id[i]);
} }
if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
dev->priv.sw_vhca_id > 0)
MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
return mlx5_cmd_exec_in(dev, init_hca, in); return mlx5_cmd_exec_in(dev, init_hca, in);
} }
......
...@@ -38,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) ...@@ -38,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->steering_sw_icm_alloc_blocks = dm->steering_sw_icm_alloc_blocks =
kcalloc(BITS_TO_LONGS(steering_icm_blocks), bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
sizeof(unsigned long), GFP_KERNEL);
if (!dm->steering_sw_icm_alloc_blocks) if (!dm->steering_sw_icm_alloc_blocks)
goto err_steering; goto err_steering;
} }
...@@ -50,8 +49,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) ...@@ -50,8 +49,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_sw_icm_alloc_blocks = dm->header_modify_sw_icm_alloc_blocks =
kcalloc(BITS_TO_LONGS(header_modify_icm_blocks), bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
sizeof(unsigned long), GFP_KERNEL);
if (!dm->header_modify_sw_icm_alloc_blocks) if (!dm->header_modify_sw_icm_alloc_blocks)
goto err_modify_hdr; goto err_modify_hdr;
} }
...@@ -66,8 +64,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) ...@@ -66,8 +64,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_pattern_sw_icm_alloc_blocks = dm->header_modify_pattern_sw_icm_alloc_blocks =
kcalloc(BITS_TO_LONGS(header_modify_pattern_icm_blocks), bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
sizeof(unsigned long), GFP_KERNEL);
if (!dm->header_modify_pattern_sw_icm_alloc_blocks) if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
goto err_pattern; goto err_pattern;
} }
...@@ -75,10 +72,10 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) ...@@ -75,10 +72,10 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
return dm; return dm;
err_pattern: err_pattern:
kfree(dm->header_modify_sw_icm_alloc_blocks); bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
err_modify_hdr: err_modify_hdr:
kfree(dm->steering_sw_icm_alloc_blocks); bitmap_free(dm->steering_sw_icm_alloc_blocks);
err_steering: err_steering:
kfree(dm); kfree(dm);
...@@ -97,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) ...@@ -97,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks, WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) - BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
kfree(dm->steering_sw_icm_alloc_blocks); bitmap_free(dm->steering_sw_icm_alloc_blocks);
} }
if (dm->header_modify_sw_icm_alloc_blocks) { if (dm->header_modify_sw_icm_alloc_blocks) {
...@@ -105,7 +102,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) ...@@ -105,7 +102,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev, BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_sw_icm_size) - log_header_modify_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
kfree(dm->header_modify_sw_icm_alloc_blocks); bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
} }
if (dm->header_modify_pattern_sw_icm_alloc_blocks) { if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
...@@ -113,7 +110,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev) ...@@ -113,7 +110,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev, BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_pattern_sw_icm_size) - log_header_modify_pattern_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)))); MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
kfree(dm->header_modify_pattern_sw_icm_alloc_blocks); bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
} }
kfree(dm); kfree(dm);
......
...@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444); ...@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static u32 sw_owner_id[4]; static u32 sw_owner_id[4];
#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
static DEFINE_IDA(sw_vhca_ida);
enum { enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0, MLX5_ATOMIC_REQ_MODE_BE = 0x0,
...@@ -492,6 +494,31 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev) ...@@ -492,6 +494,31 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err; return err;
} }
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
int err;
if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
return err;
if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
!(dev->priv.sw_vhca_id > 0))
return 0;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
}
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{ {
struct mlx5_profile *prof = &dev->profile; struct mlx5_profile *prof = &dev->profile;
...@@ -662,6 +689,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev) ...@@ -662,6 +689,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out; goto out;
} }
memset(set_ctx, 0, set_sz);
err = handle_hca_cap_2(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
goto out;
}
out: out:
kfree(set_ctx); kfree(set_ctx);
return err; return err;
...@@ -1506,6 +1540,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) ...@@ -1506,6 +1540,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err) if (err)
goto err_hca_caps; goto err_hca_caps;
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
* be used by both driver and FW when it's applicable.
*/
dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
MAX_SW_VHCA_ID,
GFP_KERNEL);
if (dev->priv.sw_vhca_id < 0)
mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
dev->priv.sw_vhca_id);
return 0; return 0;
err_hca_caps: err_hca_caps:
...@@ -1530,6 +1576,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) ...@@ -1530,6 +1576,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{ {
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
mlx5_hca_caps_free(dev); mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev); mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev); mlx5_pagealloc_cleanup(dev);
......
...@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, ...@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
goto free; goto free;
MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1); MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliated_vhca_id,
MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
} else {
MLX5_SET(modify_nic_vport_context_in, in, MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliated_vhca_id, nic_vport_context.affiliated_vhca_id,
MLX5_CAP_GEN(master_mdev, vhca_id)); MLX5_CAP_GEN(master_mdev, vhca_id));
}
MLX5_SET(modify_nic_vport_context_in, in, MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria, nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria)); MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
......
...@@ -610,6 +610,7 @@ struct mlx5_priv { ...@@ -610,6 +610,7 @@ struct mlx5_priv {
spinlock_t ctx_lock; spinlock_t ctx_lock;
struct mlx5_adev **adev; struct mlx5_adev **adev;
int adev_idx; int adev_idx;
int sw_vhca_id;
struct mlx5_events *events; struct mlx5_events *events;
struct mlx5_flow_steering *steering; struct mlx5_flow_steering *steering;
......
...@@ -1826,7 +1826,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { ...@@ -1826,7 +1826,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 max_reformat_remove_size[0x8]; u8 max_reformat_remove_size[0x8];
u8 max_reformat_remove_offset[0x8]; u8 max_reformat_remove_offset[0x8];
u8 reserved_at_c0[0x740]; u8 reserved_at_c0[0x160];
u8 reserved_at_220[0x1];
u8 sw_vhca_id_valid[0x1];
u8 sw_vhca_id[0xe];
u8 reserved_at_230[0x10];
u8 reserved_at_240[0x5c0];
}; };
enum mlx5_ifc_flow_destination_type { enum mlx5_ifc_flow_destination_type {
...@@ -3782,6 +3789,11 @@ struct mlx5_ifc_rmpc_bits { ...@@ -3782,6 +3789,11 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq; struct mlx5_ifc_wq_bits wq;
}; };
enum {
VHCA_ID_TYPE_HW = 0,
VHCA_ID_TYPE_SW = 1,
};
struct mlx5_ifc_nic_vport_context_bits { struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5]; u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3]; u8 min_wqe_inline_mode[0x3];
...@@ -3798,8 +3810,8 @@ struct mlx5_ifc_nic_vport_context_bits { ...@@ -3798,8 +3810,8 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1]; u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1]; u8 event_on_uc_address_change[0x1];
u8 reserved_at_40[0xc]; u8 vhca_id_type[0x1];
u8 reserved_at_41[0xb];
u8 affiliation_criteria[0x4]; u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10]; u8 affiliated_vhca_id[0x10];
...@@ -7259,7 +7271,12 @@ struct mlx5_ifc_init_hca_in_bits { ...@@ -7259,7 +7271,12 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10]; u8 reserved_at_20[0x10];
u8 op_mod[0x10]; u8 op_mod[0x10];
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x20];
u8 reserved_at_60[0x2];
u8 sw_vhca_id[0xe];
u8 reserved_at_70[0x10];
u8 sw_owner_id[4][0x20]; u8 sw_owner_id[4][0x20];
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment