Commit 1e5e4acb authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5-updates-2021-04-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-21

devlink external port attribute for SF (Sub-Function) port flavour

This adds the support to instantiate Sub-Functions on external hosts
E.g. when the Eswitch manager is enabled on the ARM SmartNIC SoC CPU, users
are now able to spawn new Sub-Functions on the Host server CPU.

Parav Pandit Says:
==================

This series introduces and uses external attribute for the SF port to
indicate that a SF port belongs to an external controller.

This is needed to generate unique phys_port_name when PF and SF numbers
are overlapping between local and external controllers.
For example, with two controllers 0 and 1, each controller has a SF
with PF number 0 and SF number 77. Here, phys_port_name would have a
duplicate entry if it didn't include the controller number.

Hence, add controller number optionally when a SF port is for an
external controller. This extension is similar to existing PF and VF
eswitch ports of the external controller.

When a SF is for external controller an example view of external SF
port and config sequence:

On eswitch system:
$ devlink dev eswitch set pci/0033:01:00.0 mode switchdev

$ devlink port show
pci/0033:01:00.0/196607: type eth netdev enP51p1s0f0np0 flavour physical port 0 splittable false
pci/0033:01:00.0/131072: type eth netdev eth0 flavour pcipf controller 1 pfnum 0 external true splittable false
  function:
    hw_addr 00:00:00:00:00:00

$ devlink port add pci/0033:01:00.0 flavour pcisf pfnum 0 sfnum 77 controller 1
pci/0033:01:00.0/163840: type eth netdev eth1 flavour pcisf controller 1 pfnum 0 sfnum 77 splittable false
  function:
    hw_addr 00:00:00:00:00:00 state inactive opstate detached

phys_port_name construction:
$ cat /sys/class/net/eth1/phys_port_name
c1pf0sf77

Patch summary:
The first 3 patches prepare the eswitch to handle vports in a more generic
way, using an xarray to look up a vport from its unique vport number.
Patch-1 returns maximum eswitch ports only when eswitch is enabled
Patch-2 prepares eswitch to return eswitch max ports from a struct
Patch-3 uses xarray for vport and representor lookup
Patch-4 considers SF for an additional range of SF vports
Patch-5 relies on SF hw table to check SF support
Patch-6 extends SF devlink port attribute for external flag
Patch-7 stores the per controller SF allocation attributes
Patch-8 uses SF function id for filtering events
Patch-9 uses helper for allocation and free
Patch-10 splits hw table into per controller table and generic one
Patch-11 extends sf table for additional range

==================

====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 95aafe91 f1b9acd3
...@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, ...@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
} }
if (!vport->egress.acl) { if (!vport->egress.acl) {
vport->egress.acl = esw_acl_table_create(esw, vport->vport, vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size); table_size);
if (IS_ERR(vport->egress.acl)) { if (IS_ERR(vport->egress.acl)) {
......
...@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) ...@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport); esw_acl_egress_vlan_grp_destroy(vport);
} }
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num) static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{ {
return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num); return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
} }
...@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport ...@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++; table_size++;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
table_size++; table_size++;
vport->egress.acl = esw_acl_table_create(esw, vport->vport, vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR(vport->egress.acl)) { if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl); err = PTR_ERR(vport->egress.acl);
......
...@@ -6,14 +6,14 @@ ...@@ -6,14 +6,14 @@
#include "helper.h" #include "helper.h"
struct mlx5_flow_table * struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
{ {
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns; struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl; struct mlx5_flow_table *acl;
int acl_supported; int acl_supported;
int vport_index; u16 vport_num;
int err; int err;
acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ? acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
...@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) ...@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
if (!acl_supported) if (!acl_supported)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
vport_num = vport->vport;
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num, esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress"); ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num); root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
if (!root_ns) { if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n", esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num); vport_num);
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
/* General acl helper functions */ /* General acl helper functions */
struct mlx5_flow_table * struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size); esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);
/* Egress acl helper functions */ /* Egress acl helper functions */
void esw_acl_egress_table_destroy(struct mlx5_vport *vport); void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
......
...@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, ...@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
} }
if (!vport->ingress.acl) { if (!vport->ingress.acl) {
vport->ingress.acl = esw_acl_table_create(esw, vport->vport, vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size); table_size);
if (IS_ERR(vport->ingress.acl)) { if (IS_ERR(vport->ingress.acl)) {
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include "ofld.h" #include "ofld.h"
static bool static bool
esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport) const struct mlx5_vport *vport)
{ {
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
...@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, ...@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (esw_acl_ingress_prio_tag_enabled(esw, vport)) if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++; num_ftes++;
vport->ingress.acl = esw_acl_table_create(esw, vport->vport, vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes); num_ftes);
if (IS_ERR(vport->ingress.acl)) { if (IS_ERR(vport->ingress.acl)) {
......
...@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i ...@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
memcpy(ppid->id, &parent_id, sizeof(parent_id)); memcpy(ppid->id, &parent_id, sizeof(parent_id));
} }
static bool static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{ {
return vport_num == MLX5_VPORT_UPLINK || return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) || (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
...@@ -124,7 +123,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 ...@@ -124,7 +123,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
} }
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 sfnum) u16 vport_num, u32 controller, u32 sfnum)
{ {
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {}; struct netdev_phys_item_id ppid = {};
...@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p ...@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
mlx5_esw_get_port_parent_id(dev, &ppid); mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len; dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum); devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev); devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
err = devlink_port_register(devlink, dl_port, dl_port_index); err = devlink_port_register(devlink, dl_port, dl_port_index);
......
...@@ -216,7 +216,8 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) ...@@ -216,7 +216,8 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
int esw_legacy_enable(struct mlx5_eswitch *esw) int esw_legacy_enable(struct mlx5_eswitch *esw)
{ {
struct mlx5_vport *vport; struct mlx5_vport *vport;
int ret, i; unsigned long i;
int ret;
ret = esw_create_legacy_table(esw); ret = esw_create_legacy_table(esw);
if (ret) if (ret)
......
...@@ -88,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) ...@@ -88,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
struct mlx5_vport *__must_check struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{ {
u16 idx; struct mlx5_vport *vport;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); vport = xa_load(&esw->vports, vport_num);
if (!vport) {
if (idx > esw->total_vports - 1) { esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
vport_num, idx);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
return vport;
return &esw->vports[idx];
} }
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
...@@ -345,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, ...@@ -345,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
{ {
u8 *mac = vaddr->node.addr; u8 *mac = vaddr->node.addr;
struct mlx5_vport *vport; struct mlx5_vport *vport;
u16 i, vport_num; unsigned long i;
u16 vport_num;
mlx5_esw_for_all_vports(esw, i, vport) { mlx5_esw_for_each_vport(esw, i, vport) {
struct hlist_head *vport_hash = vport->mc_list; struct hlist_head *vport_hash = vport->mc_list;
struct vport_addr *iter_vaddr = struct vport_addr *iter_vaddr =
l2addr_hash_find(vport_hash, l2addr_hash_find(vport_hash,
...@@ -1175,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) ...@@ -1175,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{ {
struct mlx5_vport *vport; struct mlx5_vport *vport;
int i; unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->qos, 0, sizeof(vport->qos)); memset(&vport->qos, 0, sizeof(vport->qos));
...@@ -1213,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) ...@@ -1213,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{ {
int i; struct mlx5_vport *vport;
unsigned long i;
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs) mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
mlx5_eswitch_unload_vport(esw, i); if (!vport->enabled)
continue;
mlx5_eswitch_unload_vport(esw, vport->vport);
}
} }
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events) enum mlx5_eswitch_vport_event enabled_events)
{ {
struct mlx5_vport *vport;
unsigned long i;
int err; int err;
int i;
mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) { mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
err = mlx5_eswitch_load_vport(esw, i, enabled_events); err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
if (err) if (err)
goto vf_err; goto vf_err;
} }
...@@ -1234,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, ...@@ -1234,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
return 0; return 0;
vf_err: vf_err:
mlx5_eswitch_unload_vf_vports(esw, i - 1); mlx5_eswitch_unload_vf_vports(esw, num_vfs);
return err; return err;
} }
...@@ -1563,24 +1566,161 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) ...@@ -1563,24 +1566,161 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
up_write(&esw->mode_lock); up_write(&esw->mode_lock);
} }
static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
MLX5_SET(query_hca_cap_in, in, other_function, true);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
void *query_ctx;
void *hca_caps;
int err;
if (!mlx5_core_is_ecpf(dev)) {
*max_sfs = 0;
return 0;
}
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
return -ENOMEM;
err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
if (err)
goto out_free;
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);
out_free:
kfree(query_ctx);
return err;
}
static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
vport = kzalloc(sizeof(*vport), GFP_KERNEL);
if (!vport)
return -ENOMEM;
vport->dev = esw->dev;
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
goto insert_err;
esw->total_vports++;
return 0;
insert_err:
kfree(vport);
return err;
}
static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
mlx5_esw_for_each_vport(esw, i, vport)
mlx5_esw_vport_free(esw, vport);
xa_destroy(&esw->vports);
}
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
u16 max_host_pf_sfs;
u16 base_sf_num;
int idx = 0;
int err;
int i;
xa_init(&esw->vports);
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
if (err)
goto err;
if (esw->first_host_vport == MLX5_VPORT_PF)
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
if (err)
goto err;
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
}
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
if (err)
goto err;
xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
idx++;
}
err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
if (err)
goto err;
for (i = 0; i < max_host_pf_sfs; i++) {
err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
if (err)
goto err;
xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
idx++;
}
if (mlx5_ecpf_vport_exists(dev)) {
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
if (err)
goto err;
idx++;
}
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
return 0;
err:
mlx5_esw_vports_cleanup(esw);
return err;
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev) int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{ {
struct mlx5_eswitch *esw; struct mlx5_eswitch *esw;
struct mlx5_vport *vport; int err;
int total_vports;
int err, i;
if (!MLX5_VPORT_MANAGER(dev)) if (!MLX5_VPORT_MANAGER(dev))
return 0; return 0;
total_vports = mlx5_eswitch_get_total_vports(dev);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
total_vports,
MLX5_MAX_UC_PER_VPORT(dev),
MLX5_MAX_MC_PER_VPORT(dev));
esw = kzalloc(sizeof(*esw), GFP_KERNEL); esw = kzalloc(sizeof(*esw), GFP_KERNEL);
if (!esw) if (!esw)
return -ENOMEM; return -ENOMEM;
...@@ -1595,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1595,18 +1735,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort; goto abort;
} }
esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport), err = mlx5_esw_vports_init(esw);
GFP_KERNEL); if (err)
if (!esw->vports) {
err = -ENOMEM;
goto abort; goto abort;
}
esw->total_vports = total_vports;
err = esw_offloads_init_reps(esw); err = esw_offloads_init_reps(esw);
if (err) if (err)
goto abort; goto reps_err;
mutex_init(&esw->offloads.encap_tbl_lock); mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl); hash_init(esw->offloads.encap_tbl);
...@@ -1619,25 +1754,25 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1619,25 +1754,25 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
mutex_init(&esw->state_lock); mutex_init(&esw->state_lock);
init_rwsem(&esw->mode_lock); init_rwsem(&esw->mode_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
}
esw->enabled_vports = 0; esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE; esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw; dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
MLX5_MAX_UC_PER_VPORT(dev),
MLX5_MAX_MC_PER_VPORT(dev));
return 0; return 0;
reps_err:
mlx5_esw_vports_cleanup(esw);
abort: abort:
if (esw->work_queue) if (esw->work_queue)
destroy_workqueue(esw->work_queue); destroy_workqueue(esw->work_queue);
kfree(esw->vports);
kfree(esw); kfree(esw);
return err; return err;
} }
...@@ -1659,7 +1794,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) ...@@ -1659,7 +1794,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock); mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock); mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup_reps(esw); esw_offloads_cleanup_reps(esw);
kfree(esw->vports); mlx5_esw_vports_cleanup(esw);
kfree(esw); kfree(esw);
} }
...@@ -1718,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, ...@@ -1718,8 +1853,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err; return err;
} }
static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return false;
return xa_get_mark(&esw->vports, vport_num, mark);
}
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
static bool static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num) is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{ {
return vport_num == MLX5_VPORT_PF || return vport_num == MLX5_VPORT_PF ||
mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_eswitch_is_vf_vport(esw, vport_num) ||
...@@ -1891,9 +2047,9 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) ...@@ -1891,9 +2047,9 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport; struct mlx5_vport *evport;
u32 max_guarantee = 0; u32 max_guarantee = 0;
int i; unsigned long i;
mlx5_esw_for_all_vports(esw, i, evport) { mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled || evport->qos.min_rate < max_guarantee) if (!evport->enabled || evport->qos.min_rate < max_guarantee)
continue; continue;
max_guarantee = evport->qos.min_rate; max_guarantee = evport->qos.min_rate;
...@@ -1911,11 +2067,11 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw) ...@@ -1911,11 +2067,11 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
struct mlx5_vport *evport; struct mlx5_vport *evport;
u32 vport_max_rate; u32 vport_max_rate;
u32 vport_min_rate; u32 vport_min_rate;
unsigned long i;
u32 bw_share; u32 bw_share;
int err; int err;
int i;
mlx5_esw_for_all_vports(esw, i, evport) { mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled) if (!evport->enabled)
continue; continue;
vport_min_rate = evport->qos.min_rate; vport_min_rate = evport->qos.min_rate;
...@@ -2205,3 +2361,19 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw) ...@@ -2205,3 +2361,19 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{ {
up_write(&esw->mode_lock); up_write(&esw->mode_lock);
} }
/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
* @dev: Pointer to core device
*
* mlx5_eswitch_get_total_vports returns total number of eswitch vports.
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
esw = dev->priv.eswitch;
return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
...@@ -176,6 +176,7 @@ struct mlx5_vport { ...@@ -176,6 +176,7 @@ struct mlx5_vport {
u16 vport; u16 vport;
bool enabled; bool enabled;
enum mlx5_eswitch_vport_event enabled_events; enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port; struct devlink_port *dl_port;
}; };
...@@ -228,7 +229,7 @@ struct mlx5_esw_offload { ...@@ -228,7 +229,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads; struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group; struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps; struct xarray vport_reps;
struct list_head peer_flows; struct list_head peer_flows;
struct mutex peer_mutex; struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */ struct mutex encap_tbl_lock; /* protects encap_tbl */
...@@ -278,7 +279,7 @@ struct mlx5_eswitch { ...@@ -278,7 +279,7 @@ struct mlx5_eswitch {
struct esw_mc_addr mc_promisc; struct esw_mc_addr mc_promisc;
/* end of legacy */ /* end of legacy */
struct workqueue_struct *work_queue; struct workqueue_struct *work_queue;
struct mlx5_vport *vports; struct xarray vports;
u32 flags; u32 flags;
int total_vports; int total_vports;
int enabled_vports; int enabled_vports;
...@@ -545,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) ...@@ -545,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
} }
static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
{
/* PF and VF vports indices start from 0 to max_vfs */
return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}
static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
{
return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
}
static inline int
mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num - mlx5_sf_start_function_id(esw->dev) +
MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}
static inline u16
mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
{
return mlx5_sf_start_function_id(esw->dev) + idx -
(MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
}
static inline bool
mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_sf_supported(esw->dev) &&
vport_num >= mlx5_sf_start_function_id(esw->dev) &&
(vport_num < (mlx5_sf_start_function_id(esw->dev) +
mlx5_sf_max_functions(esw->dev)));
}
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev) static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{ {
return mlx5_core_is_ecpf_esw_manager(dev); return mlx5_core_is_ecpf_esw_manager(dev);
} }
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
/* Uplink always locate at the last element of the array.*/
return esw->total_vports - 1;
}
static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
return esw->total_vports - 2;
}
static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
u16 vport_num)
{
if (vport_num == MLX5_VPORT_ECPF) {
if (!mlx5_ecpf_vport_exists(esw->dev))
esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
return mlx5_eswitch_ecpf_idx(esw);
}
if (vport_num == MLX5_VPORT_UPLINK)
return mlx5_eswitch_uplink_idx(esw);
if (mlx5_esw_is_sf_vport(esw, vport_num))
return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
/* PF and VF vports start from 0 to max_vfs */
return vport_num;
}
static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
int index)
{
if (index == mlx5_eswitch_ecpf_idx(esw) &&
mlx5_ecpf_vport_exists(esw->dev))
return MLX5_VPORT_ECPF;
if (index == mlx5_eswitch_uplink_idx(esw))
return MLX5_VPORT_UPLINK;
/* SF vports indices are after VFs and before ECPF */
if (mlx5_sf_supported(esw->dev) &&
index > mlx5_core_max_vfs(esw->dev))
return mlx5_esw_sf_vport_index_to_num(esw, index);
/* PF and VF vports start from 0 to max_vfs */
return index;
}
static inline unsigned int static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
u16 vport_num) u16 vport_num)
...@@ -649,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index) ...@@ -649,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */ /* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
/* The vport getter/iterator are only valid after esw->total_vports /* Each mark identifies eswitch vport type.
* and vport->vport are initialized in mlx5_eswitch_init. * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
* a single mark.
* MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
* MLX5_ESW_VPT_SF identifies SF vport.
*/ */
#define mlx5_esw_for_all_vports(esw, i, vport) \ #define MLX5_ESW_VPT_HOST_FN XA_MARK_0
for ((i) = MLX5_VPORT_PF; \ #define MLX5_ESW_VPT_VF XA_MARK_1
(vport) = &(esw)->vports[i], \ #define MLX5_ESW_VPT_SF XA_MARK_2
(i) < (esw)->total_vports; (i)++)
/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init.
#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \ * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
for ((i) = (esw)->total_vports - 1; \
(vport) = &(esw)->vports[i], \
(i) >= MLX5_VPORT_PF; (i)--)
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
for ((i) = (nvfs); \
(vport) = &(esw)->vports[(i)], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
/* The rep getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
*/ */
#define mlx5_esw_for_all_reps(esw, i, rep) \
for ((i) = MLX5_VPORT_PF; \ #define mlx5_esw_for_each_vport(esw, index, vport) \
(rep) = &(esw)->offloads.vport_reps[i], \ xa_for_each(&((esw)->vports), index, vport)
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ for (index = 0, entry = xa_find(xa, &index, last, filter); \
for ((i) = MLX5_VPORT_FIRST_VF; \ entry; entry = xa_find_after(xa, &index, last, filter))
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++) #define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)
#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \ #define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
(rep) = &(esw)->offloads.vport_reps[i], \ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \ mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
for ((i) = (esw)->first_host_vport; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= (esw)->first_host_vport; (i)--)
#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
for ((vport) = (esw)->first_host_vport; \
(vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
for ((i) = mlx5_esw_sf_start_idx(esw); \
(rep) = &(esw)->offloads.vport_reps[(i)], \
(i) < mlx5_esw_sf_end_idx(esw); (i++))
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink); struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
...@@ -784,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo ...@@ -784,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 sfnum); u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 sfnum); u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num); void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
...@@ -816,6 +695,8 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw); ...@@ -816,6 +695,8 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport); void esw_vport_change_handle_locked(struct mlx5_vport *vport);
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
#else /* CONFIG_MLX5_ESWITCH */ #else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */ /* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
...@@ -49,6 +49,16 @@ ...@@ -49,6 +49,16 @@
#include "en_tc.h" #include "en_tc.h"
#include "en/mapping.h" #include "en/mapping.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
/* There are two match-all miss flows, one for unicast dst mac and /* There are two match-all miss flows, one for unicast dst mac and
* one for multicast. * one for multicast.
*/ */
...@@ -67,10 +77,7 @@ static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { ...@@ -67,10 +77,7 @@ static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num) u16 vport_num)
{ {
int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); return xa_load(&esw->offloads.vport_reps, vport_num);
WARN_ON(idx > esw->total_vports - 1);
return &esw->offloads.vport_reps[idx];
} }
static void static void
...@@ -720,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, ...@@ -720,10 +727,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{ {
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int i, err = 0; unsigned long i;
int err = 0;
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) { mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue; continue;
...@@ -972,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) ...@@ -972,13 +980,13 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{ {
struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules; struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num; int i = 0, num_vfs = esw->esw_funcs.num_vfs;
if (!num_vfs || !flows) if (!num_vfs || !flows)
return; return;
mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) for (i = 0; i < num_vfs; i++)
mlx5_del_flow_rules(flows[i++]); mlx5_del_flow_rules(flows[i]);
kvfree(flows); kvfree(flows);
} }
...@@ -992,6 +1000,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) ...@@ -992,6 +1000,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_handle **flows; struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
unsigned long i;
u16 vport_num; u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs; num_vfs = esw->esw_funcs.num_vfs;
...@@ -1016,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) ...@@ -1016,7 +1026,8 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) { mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
vport_num = vport->vport;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0, MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num; dest.vport.num = vport_num;
...@@ -1158,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1158,12 +1169,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_destination dest = {}; struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows; struct mlx5_flow_handle **flows;
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
/* total vports is the same for both e-switches */ /* total vports is the same for both e-switches */
int nvports = esw->total_vports; int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
unsigned long i;
void *misc; void *misc;
int err, i; int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL); spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) if (!spec)
...@@ -1182,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1182,6 +1195,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc_parameters); misc_parameters);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF); spec, MLX5_VPORT_PF);
...@@ -1191,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1191,10 +1205,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow); err = PTR_ERR(flow);
goto add_pf_flow_err; goto add_pf_flow_err;
} }
flows[MLX5_VPORT_PF] = flow; flows[vport->index] = flow;
} }
if (mlx5_ecpf_vport_exists(esw->dev)) { if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1); spec, &flow_act, &dest, 1);
...@@ -1202,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1202,13 +1217,13 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow); err = PTR_ERR(flow);
goto add_ecpf_flow_err; goto add_ecpf_flow_err;
} }
flows[mlx5_eswitch_ecpf_idx(esw)] = flow; flows[vport->index] = flow;
} }
mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) { mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
esw_set_peer_miss_rule_source_port(esw, esw_set_peer_miss_rule_source_port(esw,
peer_dev->priv.eswitch, peer_dev->priv.eswitch,
spec, i); spec, vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1); spec, &flow_act, &dest, 1);
...@@ -1216,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1216,7 +1231,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow); err = PTR_ERR(flow);
goto add_vf_flow_err; goto add_vf_flow_err;
} }
flows[i] = flow; flows[vport->index] = flow;
} }
esw->fdb_table.offloads.peer_miss_rules = flows; esw->fdb_table.offloads.peer_miss_rules = flows;
...@@ -1225,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1225,15 +1240,20 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0; return 0;
add_vf_flow_err: add_vf_flow_err:
nvports = --i; mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports) if (!flows[vport->index])
mlx5_del_flow_rules(flows[i]); continue;
mlx5_del_flow_rules(flows[vport->index]);
if (mlx5_ecpf_vport_exists(esw->dev)) }
mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
mlx5_del_flow_rules(flows[vport->index]);
}
add_ecpf_flow_err: add_ecpf_flow_err:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[vport->index]);
}
add_pf_flow_err: add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
kvfree(flows); kvfree(flows);
...@@ -1245,20 +1265,23 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, ...@@ -1245,20 +1265,23 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{ {
struct mlx5_flow_handle **flows; struct mlx5_flow_handle **flows;
int i; struct mlx5_vport *vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules; flows = esw->fdb_table.offloads.peer_miss_rules;
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
mlx5_core_max_vfs(esw->dev)) mlx5_del_flow_rules(flows[vport->index]);
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) if (mlx5_ecpf_vport_exists(esw->dev)) {
mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
mlx5_del_flow_rules(flows[vport->index]);
}
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[vport->index]);
}
kvfree(flows); kvfree(flows);
} }
...@@ -1402,11 +1425,11 @@ static void esw_vport_tbl_put(struct mlx5_eswitch *esw) ...@@ -1402,11 +1425,11 @@ static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{ {
struct mlx5_vport_tbl_attr attr; struct mlx5_vport_tbl_attr attr;
struct mlx5_vport *vport; struct mlx5_vport *vport;
int i; unsigned long i;
attr.chain = 0; attr.chain = 0;
attr.prio = 1; attr.prio = 1;
mlx5_esw_for_all_vports(esw, i, vport) { mlx5_esw_for_each_vport(esw, i, vport) {
attr.vport = vport->vport; attr.vport = vport->vport;
attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
mlx5_esw_vporttbl_put(esw, &attr); mlx5_esw_vporttbl_put(esw, &attr);
...@@ -1418,11 +1441,11 @@ static int esw_vport_tbl_get(struct mlx5_eswitch *esw) ...@@ -1418,11 +1441,11 @@ static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
struct mlx5_vport_tbl_attr attr; struct mlx5_vport_tbl_attr attr;
struct mlx5_flow_table *fdb; struct mlx5_flow_table *fdb;
struct mlx5_vport *vport; struct mlx5_vport *vport;
int i; unsigned long i;
attr.chain = 0; attr.chain = 0;
attr.prio = 1; attr.prio = 1;
mlx5_esw_for_all_vports(esw, i, vport) { mlx5_esw_for_each_vport(esw, i, vport) {
attr.vport = vport->vport; attr.vport = vport->vport;
attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fdb = mlx5_esw_vporttbl_get(esw, &attr); fdb = mlx5_esw_vporttbl_get(esw, &attr);
...@@ -1910,12 +1933,12 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, ...@@ -1910,12 +1933,12 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
return flow_rule; return flow_rule;
} }
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{ {
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
int vport; struct mlx5_vport *vport;
unsigned long i;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1936,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode ...@@ -1936,8 +1959,8 @@ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode
query_vports: query_vports:
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
if (prev_mlx5_mode != mlx5_mode) if (prev_mlx5_mode != mlx5_mode)
return -EINVAL; return -EINVAL;
prev_mlx5_mode = mlx5_mode; prev_mlx5_mode = mlx5_mode;
...@@ -2080,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, ...@@ -2080,34 +2103,82 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return err; return err;
} }
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
xa_mark_t mark)
{ {
kfree(esw->offloads.vport_reps); bool mark_set;
/* Copy the mark from vport to its rep */
mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
if (mark_set)
xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
} }
int esw_offloads_init_reps(struct mlx5_eswitch *esw) static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{ {
int total_vports = esw->total_vports;
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int vport_index; int rep_type;
u8 rep_type; int err;
esw->offloads.vport_reps = kcalloc(total_vports, rep = kzalloc(sizeof(*rep), GFP_KERNEL);
sizeof(struct mlx5_eswitch_rep), if (!rep)
GFP_KERNEL);
if (!esw->offloads.vport_reps)
return -ENOMEM; return -ENOMEM;
mlx5_esw_for_all_reps(esw, vport_index, rep) { rep->vport = vport->vport;
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); rep->vport_index = vport->index;
rep->vport_index = vport_index; for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
atomic_set(&rep->rep_data[rep_type].state, if (err)
REP_UNREGISTERED); goto insert_err;
}
mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
return 0; return 0;
insert_err:
kfree(rep);
return err;
}
static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
xa_erase(&esw->offloads.vport_reps, rep->vport);
kfree(rep);
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
mlx5_esw_for_each_rep(esw, i, rep)
mlx5_esw_offloads_rep_cleanup(esw, rep);
xa_destroy(&esw->offloads.vport_reps);
}
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
int err;
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
err = mlx5_esw_offloads_rep_init(esw, vport);
if (err)
goto err;
}
return 0;
err:
esw_offloads_cleanup_reps(esw);
return err;
} }
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
...@@ -2121,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, ...@@ -2121,7 +2192,7 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{ {
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int i; unsigned long i;
mlx5_esw_for_each_sf_rep(esw, i, rep) mlx5_esw_for_each_sf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type); __esw_offloads_unload_rep(esw, rep, rep_type);
...@@ -2130,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) ...@@ -2130,11 +2201,11 @@ static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{ {
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int i; unsigned long i;
__unload_reps_sf_vport(esw, rep_type); __unload_reps_sf_vport(esw, rep_type);
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs) mlx5_esw_for_each_vf_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type); __esw_offloads_unload_rep(esw, rep, rep_type);
if (mlx5_ecpf_vport_exists(esw->dev)) { if (mlx5_ecpf_vport_exists(esw->dev)) {
...@@ -2421,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, ...@@ -2421,25 +2492,25 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{ {
struct mlx5_vport *vport; struct mlx5_vport *vport;
int i; unsigned long i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return; return;
mlx5_esw_for_all_vports_reverse(esw, i, vport) mlx5_esw_for_each_vport(esw, i, vport)
esw_offloads_vport_metadata_cleanup(esw, vport); esw_offloads_vport_metadata_cleanup(esw, vport);
} }
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{ {
struct mlx5_vport *vport; struct mlx5_vport *vport;
unsigned long i;
int err; int err;
int i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0; return 0;
mlx5_esw_for_all_vports(esw, i, vport) { mlx5_esw_for_each_vport(esw, i, vport) {
err = esw_offloads_vport_metadata_setup(esw, vport); err = esw_offloads_vport_metadata_setup(esw, vport);
if (err) if (err)
goto metadata_err; goto metadata_err;
...@@ -2676,11 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) ...@@ -2676,11 +2747,25 @@ static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
return 0; return 0;
} }
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
/* Local controller is always valid */
if (controller == 0)
return true;
if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
return false;
/* External host number starts with zero in device */
return (controller == esw->offloads.host_number + 1);
}
int esw_offloads_enable(struct mlx5_eswitch *esw) int esw_offloads_enable(struct mlx5_eswitch *esw)
{ {
struct mapping_ctx *reg_c0_obj_pool; struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport; struct mlx5_vport *vport;
int err, i; unsigned long i;
int err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
...@@ -2926,13 +3011,44 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) ...@@ -2926,13 +3011,44 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return err; return err;
} }
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_vport *vport;
u16 err_vport_num = 0;
unsigned long i;
int err = 0;
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
if (err) {
err_vport_num = vport->vport;
NL_SET_ERR_MSG_MOD(extack,
"Failed to set min inline on vport");
goto revert_inline_mode;
}
}
return 0;
revert_inline_mode:
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
if (vport->vport == err_vport_num)
break;
mlx5_modify_nic_vport_min_inline(dev,
vport->vport,
esw->offloads.inline_mode);
}
return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_core_dev *dev = devlink_priv(devlink);
int err, vport, num_vport;
struct mlx5_eswitch *esw; struct mlx5_eswitch *esw;
u8 mlx5_mode; u8 mlx5_mode;
int err;
esw = mlx5_devlink_eswitch_get(devlink); esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw)) if (IS_ERR(esw))
...@@ -2967,25 +3083,14 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, ...@@ -2967,25 +3083,14 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err) if (err)
goto out; goto out;
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) { err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err)
if (err) { goto out;
NL_SET_ERR_MSG_MOD(extack,
"Failed to set min inline on vport");
goto revert_inline_mode;
}
}
esw->offloads.inline_mode = mlx5_mode; esw->offloads.inline_mode = mlx5_mode;
up_write(&esw->mode_lock); up_write(&esw->mode_lock);
return 0; return 0;
revert_inline_mode:
num_vport = --vport;
mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
mlx5_modify_nic_vport_min_inline(dev,
vport,
esw->offloads.inline_mode);
out: out:
up_write(&esw->mode_lock); up_write(&esw->mode_lock);
return err; return err;
...@@ -3116,11 +3221,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, ...@@ -3116,11 +3221,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
{ {
struct mlx5_eswitch_rep_data *rep_data; struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int i; unsigned long i;
esw->offloads.rep_ops[rep_type] = ops; esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) { mlx5_esw_for_each_rep(esw, i, rep) {
if (likely(mlx5_eswitch_vport_has_rep(esw, i))) { if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
rep->esw = esw; rep->esw = esw;
rep_data = &rep->rep_data[rep_type]; rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED); atomic_set(&rep_data->state, REP_REGISTERED);
...@@ -3132,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); ...@@ -3132,12 +3237,12 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{ {
struct mlx5_eswitch_rep *rep; struct mlx5_eswitch_rep *rep;
int i; unsigned long i;
if (esw->mode == MLX5_ESWITCH_OFFLOADS) if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type); __unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_all_reps(esw, i, rep) mlx5_esw_for_each_rep(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
} }
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
...@@ -3178,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, ...@@ -3178,12 +3283,6 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
} }
EXPORT_SYMBOL(mlx5_eswitch_vport_rep); EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num >= MLX5_VPORT_FIRST_VF &&
vport_num <= esw->dev->priv.sriov.max_vfs;
}
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{ {
return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
...@@ -3209,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, ...@@ -3209,7 +3308,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 sfnum) u16 vport_num, u32 controller, u32 sfnum)
{ {
int err; int err;
...@@ -3217,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p ...@@ -3217,7 +3316,7 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err) if (err)
return err; return err;
err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum); err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
if (err) if (err)
goto devlink_err; goto devlink_err;
......
...@@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ ...@@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
const struct mlx5_vhca_state_event *event = data; const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev; struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index; u16 sf_index;
u16 base_id;
max_functions = mlx5_sf_max_functions(table->dev);
if (!max_functions)
return 0;
base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id); sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index); sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) { switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_ALLOCATED: case MLX5_VHCA_STATE_ALLOCATED:
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
struct mlx5_sf { struct mlx5_sf {
struct devlink_port dl_port; struct devlink_port dl_port;
unsigned int port_index; unsigned int port_index;
u32 controller;
u16 id; u16 id;
u16 hw_fn_id; u16 hw_fn_id;
u16 hw_state; u16 hw_state;
...@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) ...@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
} }
static struct mlx5_sf * static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack) mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{ {
unsigned int dl_port_index; unsigned int dl_port_index;
struct mlx5_sf *sf; struct mlx5_sf *sf;
...@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex ...@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
int id_err; int id_err;
int err; int err;
id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum); if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
return ERR_PTR(-EINVAL);
}
id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
if (id_err < 0) { if (id_err < 0) {
err = id_err; err = id_err;
goto id_err; goto id_err;
...@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex ...@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
goto alloc_err; goto alloc_err;
} }
sf->id = id_err; sf->id = id_err;
hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id); hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id); dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
sf->port_index = dl_port_index; sf->port_index = dl_port_index;
sf->hw_fn_id = hw_fn_id; sf->hw_fn_id = hw_fn_id;
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
sf->controller = controller;
err = mlx5_sf_id_insert(table, sf); err = mlx5_sf_id_insert(table, sf);
if (err) if (err)
...@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex ...@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
insert_err: insert_err:
kfree(sf); kfree(sf);
alloc_err: alloc_err:
mlx5_sf_hw_table_sf_free(table->dev, id_err); mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err: id_err:
if (err == -EEXIST) if (err == -EEXIST)
NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum"); NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
...@@ -103,7 +111,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex ...@@ -103,7 +111,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{ {
mlx5_sf_id_erase(table, sf); mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->id); mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
kfree(sf); kfree(sf);
} }
...@@ -272,12 +280,12 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, ...@@ -272,12 +280,12 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf; struct mlx5_sf *sf;
int err; int err;
sf = mlx5_sf_alloc(table, new_attr->sfnum, extack); sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
if (IS_ERR(sf)) if (IS_ERR(sf))
return PTR_ERR(sf); return PTR_ERR(sf);
err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id, err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
new_attr->sfnum); new_attr->controller, new_attr->sfnum);
if (err) if (err)
goto esw_err; goto esw_err;
*new_port_index = sf->port_index; *new_port_index = sf->port_index;
...@@ -306,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ ...@@ -306,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
"User must provide unique sfnum. Driver does not support auto assignment"); "User must provide unique sfnum. Driver does not support auto assignment");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (new_attr->controller_valid && new_attr->controller) { if (new_attr->controller_valid && new_attr->controller &&
!mlx5_core_is_ecpf_esw_manager(dev)) {
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported"); NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -352,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) ...@@ -352,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
* firmware gives confirmation that it is detached by the driver. * firmware gives confirmation that it is detached by the driver.
*/ */
mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id); mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf); kfree(sf);
} else { } else {
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf); kfree(sf);
} }
} }
...@@ -437,9 +446,6 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v ...@@ -437,9 +446,6 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
static void mlx5_sf_table_enable(struct mlx5_sf_table *table) static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{ {
if (!mlx5_sf_max_functions(table->dev))
return;
init_completion(&table->disable_complete); init_completion(&table->disable_complete);
refcount_set(&table->refcount, 1); refcount_set(&table->refcount, 1);
} }
...@@ -462,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) ...@@ -462,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
static void mlx5_sf_table_disable(struct mlx5_sf_table *table) static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{ {
if (!mlx5_sf_max_functions(table->dev))
return;
if (!refcount_read(&table->refcount)) if (!refcount_read(&table->refcount))
return; return;
...@@ -498,7 +501,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi ...@@ -498,7 +501,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{ {
return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev); return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
mlx5_sf_hw_table_supported(dev);
} }
int mlx5_sf_table_init(struct mlx5_core_dev *dev) int mlx5_sf_table_init(struct mlx5_core_dev *dev)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "ecpf.h" #include "ecpf.h"
#include "vhca_event.h" #include "vhca_event.h"
#include "mlx5_core.h" #include "mlx5_core.h"
#include "eswitch.h"
struct mlx5_sf_hw { struct mlx5_sf_hw {
u32 usr_sfnum; u32 usr_sfnum;
...@@ -15,59 +16,113 @@ struct mlx5_sf_hw { ...@@ -15,59 +16,113 @@ struct mlx5_sf_hw {
u8 pending_delete: 1; u8 pending_delete: 1;
}; };
struct mlx5_sf_hwc_table {
struct mlx5_sf_hw *sfs;
int max_fn;
u16 start_fn_id;
};
enum mlx5_sf_hwc_index {
MLX5_SF_HWC_LOCAL,
MLX5_SF_HWC_EXTERNAL,
MLX5_SF_HWC_MAX,
};
struct mlx5_sf_hw_table { struct mlx5_sf_hw_table {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev;
struct mlx5_sf_hw *sfs;
int max_local_functions;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
struct notifier_block vhca_nb; struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
}; };
u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id) static struct mlx5_sf_hwc_table *
mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
{ {
return sw_id + mlx5_sf_start_function_id(dev); int idx = !!controller;
return &dev->priv.sf_hw_table->hwc[idx];
} }
static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id) u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{ {
return hw_id - mlx5_sf_start_function_id(dev); struct mlx5_sf_hwc_table *hwc;
hwc = mlx5_sf_controller_to_hwc(dev, controller);
return hwc->start_fn_id + sw_id;
} }
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
{
return hw_id - hwc->start_fn_id;
}
static struct mlx5_sf_hwc_table *
mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
{ {
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
int sw_id = -ENOSPC;
u16 hw_fn_id;
int err;
int i; int i;
if (!table->max_local_functions) for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
return -EOPNOTSUPP; if (table->hwc[i].max_fn &&
fn_id >= table->hwc[i].start_fn_id &&
fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
return &table->hwc[i];
}
return NULL;
}
static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int i;
hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
if (!hwc->sfs)
return -ENOSPC;
mutex_lock(&table->table_lock);
/* Check if sf with same sfnum already exists or not. */ /* Check if sf with same sfnum already exists or not. */
for (i = 0; i < table->max_local_functions; i++) { for (i = 0; i < hwc->max_fn; i++) {
if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) { if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
err = -EEXIST; return -EEXIST;
goto exist_err;
}
} }
/* Find the free entry and allocate the entry from the array */ /* Find the free entry and allocate the entry from the array */
for (i = 0; i < table->max_local_functions; i++) { for (i = 0; i < hwc->max_fn; i++) {
if (!table->sfs[i].allocated) { if (!hwc->sfs[i].allocated) {
table->sfs[i].usr_sfnum = usr_sfnum; hwc->sfs[i].usr_sfnum = usr_sfnum;
table->sfs[i].allocated = true; hwc->sfs[i].allocated = true;
sw_id = i; return i;
break;
} }
} }
if (sw_id == -ENOSPC) { return -ENOSPC;
err = -ENOSPC; }
static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
{
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u16 hw_fn_id;
int sw_id;
int err;
if (!table)
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err; goto exist_err;
} }
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sw_id); hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
err = mlx5_cmd_alloc_sf(dev, hw_fn_id); err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
if (err) if (err)
goto err; goto err;
...@@ -76,47 +131,58 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) ...@@ -76,47 +131,58 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
if (err) if (err)
goto vhca_err; goto vhca_err;
if (controller) {
/* If this SF is for external controller, SF manager
* needs to arm firmware to receive the events.
*/
err = mlx5_vhca_event_arm(dev, hw_fn_id);
if (err)
goto vhca_err;
}
mutex_unlock(&table->table_lock); mutex_unlock(&table->table_lock);
return sw_id; return sw_id;
vhca_err: vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id); mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err: err:
table->sfs[i].allocated = false; mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err: exist_err:
mutex_unlock(&table->table_lock); mutex_unlock(&table->table_lock);
return err; return err;
} }
static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id) void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{ {
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u16 hw_fn_id; u16 hw_fn_id;
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id); mlx5_cmd_dealloc_sf(dev, hw_fn_id);
table->sfs[id].allocated = false; mlx5_sf_hw_table_id_free(table, controller, id);
table->sfs[id].pending_delete = false; mutex_unlock(&table->table_lock);
} }
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
struct mlx5_sf_hwc_table *hwc, int idx)
{ {
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
hwc->sfs[idx].allocated = false;
mutex_lock(&table->table_lock); hwc->sfs[idx].pending_delete = false;
_mlx5_sf_hw_id_free(dev, id);
mutex_unlock(&table->table_lock);
} }
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{ {
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
struct mlx5_sf_hwc_table *hwc;
u16 hw_fn_id; u16 hw_fn_id;
u8 state; u8 state;
int err; int err;
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
hwc = mlx5_sf_controller_to_hwc(dev, controller);
mutex_lock(&table->table_lock); mutex_lock(&table->table_lock);
err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
if (err) if (err)
...@@ -124,53 +190,102 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) ...@@ -124,53 +190,102 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
if (state == MLX5_VHCA_STATE_ALLOCATED) { if (state == MLX5_VHCA_STATE_ALLOCATED) {
mlx5_cmd_dealloc_sf(dev, hw_fn_id); mlx5_cmd_dealloc_sf(dev, hw_fn_id);
table->sfs[id].allocated = false; hwc->sfs[id].allocated = false;
} else { } else {
table->sfs[id].pending_delete = true; hwc->sfs[id].pending_delete = true;
} }
err: err:
mutex_unlock(&table->table_lock); mutex_unlock(&table->table_lock);
} }
static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table) static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
struct mlx5_sf_hwc_table *hwc)
{ {
int i; int i;
for (i = 0; i < table->max_local_functions; i++) { for (i = 0; i < hwc->max_fn; i++) {
if (table->sfs[i].allocated) if (hwc->sfs[i].allocated)
_mlx5_sf_hw_id_free(table->dev, i); mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
} }
} }
static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
{
mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
{
struct mlx5_sf_hw *sfs;
if (!max_fn)
return 0;
sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
if (!sfs)
return -ENOMEM;
hwc->sfs = sfs;
hwc->max_fn = max_fn;
hwc->start_fn_id = base_id;
return 0;
}
static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
{
kfree(hwc->sfs);
}
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{ {
struct mlx5_sf_hw_table *table; struct mlx5_sf_hw_table *table;
struct mlx5_sf_hw *sfs; u16 max_ext_fn = 0;
int max_functions; u16 ext_base_id;
u16 max_fn = 0;
u16 base_id;
int err;
if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev)) if (!mlx5_vhca_event_supported(dev))
return 0;
if (mlx5_sf_supported(dev))
max_fn = mlx5_sf_max_functions(dev);
err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
if (err)
return err;
if (!max_fn && !max_ext_fn)
return 0; return 0;
max_functions = mlx5_sf_max_functions(dev);
table = kzalloc(sizeof(*table), GFP_KERNEL); table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table) if (!table)
return -ENOMEM; return -ENOMEM;
sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
if (!sfs)
goto table_err;
mutex_init(&table->table_lock); mutex_init(&table->table_lock);
table->dev = dev; table->dev = dev;
table->sfs = sfs;
table->max_local_functions = max_functions;
dev->priv.sf_hw_table = table; dev->priv.sf_hw_table = table;
mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
base_id = mlx5_sf_start_function_id(dev);
err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
if (err)
goto table_err;
err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
max_ext_fn, ext_base_id);
if (err)
goto ext_err;
mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
return 0; return 0;
ext_err:
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err: table_err:
mutex_destroy(&table->table_lock);
kfree(table); kfree(table);
return -ENOMEM; return err;
} }
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
...@@ -181,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) ...@@ -181,7 +296,8 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
return; return;
mutex_destroy(&table->table_lock); mutex_destroy(&table->table_lock);
kfree(table->sfs); mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
kfree(table); kfree(table);
} }
...@@ -189,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode ...@@ -189,21 +305,26 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
{ {
struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb); struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
const struct mlx5_vhca_state_event *event = data; const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw; struct mlx5_sf_hw *sf_hw;
u16 sw_id; u16 sw_id;
if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED) if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0; return 0;
sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id); hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
sf_hw = &table->sfs[sw_id]; if (!hwc)
return 0;
sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
sf_hw = &hwc->sfs[sw_id];
mutex_lock(&table->table_lock); mutex_lock(&table->table_lock);
/* SF driver notified through firmware that SF is finally detached. /* SF driver notified through firmware that SF is finally detached.
* Hence recycle the sf hardware id for reuse. * Hence recycle the sf hardware id for reuse.
*/ */
if (sf_hw->allocated && sf_hw->pending_delete) if (sf_hw->allocated && sf_hw->pending_delete)
_mlx5_sf_hw_id_free(table->dev, sw_id); mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
mutex_unlock(&table->table_lock); mutex_unlock(&table->table_lock);
return 0; return 0;
} }
...@@ -228,5 +349,10 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) ...@@ -228,5 +349,10 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb); mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware event has been missed. */ /* Dealloc SFs whose firmware event has been missed. */
mlx5_sf_hw_dealloc_all(table); mlx5_sf_hw_table_dealloc_all(table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
{
return !!dev->priv.sf_hw_table;
} }
...@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id); ...@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id); u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id);
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum); int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum);
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id); void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id); void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);
#endif #endif
...@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) ...@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
} }
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
* @dev: Pointer to core device
*
* mlx5_eswitch_get_total_vports returns total number of vports for
* the eswitch.
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out) int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
{ {
u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01); u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
......
...@@ -65,8 +65,6 @@ struct mlx5_flow_handle * ...@@ -65,8 +65,6 @@ struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch_rep *rep, u32 sqn); struct mlx5_eswitch_rep *rep, u32 sqn);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
...@@ -126,6 +124,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, ...@@ -126,6 +124,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */ #else /* CONFIG_MLX5_ESWITCH */
static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
...@@ -162,10 +162,17 @@ mlx5_eswitch_get_vport_metadata_mask(void) ...@@ -162,10 +162,17 @@ mlx5_eswitch_get_vport_metadata_mask(void)
{ {
return 0; return 0;
} }
static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */ #endif /* CONFIG_MLX5_ESWITCH */
static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
{ {
return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
} }
#endif #endif
...@@ -36,14 +36,6 @@ ...@@ -36,14 +36,6 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h> #include <linux/mlx5/device.h>
#define MLX5_VPORT_PF_PLACEHOLDER (1u)
#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
MLX5_VPORT_UPLINK_PLACEHOLDER + \
MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
#define MLX5_VPORT_MANAGER(mdev) \ #define MLX5_VPORT_MANAGER(mdev) \
(MLX5_CAP_GEN(mdev, vport_group_manager) && \ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
......
...@@ -98,11 +98,13 @@ struct devlink_port_pci_vf_attrs { ...@@ -98,11 +98,13 @@ struct devlink_port_pci_vf_attrs {
* @controller: Associated controller number * @controller: Associated controller number
* @sf: Associated PCI SF for of the PCI PF for this port. * @sf: Associated PCI SF for of the PCI PF for this port.
* @pf: Associated PCI PF number for this port. * @pf: Associated PCI PF number for this port.
* @external: when set, indicates if a port is for an external controller
*/ */
struct devlink_port_pci_sf_attrs { struct devlink_port_pci_sf_attrs {
u32 controller; u32 controller;
u32 sf; u32 sf;
u16 pf; u16 pf;
u8 external:1;
}; };
/** /**
...@@ -1508,7 +1510,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro ...@@ -1508,7 +1510,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro
void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller, void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
u16 pf, u16 vf, bool external); u16 pf, u16 vf, bool external);
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
u32 controller, u16 pf, u32 sf); u32 controller, u16 pf, u32 sf,
bool external);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count, u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count, u16 egress_pools_count, u16 ingress_tc_count,
......
...@@ -8599,9 +8599,10 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set); ...@@ -8599,9 +8599,10 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
* @controller: associated controller number for the devlink port instance * @controller: associated controller number for the devlink port instance
* @pf: associated PF for the devlink port instance * @pf: associated PF for the devlink port instance
* @sf: associated SF of a PF for the devlink port instance * @sf: associated SF of a PF for the devlink port instance
* @external: indicates if the port is for an external controller
*/ */
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
u16 pf, u32 sf) u16 pf, u32 sf, bool external)
{ {
struct devlink_port_attrs *attrs = &devlink_port->attrs; struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret; int ret;
...@@ -8615,6 +8616,7 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro ...@@ -8615,6 +8616,7 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
attrs->pci_sf.controller = controller; attrs->pci_sf.controller = controller;
attrs->pci_sf.pf = pf; attrs->pci_sf.pf = pf;
attrs->pci_sf.sf = sf; attrs->pci_sf.sf = sf;
attrs->pci_sf.external = external;
} }
EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set); EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
...@@ -8667,6 +8669,13 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, ...@@ -8667,6 +8669,13 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
attrs->pci_vf.pf, attrs->pci_vf.vf); attrs->pci_vf.pf, attrs->pci_vf.vf);
break; break;
case DEVLINK_PORT_FLAVOUR_PCI_SF: case DEVLINK_PORT_FLAVOUR_PCI_SF:
if (attrs->pci_sf.external) {
n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
if (n >= len)
return -EINVAL;
len -= n;
name += n;
}
n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf, n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
attrs->pci_sf.sf); attrs->pci_sf.sf);
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment