Commit 1e5e4acb authored by David S. Miller

Merge tag 'mlx5-updates-2021-04-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-21

devlink external port attribute for SF (Sub-Function) port flavour

This adds support for instantiating Sub-Functions on external hosts.
E.g., when the eswitch manager is enabled on the Arm SmartNIC SoC CPU, users
can now spawn new Sub-Functions on the host server CPU.

Parav Pandit says:
==================

This series introduces and uses an external attribute for the SF port to
indicate that an SF port belongs to an external controller.

This is needed to generate a unique phys_port_name when PF and SF numbers
overlap between local and external controllers. For example, take two
controllers 0 and 1, each with an SF that has PF number 0 and SF number 77.
Without the controller number in the name, both SF ports would end up with
the same phys_port_name.

Hence, add the controller number, but only when an SF port belongs to an
external controller. This extension is similar to the existing PF and VF
eswitch ports of an external controller.

When an SF is for an external controller, an example view of the external SF
port and its config sequence:

On eswitch system:
$ devlink dev eswitch set pci/0033:01:00.0 mode switchdev

$ devlink port show
pci/0033:01:00.0/196607: type eth netdev enP51p1s0f0np0 flavour physical port 0 splittable false
pci/0033:01:00.0/131072: type eth netdev eth0 flavour pcipf controller 1 pfnum 0 external true splittable false
  function:
    hw_addr 00:00:00:00:00:00

$ devlink port add pci/0033:01:00.0 flavour pcisf pfnum 0 sfnum 77 controller 1
pci/0033:01:00.0/163840: type eth netdev eth1 flavour pcisf controller 1 pfnum 0 sfnum 77 splittable false
  function:
    hw_addr 00:00:00:00:00:00 state inactive opstate detached

phys_port_name construction:
$ cat /sys/class/net/eth1/phys_port_name
c1pf0sf77
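
The name above comes from the devlink core change included in this series:
when an SF port is flagged as external, the controller number is prefixed
before the usual pf/sf part. A minimal user-space sketch of that formatting
rule (an illustrative helper, not the kernel function itself):

#include <stdio.h>

/* Sketch of the naming rule: external SF ports get a "c<controller>"
 * prefix before the usual "pf<pf>sf<sf>". Illustrative only.
 */
static int sf_phys_port_name(char *buf, size_t len, unsigned int controller,
			     unsigned int pf, unsigned int sf, int external)
{
	size_t off = 0;
	int n;

	if (external) {
		n = snprintf(buf, len, "c%u", controller);
		if (n < 0 || (size_t)n >= len)
			return -1;
		off = n;
	}
	n = snprintf(buf + off, len - off, "pf%usf%u", pf, sf);
	return (n < 0 || (size_t)n >= len - off) ? -1 : 0;
}

int main(void)
{
	char name[32];

	if (!sf_phys_port_name(name, sizeof(name), 1, 0, 77, 1))
		printf("%s\n", name);	/* prints c1pf0sf77 */
	return 0;
}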

Patch summary:
The first 3 patches prepare the eswitch to handle vports in a more generic
way, using an xarray to look up a vport from its unique vport number.
Patch-1 returns the maximum number of eswitch ports only when the eswitch is enabled
Patch-2 prepares the eswitch to return the eswitch max ports from a struct
Patch-3 uses an xarray for vport and representor lookup (see the sketch after this list)
Patch-4 considers SFs for an additional range of SF vports
Patch-5 relies on the SF hw table to check SF support
Patch-6 extends the SF devlink port attribute with an external flag
Patch-7 stores the per-controller SF allocation attributes
Patch-8 uses the SF function id for filtering events
Patch-9 uses a helper for allocation and free
Patch-10 splits the hw table into a per-controller table and a generic one
Patch-11 extends the SF table for an additional range
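
Patch-3's direction can be illustrated with a small kernel-context sketch
(it will not build as a standalone program, and the demo_* names are made up
for illustration, not taken from the driver): vports are stored in an xarray
keyed directly by their vport number and tagged with xarray marks, so lookups
and filtered iteration need no index arithmetic for PF/VF/SF ranges.

#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/printk.h>

#define DEMO_VPT_VF XA_MARK_1		/* analogous to MLX5_ESW_VPT_VF */

struct demo_vport {
	u16 vport;
};

static DEFINE_XARRAY(demo_vports);

/* Store a vport keyed by its vport number; sparse or overlapping number
 * ranges are fine because no array index is computed from the number.
 */
static int demo_vport_add(u16 vport_num, bool is_vf)
{
	struct demo_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;
	vport->vport = vport_num;
	err = xa_insert(&demo_vports, vport_num, vport, GFP_KERNEL);
	if (err) {
		kfree(vport);
		return err;
	}
	if (is_vf)
		xa_set_mark(&demo_vports, vport_num, DEMO_VPT_VF);
	return 0;
}

/* Direct lookup by vport number, replacing num-to-index helpers. */
static struct demo_vport *demo_vport_get(u16 vport_num)
{
	return xa_load(&demo_vports, vport_num);
}

/* Iterate only VF vports by filtering on the mark. */
static void demo_print_vf_vports(void)
{
	struct demo_vport *vport;
	unsigned long i;

	xa_for_each_marked(&demo_vports, i, vport, DEMO_VPT_VF)
		pr_info("VF vport %u\n", vport->vport);
}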

==================

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 95aafe91 f1b9acd3
@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 	}
 	if (!vport->egress.acl) {
-		vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+		vport->egress.acl = esw_acl_table_create(esw, vport,
							 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
							 table_size);
 		if (IS_ERR(vport->egress.acl)) {
......
@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
 	esw_acl_egress_vlan_grp_destroy(vport);
 }
-static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
 }
@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
 		table_size++;
 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
 		table_size++;
-	vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+	vport->egress.acl = esw_acl_table_create(esw, vport,
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
 	if (IS_ERR(vport->egress.acl)) {
 		err = PTR_ERR(vport->egress.acl);
......
@@ -6,14 +6,14 @@
 #include "helper.h"
 struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
 {
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_core_dev *dev = esw->dev;
 	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *acl;
 	int acl_supported;
-	int vport_index;
+	u16 vport_num;
 	int err;
 	acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
 	if (!acl_supported)
 		return ERR_PTR(-EOPNOTSUPP);
+	vport_num = vport->vport;
 	esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
 		  ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
-	vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
-	root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
			 vport_num);
......
@@ -8,7 +8,7 @@
 /* General acl helper functions */
 struct mlx5_flow_table *
-esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);
 /* Egress acl helper functions */
 void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
......
@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 	}
 	if (!vport->ingress.acl) {
-		vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+		vport->ingress.acl = esw_acl_table_create(esw, vport,
							  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
							  table_size);
 		if (IS_ERR(vport->ingress.acl)) {
......
@@ -7,7 +7,7 @@
 #include "ofld.h"
 static bool
-esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
 {
 	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
 	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
 		num_ftes++;
-	vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+	vport->ingress.acl = esw_acl_table_create(esw, vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
 	if (IS_ERR(vport->ingress.acl)) {
......
@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
 	memcpy(ppid->id, &parent_id, sizeof(parent_id));
 }
-static bool
-mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
 {
 	return vport_num == MLX5_VPORT_UPLINK ||
	       (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
@@ -124,7 +123,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
 }
 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum)
+				      u16 vport_num, u32 controller, u32 sfnum)
 {
 	struct mlx5_core_dev *dev = esw->dev;
 	struct netdev_phys_item_id ppid = {};
@@ -142,7 +141,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
 	mlx5_esw_get_port_parent_id(dev, &ppid);
 	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
 	dl_port->attrs.switch_id.id_len = ppid.id_len;
-	devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
 	devlink = priv_to_devlink(dev);
 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 	err = devlink_port_register(devlink, dl_port, dl_port_index);
......
@@ -216,7 +216,8 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 int esw_legacy_enable(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int ret, i;
+	unsigned long i;
+	int ret;
 	ret = esw_create_legacy_table(esw);
 	if (ret)
......
@@ -176,6 +176,7 @@ struct mlx5_vport {
 	u16 vport;
 	bool enabled;
 	enum mlx5_eswitch_vport_event enabled_events;
+	int index;
 	struct devlink_port *dl_port;
 };
@@ -228,7 +229,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_table *ft_offloads;
 	struct mlx5_flow_group *vport_rx_group;
-	struct mlx5_eswitch_rep *vport_reps;
+	struct xarray vport_reps;
 	struct list_head peer_flows;
 	struct mutex peer_mutex;
 	struct mutex encap_tbl_lock; /* protects encap_tbl */
@@ -278,7 +279,7 @@ struct mlx5_eswitch {
 	struct esw_mc_addr mc_promisc;
 	/* end of legacy */
 	struct workqueue_struct *work_queue;
-	struct mlx5_vport *vports;
+	struct xarray vports;
 	u32 flags;
 	int total_vports;
 	int enabled_vports;
@@ -545,94 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
 }
-static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
-{
-	/* PF and VF vports indices start from 0 to max_vfs */
-	return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
-{
-	return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
-}
-static inline int
-mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
-{
-	return vport_num - mlx5_sf_start_function_id(esw->dev) +
-	       MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
-}
-static inline u16
-mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
-{
-	return mlx5_sf_start_function_id(esw->dev) + idx -
-	       (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
-}
-static inline bool
-mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
-{
-	return mlx5_sf_supported(esw->dev) &&
-	       vport_num >= mlx5_sf_start_function_id(esw->dev) &&
-	       (vport_num < (mlx5_sf_start_function_id(esw->dev) +
-			     mlx5_sf_max_functions(esw->dev)));
-}
 static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
 {
 	return mlx5_core_is_ecpf_esw_manager(dev);
 }
-static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
-{
-	/* Uplink always locate at the last element of the array.*/
-	return esw->total_vports - 1;
-}
-static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
-{
-	return esw->total_vports - 2;
-}
-static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
-						  u16 vport_num)
-{
-	if (vport_num == MLX5_VPORT_ECPF) {
-		if (!mlx5_ecpf_vport_exists(esw->dev))
-			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
-		return mlx5_eswitch_ecpf_idx(esw);
-	}
-	if (vport_num == MLX5_VPORT_UPLINK)
-		return mlx5_eswitch_uplink_idx(esw);
-	if (mlx5_esw_is_sf_vport(esw, vport_num))
-		return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
-	/* PF and VF vports start from 0 to max_vfs */
-	return vport_num;
-}
-static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
-						  int index)
-{
-	if (index == mlx5_eswitch_ecpf_idx(esw) &&
-	    mlx5_ecpf_vport_exists(esw->dev))
-		return MLX5_VPORT_ECPF;
-	if (index == mlx5_eswitch_uplink_idx(esw))
-		return MLX5_VPORT_UPLINK;
-	/* SF vports indices are after VFs and before ECPF */
-	if (mlx5_sf_supported(esw->dev) &&
-	    index > mlx5_core_max_vfs(esw->dev))
-		return mlx5_esw_sf_vport_index_to_num(esw, index);
-	/* PF and VF vports start from 0 to max_vfs */
-	return index;
-}
 static inline unsigned int
 mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
@@ -649,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
 /* TODO: This mlx5e_tc function shouldn't be called by eswitch */
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_vports(esw, i, vport) \
-	for ((i) = MLX5_VPORT_PF; \
-	     (vport) = &(esw)->vports[i], \
-	     (i) < (esw)->total_vports; (i)++)
-#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
-	for ((i) = (esw)->total_vports - 1; \
-	     (vport) = &(esw)->vports[i], \
-	     (i) >= MLX5_VPORT_PF; (i)--)
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
-	for ((i) = MLX5_VPORT_FIRST_VF; \
-	     (vport) = &(esw)->vports[(i)], \
-	     (i) <= (nvfs); (i)++)
-#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
-	for ((i) = (nvfs); \
-	     (vport) = &(esw)->vports[(i)], \
-	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_reps(esw, i, rep) \
-	for ((i) = MLX5_VPORT_PF; \
-	     (rep) = &(esw)->offloads.vport_reps[i], \
-	     (i) < (esw)->total_vports; (i)++)
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
-	for ((i) = MLX5_VPORT_FIRST_VF; \
-	     (rep) = &(esw)->offloads.vport_reps[i], \
-	     (i) <= (nvfs); (i)++)
-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
-	for ((i) = (nvfs); \
-	     (rep) = &(esw)->offloads.vport_reps[i], \
-	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
-	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
-#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
-	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
-/* Includes host PF (vport 0) if it's not esw manager. */
-#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
-	for ((i) = (esw)->first_host_vport; \
-	     (rep) = &(esw)->offloads.vport_reps[i], \
-	     (i) <= (nvfs); (i)++)
-#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
-	for ((i) = (nvfs); \
-	     (rep) = &(esw)->offloads.vport_reps[i], \
-	     (i) >= (esw)->first_host_vport; (i)--)
-#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
-	for ((vport) = (esw)->first_host_vport; \
-	     (vport) <= (nvfs); (vport)++)
-#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
-	for ((vport) = (nvfs); \
-	     (vport) >= (esw)->first_host_vport; (vport)--)
-#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
-	for ((i) = mlx5_esw_sf_start_idx(esw); \
-	     (rep) = &(esw)->offloads.vport_reps[(i)], \
-	     (i) < mlx5_esw_sf_end_idx(esw); (i++))
+/* Each mark identifies eswitch vport type.
+ * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
+ * a single mark.
+ * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
+ * MLX5_ESW_VPT_SF identifies SF vport.
+ */
+#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
+#define MLX5_ESW_VPT_VF XA_MARK_1
+#define MLX5_ESW_VPT_SF XA_MARK_2
+/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init.
+ * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
+ */
+#define mlx5_esw_for_each_vport(esw, index, vport) \
+	xa_for_each(&((esw)->vports), index, vport)
+#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
+	for (index = 0, entry = xa_find(xa, &index, last, filter); \
+	     entry; entry = xa_find_after(xa, &index, last, filter))
+#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
+	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)
+#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
+	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)
+#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
+	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
-bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -784,12 +662,13 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
 struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum);
+				      u16 vport_num, u32 controller, u32 sfnum);
 void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
-				      u16 vport_num, u32 sfnum);
+				      u16 vport_num, u32 controller, u32 sfnum);
 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
@@ -816,6 +695,8 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw);
 void esw_vport_change_handle_locked(struct mlx5_vport *vport);
+bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
 #else /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
@@ -148,9 +148,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
 	struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
 	const struct mlx5_vhca_state_event *event = data;
 	struct mlx5_sf_dev *sf_dev;
+	u16 max_functions;
 	u16 sf_index;
+	u16 base_id;
+	max_functions = mlx5_sf_max_functions(table->dev);
+	if (!max_functions)
+		return 0;
+	base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
+	if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
+		return 0;
-	sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+	sf_index = event->function_id - base_id;
 	sf_dev = xa_load(&table->devices, sf_index);
 	switch (event->new_vhca_state) {
 	case MLX5_VHCA_STATE_ALLOCATED:
......
@@ -12,6 +12,7 @@
 struct mlx5_sf {
 	struct devlink_port dl_port;
 	unsigned int port_index;
+	u32 controller;
 	u16 id;
 	u16 hw_fn_id;
 	u16 hw_state;
@@ -58,7 +59,8 @@ static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
 }
 static struct mlx5_sf *
-mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
+	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
 {
 	unsigned int dl_port_index;
 	struct mlx5_sf *sf;
@@ -66,7 +68,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 	int id_err;
 	int err;
-	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
+		return ERR_PTR(-EINVAL);
+	}
+	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
 	if (id_err < 0) {
 		err = id_err;
 		goto id_err;
@@ -78,11 +85,12 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 		goto alloc_err;
 	}
 	sf->id = id_err;
-	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
 	sf->port_index = dl_port_index;
 	sf->hw_fn_id = hw_fn_id;
 	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+	sf->controller = controller;
 	err = mlx5_sf_id_insert(table, sf);
 	if (err)
@@ -93,7 +101,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 insert_err:
 	kfree(sf);
 alloc_err:
-	mlx5_sf_hw_table_sf_free(table->dev, id_err);
+	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
 id_err:
 	if (err == -EEXIST)
 		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
@@ -103,7 +111,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *ex
 static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
 {
 	mlx5_sf_id_erase(table, sf);
-	mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
 	kfree(sf);
 }
@@ -272,12 +280,12 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
 	struct mlx5_sf *sf;
 	int err;
-	sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
 	if (IS_ERR(sf))
 		return PTR_ERR(sf);
 	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
-						new_attr->sfnum);
+						new_attr->controller, new_attr->sfnum);
 	if (err)
 		goto esw_err;
 	*new_port_index = sf->port_index;
@@ -306,7 +314,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
				   "User must provide unique sfnum. Driver does not support auto assignment");
 		return -EOPNOTSUPP;
 	}
-	if (new_attr->controller_valid && new_attr->controller) {
+	if (new_attr->controller_valid && new_attr->controller &&
+	    !mlx5_core_is_ecpf_esw_manager(dev)) {
 		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
 		return -EOPNOTSUPP;
 	}
@@ -352,10 +361,10 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
		 * firmware gives confirmation that it is detached by the driver.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
-		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
 	} else {
-		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
 	}
 }
@@ -437,9 +446,6 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
 static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
 }
@@ -462,9 +468,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
 static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
	if (!refcount_read(&table->refcount))
		return;
@@ -498,7 +501,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
 {
-	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+	       mlx5_sf_hw_table_supported(dev);
 }
 int mlx5_sf_table_init(struct mlx5_core_dev *dev)
......
@@ -12,10 +12,11 @@ int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
-u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id);
-int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum);
-void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
-void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id);
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);
 #endif
@@ -1151,20 +1151,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-/**
- * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
- *
- * @dev: Pointer to core device
- *
- * mlx5_eswitch_get_total_vports returns total number of vports for
- * the eswitch.
- */
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
-{
-	return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
-}
-EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
 {
 	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
......
@@ -65,8 +65,6 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch_rep *rep, u32 sqn);
-u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 #ifdef CONFIG_MLX5_ESWITCH
 enum devlink_eswitch_encap_mode
 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
@@ -126,6 +124,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 #else /* CONFIG_MLX5_ESWITCH */
 static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
@@ -162,10 +162,17 @@ mlx5_eswitch_get_vport_metadata_mask(void)
 {
 	return 0;
 }
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+	return 0;
+}
 #endif /* CONFIG_MLX5_ESWITCH */
 static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
 {
 	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
 }
 #endif
@@ -36,14 +36,6 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/device.h>
-#define MLX5_VPORT_PF_PLACEHOLDER (1u)
-#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
-#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
-#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
-				   MLX5_VPORT_UPLINK_PLACEHOLDER + \
-				   MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
 #define MLX5_VPORT_MANAGER(mdev) \
	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
	(MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
......
@@ -98,11 +98,13 @@ struct devlink_port_pci_vf_attrs {
  * @controller: Associated controller number
  * @sf: Associated PCI SF for of the PCI PF for this port.
  * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
  */
 struct devlink_port_pci_sf_attrs {
 	u32 controller;
 	u32 sf;
 	u16 pf;
+	u8 external:1;
 };
 /**
@@ -1508,7 +1510,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro
 void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
				   u16 pf, u16 vf, bool external);
 void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
-				   u32 controller, u16 pf, u32 sf);
+				   u32 controller, u16 pf, u32 sf,
+				   bool external);
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
			u32 size, u16 ingress_pools_count,
			u16 egress_pools_count, u16 ingress_tc_count,
......
@@ -8599,9 +8599,10 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
  * @controller: associated controller number for the devlink port instance
  * @pf: associated PF for the devlink port instance
  * @sf: associated SF of a PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
  */
 void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
-				   u16 pf, u32 sf)
+				   u16 pf, u32 sf, bool external)
 {
 	struct devlink_port_attrs *attrs = &devlink_port->attrs;
 	int ret;
@@ -8615,6 +8616,7 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
 	attrs->pci_sf.controller = controller;
 	attrs->pci_sf.pf = pf;
 	attrs->pci_sf.sf = sf;
+	attrs->pci_sf.external = external;
 }
 EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
@@ -8667,6 +8669,13 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
					attrs->pci_vf.pf, attrs->pci_vf.vf);
		break;
 	case DEVLINK_PORT_FLAVOUR_PCI_SF:
+		if (attrs->pci_sf.external) {
+			n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
+			if (n >= len)
+				return -EINVAL;
+			len -= n;
+			name += n;
+		}
		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
			     attrs->pci_sf.sf);
		break;
......