Commit b545a13c authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-06-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-06-21

mlx5 driver minor cleanups and fixes for net-next

* tag 'mlx5-updates-2023-06-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Remove pointless vport lookup from mlx5_esw_check_port_type()
  net/mlx5: Remove redundant check from mlx5_esw_query_vport_vhca_id()
  net/mlx5: Remove redundant is_mdev_switchdev_mode() check from is_ib_rep_supported()
  net/mlx5: Remove redundant MLX5_ESWITCH_MANAGER() check from is_ib_rep_supported()
  net/mlx5e: E-Switch, Fix shared fdb error flow
  net/mlx5e: Remove redundant comment
  net/mlx5e: E-Switch, Pass other_vport flag if vport is not 0
  net/mlx5e: E-Switch, Use xarray for devcom paired device index
  net/mlx5e: E-Switch, Add peer fdb miss rules for vport manager or ecpf
  net/mlx5e: Use vhca_id for device index in vport rx rules
  net/mlx5: Lag, Remove duplicate code checking lag is supported
  net/mlx5: Fix error code in mlx5_is_reset_now_capable()
  net/mlx5: Fix reserved at offset in hca_cap register
  net/mlx5: Fix SFs kernel documentation error
  net/mlx5: Fix UAF in mlx5_eswitch_cleanup()
====================

Link: https://lore.kernel.org/r/20230623192907.39033-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 35bf34b0 29e4c95f
@@ -51,19 +51,21 @@ This will allow user to configure the SF before the SF have been fully probed,
 which will save time.
 Usage example:
-Create SF:
-$ devlink port add pci/0000:08:00.0 flavour pcisf pfnum 0 sfnum 11
-$ devlink port function set pci/0000:08:00.0/32768 \
-              hw_addr 00:00:00:00:00:11 state active
-Enable ETH auxiliary device:
-$ devlink dev param set auxiliary/mlx5_core.sf.1 \
-              name enable_eth value true cmode driverinit
-Now, in order to fully probe the SF, use devlink reload:
-$ devlink dev reload auxiliary/mlx5_core.sf.1
-mlx5 supports ETH,rdma and vdpa (vnet) auxiliary devices devlink params (see :ref:`Documentation/networking/devlink/devlink-params.rst`)
+
+- Create SF::
+
+    $ devlink port add pci/0000:08:00.0 flavour pcisf pfnum 0 sfnum 11
+    $ devlink port function set pci/0000:08:00.0/32768 hw_addr 00:00:00:00:00:11 state active
+
+- Enable ETH auxiliary device::
+
+    $ devlink dev param set auxiliary/mlx5_core.sf.1 name enable_eth value true cmode driverinit
+
+- Now, in order to fully probe the SF, use devlink reload::
+
+    $ devlink dev reload auxiliary/mlx5_core.sf.1
+
+mlx5 supports ETH,rdma and vdpa (vnet) auxiliary devices devlink params (see :ref:`Documentation/networking/devlink/devlink-params.rst <devlink_params_generic>`).
 
 mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
......
@@ -151,12 +151,6 @@ static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
 	if (!is_eth_rep_supported(dev))
 		return false;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
-		return false;
-
-	if (!is_mdev_switchdev_mode(dev))
-		return false;
-
 	if (mlx5_core_mp_enabled(dev))
 		return false;
......
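
Why the two dropped checks were redundant: is_eth_rep_supported(), called first, already rejects the same devices. A minimal sketch of that relationship, assuming the body of is_eth_rep_supported() from context (it is not shown in this diff):

	static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
	{
		if (!MLX5_ESWITCH_MANAGER(dev))   /* already rejected here... */
			return false;
		if (!is_mdev_switchdev_mode(dev)) /* ...and here */
			return false;
		return true;
	}

so re-checking MLX5_ESWITCH_MANAGER() and is_mdev_switchdev_mode() after the is_eth_rep_supported() guard was dead code.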
@@ -408,7 +408,7 @@ static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5
 	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
 					peer_esw, tmp) {
-		int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
+		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 		struct mlx5e_rep_sq_peer *sq_peer;
 		int err;
@@ -1581,7 +1581,7 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
 					 struct mlx5_eswitch *peer_esw)
 {
-	int i = mlx5_get_dev_index(peer_esw->dev);
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
@@ -1603,7 +1603,7 @@ static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep,
 				      struct mlx5_eswitch *peer_esw)
 {
-	int i = mlx5_get_dev_index(peer_esw->dev);
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;
......
@@ -1751,16 +1751,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
+	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
+	if (!esw)
+		return -ENOMEM;
+
 	err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
 				   ARRAY_SIZE(mlx5_eswitch_params));
 	if (err)
-		return err;
-
-	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
-	if (!esw) {
-		err = -ENOMEM;
-		goto unregister_param;
-	}
+		goto free_esw;
 
 	esw->dev = dev;
 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
@@ -1821,10 +1819,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
 	debugfs_remove_recursive(esw->debugfs_root);
-	kfree(esw);
-unregister_param:
 	devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
 			       ARRAY_SIZE(mlx5_eswitch_params));
+free_esw:
+	kfree(esw);
 	return err;
 }
@@ -1848,9 +1846,9 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	esw_offloads_cleanup(esw);
 	mlx5_esw_vports_cleanup(esw);
 	debugfs_remove_recursive(esw->debugfs_root);
-	kfree(esw);
 	devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
 			       ARRAY_SIZE(mlx5_eswitch_params));
+	kfree(esw);
 }
 
 /* Vport Administration */
@@ -1910,12 +1908,6 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
 {
-	struct mlx5_vport *vport;
-
-	vport = mlx5_eswitch_get_vport(esw, vport_num);
-	if (IS_ERR(vport))
-		return false;
-
 	return xa_get_mark(&esw->vports, vport_num, mark);
 }
......
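
This is the UAF fix named in the series summary: devl_params_unregister() dereferences esw->dev, so calling it after kfree(esw) read freed memory. The cleanup path now frees strictly last, and the init path allocates first and unwinds in exact reverse order. A minimal sketch of the pattern, with hypothetical names:

	struct obj { struct device *dev; };

	static void cleanup_buggy(struct obj *o)
	{
		kfree(o);
		unregister_params(o->dev);	/* UAF: o was freed one line earlier */
	}

	static void cleanup_fixed(struct obj *o)
	{
		unregister_params(o->dev);	/* touch o while it is still valid */
		kfree(o);			/* free strictly last */
	}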
@@ -353,7 +353,7 @@ struct mlx5_eswitch {
 		u32             large_group_num;
 	} params;
 	struct blocking_notifier_head n_head;
-	bool paired[MLX5_MAX_PORTS];
+	struct xarray paired;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
......
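
The bool array indexed by device index stops working once pairing is keyed by the peer's vhca_id (see the MLX5_CAP_GEN(..., vhca_id) hunks above): a vhca_id is a 16-bit value not bounded by MLX5_MAX_PORTS, so the series switches to a sparse xarray. A minimal sketch of the xarray lifecycle the diff relies on; the wrapper names are illustrative, not the driver's:

	#include <linux/xarray.h>

	static struct xarray paired;

	static void pairing_init(void)
	{
		xa_init(&paired);			/* empty map, nothing preallocated */
	}

	static int pair(unsigned long peer_vhca_id, void *peer)
	{
		/* fails with -EBUSY if this vhca_id is already paired */
		return xa_insert(&paired, peer_vhca_id, peer, GFP_KERNEL);
	}

	static bool is_paired(unsigned long peer_vhca_id)
	{
		return xa_load(&paired, peer_vhca_id) != NULL;
	}

	static void unpair(unsigned long peer_vhca_id)
	{
		xa_erase(&paired, peer_vhca_id);	/* drop the entry; old value ignored */
	}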
@@ -1069,6 +1069,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	void *misc;
 	int err;
 
+	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return 0;
+
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return -ENOMEM;
@@ -1177,11 +1180,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 					struct mlx5_core_dev *peer_dev)
 {
+	u16 peer_index = mlx5_get_dev_index(peer_dev);
 	struct mlx5_flow_handle **flows;
 	struct mlx5_vport *vport;
 	unsigned long i;
 
-	flows = esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)];
+	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
+	if (!flows)
+		return;
 
 	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
 		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
@@ -1206,7 +1212,9 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
 		mlx5_del_flow_rules(flows[vport->index]);
 	}
+
 	kvfree(flows);
+	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -1896,7 +1904,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	/* create vport rx group */
 	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
 
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -2543,13 +2550,13 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
 	return err;
 }
 
-static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress_ns,
+static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
+					      struct mlx5_flow_namespace *egress_ns,
 					      struct mlx5_vport *vport, size_t count)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_table_attr ft_attr = {
 		.max_fte = count, .prio = 0, .level = 0,
-		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
 	};
 	struct mlx5_flow_table *acl;
 	struct mlx5_flow_group *g;
@@ -2564,6 +2571,9 @@ static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress
 	if (!flow_group_in)
 		return -ENOMEM;
 
+	if (vport->vport || mlx5_core_is_ecpf(esw->dev))
+		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+
 	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
 	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
@@ -2608,8 +2618,12 @@ static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress
 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
 {
+	if (!xa_empty(&vport->egress.offloads.bounce_rules))
+		return;
 	mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
+	vport->egress.offloads.bounce_grp = NULL;
 	mlx5_destroy_flow_table(vport->egress.acl);
+	vport->egress.acl = NULL;
 }
@@ -2634,7 +2648,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
 	if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
 		return 0;
 
-	err = esw_master_egress_create_resources(egress_ns, vport, count);
+	err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
 	if (err)
 		return err;
@@ -2799,15 +2813,21 @@ static int mlx5_esw_offloads_devcom_event(int event,
 	struct mlx5_eswitch *esw = my_data;
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 	struct mlx5_eswitch *peer_esw = event_data;
+	u16 esw_i, peer_esw_i;
+	bool esw_paired;
 	int err;
 
+	peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+	esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
+	esw_paired = !!xa_load(&esw->paired, peer_esw_i);
+
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
 
-		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+		if (esw_paired)
 			break;
 
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
@@ -2821,23 +2841,29 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		if (err)
 			goto err_pair;
 
-		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
-		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
+		err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
+		if (err)
+			goto err_xa;
+
+		err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
+		if (err)
+			goto err_peer_xa;
+
 		esw->num_peers++;
 		peer_esw->num_peers++;
 
 		mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
-		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+		if (!esw_paired)
 			break;
 
 		peer_esw->num_peers--;
 		esw->num_peers--;
 		if (!esw->num_peers && !peer_esw->num_peers)
 			mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
-		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
-		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
+		xa_erase(&peer_esw->paired, esw_i);
+		xa_erase(&esw->paired, peer_esw_i);
 		mlx5_esw_offloads_unpair(peer_esw, esw);
 		mlx5_esw_offloads_unpair(esw, peer_esw);
 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2846,6 +2872,10 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	return 0;
 
+err_peer_xa:
+	xa_erase(&esw->paired, peer_esw_i);
+err_xa:
+	mlx5_esw_offloads_unpair(peer_esw, esw);
 err_pair:
 	mlx5_esw_offloads_unpair(esw, peer_esw);
 err_peer:
@@ -2868,9 +2898,10 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
-	if (!mlx5_is_lag_supported(esw->dev))
+	if (!mlx5_lag_is_supported(esw->dev))
 		return;
 
+	xa_init(&esw->paired);
 	mlx5_devcom_register_component(devcom,
 				       MLX5_DEVCOM_ESW_OFFLOADS,
 				       mlx5_esw_offloads_devcom_event,
@@ -2890,7 +2921,7 @@ void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
-	if (!mlx5_is_lag_supported(esw->dev))
+	if (!mlx5_lag_is_supported(esw->dev))
 		return;
 
 	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
@@ -2898,6 +2929,7 @@ void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
 	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	xa_destroy(&esw->paired);
 }
 
 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
@@ -3929,9 +3961,6 @@ static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num,
 	int err;
 
 	*vhca_id = 0;
-	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
-	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
-		return -EPERM;
 
 	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
 	if (!query_ctx)
......
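
The new err_xa/err_peer_xa labels follow the kernel's goto-unwind convention: on failure, jump to a label that undoes only the steps that had already succeeded, in reverse order, so a failed xa_insert() into the peer's map also erases the entry just inserted into the local map. A generic sketch of the convention, with hypothetical step names:

	static int pair_both(void)
	{
		int err;

		err = setup_local();		/* step 1 */
		if (err)
			return err;		/* nothing to undo yet */

		err = setup_peer();		/* step 2 */
		if (err)
			goto undo_local;

		return 0;

	undo_local:
		teardown_local();		/* undo step 1; step 2 never happened */
		return err;
	}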
@@ -327,7 +327,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
 	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
 		mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
-		return -EOPNOTSUPP;
+		return false;
 	}
 
 	err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
......
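
The one-line fix above is subtler than it looks: mlx5_is_reset_now_capable() returns bool, and in C any nonzero value converts to true, so "return -EOPNOTSUPP;" reported the device as reset-capable on exactly the unsupported path. A tiny standalone demonstration:

	#include <stdbool.h>
	#include <stdio.h>

	static bool capable_buggy(void)
	{
		return -95;	/* -EOPNOTSUPP: nonzero, silently converts to true */
	}

	int main(void)
	{
		printf("%d\n", capable_buggy());	/* prints 1: reads as "capable" */
		return 0;
	}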
@@ -1268,14 +1268,21 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
 	mlx5_ldev_put(ldev);
 }
 
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
+{
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+		return false;
+	return true;
+}
+
 void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 {
 	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
-	     MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
+	if (!mlx5_lag_is_supported(dev))
 		return;
 
 recheck:
......
@@ -74,15 +74,7 @@ struct mlx5_lag {
 	struct lag_mpesw	lag_mpesw;
 };
 
-static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
-{
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
-	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
-		return false;
-	return true;
-}
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev);
 
 static inline struct mlx5_lag *
 mlx5_lag_dev(struct mlx5_core_dev *dev)
......
@@ -1710,9 +1710,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8	regexp_params[0x1];
 	u8	uar_sz[0x6];
 	u8	port_selection_cap[0x1];
-	u8	reserved_at_248[0x1];
+	u8	reserved_at_251[0x1];
 	u8	umem_uid_0[0x1];
-	u8	reserved_at_250[0x5];
+	u8	reserved_at_253[0x5];
 	u8	log_pg_sz[0x8];
 	u8	bf[0x1];
......
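
In mlx5_ifc.h, reserved fields are named reserved_at_<hex bit offset> and every field's width is given in bits, so the names must track the fields' true offsets; the old names had gone stale. Counting forward from the corrected names (derived from the hunk above, with port_selection_cap assumed at bit 0x250):

	/* port_selection_cap[0x1]  -> bit  0x250
	 * reserved_at_251[0x1]     -> bit  0x251  (was mislabeled reserved_at_248)
	 * umem_uid_0[0x1]          -> bit  0x252
	 * reserved_at_253[0x5]     -> bits 0x253..0x257 (was mislabeled reserved_at_250)
	 * log_pg_sz[0x8]           -> bits 0x258..0x25f
	 */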