Commit 93197c7c authored by Parav Pandit, committed by David S. Miller

mlx5/core: Support max_io_eqs for a function

Implement the devlink port function get and set callbacks for the maximum
number of IO event queues of an SF and VF. This enables the administrator
on the hypervisor to control the maximum number of IO event queues of a
function, which is typically used to derive the maximum and default number
of net device channels or RDMA device completion vectors.
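
For example, with the devlink CLI (a sketch only; the PCI address, port
index and reported values are illustrative, and the max_io_eqs attribute
is assumed to be supported by the installed iproute2 devlink tool; none of
this output is taken from this patch):

  $ devlink port show pci/0000:06:00.0/1
  pci/0000:06:00.0/1: type eth netdev enp6s0f0v0 flavour pcivf pfnum 0 vfnum 0
    function:
      hw_addr 00:00:00:00:00:00 roce enable max_io_eqs 10

  $ devlink port function set pci/0000:06:00.0/1 max_io_eqs 20

  $ devlink port show pci/0000:06:00.0/1
  pci/0000:06:00.0/1: type eth netdev enp6s0f0v0 flavour pcivf pfnum 0 vfnum 0
    function:
      hw_addr 00:00:00:00:00:00 roce enable max_io_eqs 20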
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5af3e387
@@ -98,6 +98,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
        .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
        .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
        .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
        .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};

static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,

@@ -143,6 +145,8 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
        .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
        .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
        .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
        .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)

...
@@ -573,6 +573,13 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
                                          struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
                                        u32 *max_io_eqs,
                                        struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
                                        u32 max_io_eqs,
                                        struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,

...
@@ -66,6 +66,8 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

#define MLX5_ESW_MAX_CTRL_EQS 4

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
        .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
        .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,

@@ -4557,3 +4559,98 @@ int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
        return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */

int
mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
                                    struct netlink_ext_ack *extack)
{
        struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
        int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        u16 vport_num = vport->vport;
        struct mlx5_eswitch *esw;
        void *query_ctx;
        void *hca_caps;
        u32 max_eqs;
        int err;

        esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
        if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Device doesn't support VHCA management");
                return -EOPNOTSUPP;
        }

        query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
        if (!query_ctx)
                return -ENOMEM;

        mutex_lock(&esw->state_lock);
        err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
                                            MLX5_CAP_GENERAL);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
                goto out;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
        max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
        if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
                *max_io_eqs = 0;
        else
                *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
out:
        mutex_unlock(&esw->state_lock);
        kfree(query_ctx);
        return err;
}

int
mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
                                    struct netlink_ext_ack *extack)
{
        struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
        int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        u16 vport_num = vport->vport;
        struct mlx5_eswitch *esw;
        void *query_ctx;
        void *hca_caps;
        u16 max_eqs;
        int err;

        esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
        if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Device doesn't support VHCA management");
                return -EOPNOTSUPP;
        }

        if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
                NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
                return -EINVAL;
        }

        query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
        if (!query_ctx)
                return -ENOMEM;

        mutex_lock(&esw->state_lock);
        err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
                                            MLX5_CAP_GENERAL);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
                goto out;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
        MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);

        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
                                            MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");

out:
        mutex_unlock(&esw->state_lock);
        kfree(query_ctx);
        return err;
}
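
Note on the arithmetic in the two callbacks above (the numbers here are
illustrative, not taken from the patch): MLX5_ESW_MAX_CTRL_EQS (4) event
queues are treated as reserved for non-IO use. If the function's HCA
capability reports max_num_eqs = 32, the get callback returns
max_io_eqs = 32 - 4 = 28 (or 0 when fewer than 4 EQs are available).
Conversely, setting max_io_eqs = 20 programs max_num_eqs = 20 + 4 = 24,
and a requested value whose sum would overflow the u16 used for the
capability is rejected with -EINVAL.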