Commit f3196bb0 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: Introduce vhca state event notifier

vhca state events indicate a change in the state of a vhca that may
occur due to SF allocation, deallocation, or enabling/disabling of
the SF HCA.

Introduce a vhca state event handler which will be used by the SF devlink
port manager and the SF hardware id allocator in subsequent patches
to act on the event.

This enables a single entity to subscribe, query and rearm the event
for a function.

Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent a556dded
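
For illustration, a consumer such as the SF devlink port manager added in a
later patch would hook into this chain roughly as follows. This is a minimal
sketch, not code from this series: my_sf_table and its callback are
hypothetical, while mlx5_vhca_event_notifier_register() and
struct mlx5_vhca_state_event are the interfaces introduced below.

#include <linux/notifier.h>
#include "mlx5_core.h"
#include "sf/vhca_event.h"

struct my_sf_table {                       /* hypothetical consumer */
        struct mlx5_core_dev *dev;
        struct notifier_block nb;          /* hooks into the vhca state chain */
};

static int my_sf_state_cb(struct notifier_block *nb, unsigned long action, void *data)
{
        struct my_sf_table *table = container_of(nb, struct my_sf_table, nb);
        struct mlx5_vhca_state_event *event = data;

        /* Invoked from the events workqueue through a blocking notifier
         * chain, so sleeping (e.g. issuing further device commands) is
         * allowed here.
         */
        mlx5_core_dbg(table->dev, "function 0x%x state %d sw_id 0x%x\n",
                      event->function_id, event->new_vhca_state,
                      event->sw_function_id);
        return NOTIFY_OK;
}

static int my_sf_table_init(struct mlx5_core_dev *dev, struct my_sf_table *table)
{
        table->dev = dev;
        table->nb.notifier_call = my_sf_state_cb;
        /* Returns -EOPNOTSUPP when the device has no vhca_state support. */
        return mlx5_vhca_event_notifier_register(dev, &table->nb);
}
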
@@ -203,3 +203,12 @@ config MLX5_SW_STEERING
        default y
        help
          Build support for software-managed steering in the NIC.

config MLX5_SF
        bool "Mellanox Technologies subfunction device support using auxiliary device"
        depends on MLX5_CORE && MLX5_CORE_EN
        default n
        help
          Build support for subfunction devices in the NIC. A Mellanox subfunction
          device can support RDMA, netdevice and vdpa devices.
          It is similar to a SRIOV VF but it doesn't require SRIOV support.
@@ -86,3 +86,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
        steering/dr_ste_v0.o \
        steering/dr_cmd.o steering/dr_fw.o \
        steering/dr_action.o steering/fs_dr.o

#
# SF device
#
mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o
@@ -464,6 +464,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_ALLOC_MEMIC:
        case MLX5_CMD_OP_MODIFY_XRQ:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
        case MLX5_CMD_OP_QUERY_VHCA_STATE:
        case MLX5_CMD_OP_MODIFY_VHCA_STATE:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
@@ -657,6 +659,8 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
        MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
        MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
        default: return "unknown command opcode";
        }
}
...
@@ -595,6 +595,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
                async_event_mask |=
                        (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

        if (MLX5_CAP_GEN_MAX(dev, vhca_state))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

        mask[0] = async_event_mask;

        if (MLX5_CAP_GEN(dev, event_cap))
...
@@ -110,6 +110,8 @@ static const char *eqe_type_str(u8 type)
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
                return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
        case MLX5_EVENT_TYPE_VHCA_STATE_CHANGE:
                return "MLX5_EVENT_TYPE_VHCA_STATE_CHANGE";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -403,3 +405,8 @@ int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
        return atomic_notifier_call_chain(&events->nh, event, data);
}

void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
{
        queue_work(dev->priv.events->wq, work);
}
@@ -73,6 +73,7 @@
#include "ecpf.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
#include "sf/vhca_event.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -567,6 +568,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
        if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
                MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

        mlx5_vhca_state_cap_handle(dev, set_hca_cap);

        return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -884,6 +887,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
                goto err_eswitch_cleanup;
        }

        err = mlx5_vhca_event_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
                goto err_fpga_cleanup;
        }

        dev->dm = mlx5_dm_create(dev);
        if (IS_ERR(dev->dm))
                mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -894,6 +903,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)

        return 0;

err_fpga_cleanup:
        mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
        mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
@@ -925,6 +936,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
        mlx5_hv_vhca_destroy(dev->hv_vhca);
        mlx5_fw_tracer_destroy(dev->tracer);
        mlx5_dm_cleanup(dev);
        mlx5_vhca_event_cleanup(dev);
        mlx5_fpga_cleanup(dev);
        mlx5_eswitch_cleanup(dev->priv.eswitch);
        mlx5_sriov_cleanup(dev);
@@ -1129,6 +1141,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
                goto err_sriov;
        }

        mlx5_vhca_event_start(dev);

        err = mlx5_ec_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1146,6 +1160,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err_sriov:
        mlx5_ec_cleanup(dev);
err_ec:
        mlx5_vhca_event_stop(dev);
        mlx5_cleanup_fs(dev);
err_fs:
        mlx5_accel_tls_cleanup(dev);
@@ -1173,6 +1188,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
        mlx5_sriov_detach(dev);
        mlx5_ec_cleanup(dev);
        mlx5_vhca_event_stop(dev);
        mlx5_cleanup_fs(dev);
        mlx5_accel_ipsec_cleanup(dev);
        mlx5_accel_tls_cleanup(dev);
...
@@ -259,4 +259,6 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);

void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);

#endif /* __MLX5_CORE_H__ */

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#ifndef __MLX5_IFC_VHCA_EVENT_H__
#define __MLX5_IFC_VHCA_EVENT_H__

enum mlx5_ifc_vhca_state {
        MLX5_VHCA_STATE_INVALID = 0x0,
        MLX5_VHCA_STATE_ALLOCATED = 0x1,
        MLX5_VHCA_STATE_ACTIVE = 0x2,
        MLX5_VHCA_STATE_IN_USE = 0x3,
        MLX5_VHCA_STATE_TEARDOWN_REQUEST = 0x4,
};

struct mlx5_ifc_vhca_state_context_bits {
        u8         arm_change_event[0x1];
        u8         reserved_at_1[0xb];
        u8         vhca_state[0x4];
        u8         reserved_at_10[0x10];

        u8         sw_function_id[0x20];

        u8         reserved_at_40[0x80];
};

struct mlx5_ifc_query_vhca_state_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];

        u8         syndrome[0x20];

        u8         reserved_at_40[0x40];

        struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
};

struct mlx5_ifc_query_vhca_state_in_bits {
        u8         opcode[0x10];
        u8         uid[0x10];

        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];

        u8         embedded_cpu_function[0x1];
        u8         reserved_at_41[0xf];
        u8         function_id[0x10];

        u8         reserved_at_60[0x20];
};

struct mlx5_ifc_vhca_state_field_select_bits {
        u8         reserved_at_0[0x1e];
        u8         sw_function_id[0x1];
        u8         arm_change_event[0x1];
};

struct mlx5_ifc_modify_vhca_state_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];

        u8         syndrome[0x20];

        u8         reserved_at_40[0x40];
};

struct mlx5_ifc_modify_vhca_state_in_bits {
        u8         opcode[0x10];
        u8         uid[0x10];

        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];

        u8         embedded_cpu_function[0x1];
        u8         reserved_at_41[0xf];
        u8         function_id[0x10];

        struct mlx5_ifc_vhca_state_field_select_bits vhca_state_field_select;
        struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
};

#endif
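
Note that the _bits structures above describe firmware command layouts, not
in-memory C objects: each u8 array spans the named number of bits, and the
driver reads and writes fields through the MLX5_GET()/MLX5_SET() macros,
which derive offsets and widths from these layouts at compile time. A
minimal sketch of pulling the state out of a query response, mirroring what
mlx5_vhca_event_notify() does below; my_read_vhca_state() is hypothetical:

static u8 my_read_vhca_state(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
{
        u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};

        if (mlx5_cmd_query_vhca_state(dev, function_id, ecpu, out, sizeof(out)))
                return MLX5_VHCA_STATE_INVALID;

        /* offsets and widths come from mlx5_ifc_query_vhca_state_out_bits */
        return MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
}
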
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#ifndef __MLX5_SF_H__
#define __MLX5_SF_H__

#include <linux/mlx5/driver.h>

static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN(dev, sf_base_id);
}

#ifdef CONFIG_MLX5_SF

static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN(dev, sf);
}

static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
{
        if (!mlx5_sf_supported(dev))
                return 0;
        if (MLX5_CAP_GEN(dev, max_num_sf))
                return MLX5_CAP_GEN(dev, max_num_sf);
        else
                return 1 << MLX5_CAP_GEN(dev, log_max_sf);
}

#else

static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
{
        return false;
}

static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
{
        return 0;
}

#endif

#endif
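
These helpers delimit the SF hardware function id space: ids begin at the
sf_base_id capability and span mlx5_sf_max_functions() entries (taken from
max_num_sf when that capability is set, otherwise 1 << log_max_sf). A hedged
sketch of the kind of range check a later consumer might build on top of
them; my_function_id_is_sf() is hypothetical, not part of this patch:

static bool my_function_id_is_sf(const struct mlx5_core_dev *dev, u16 function_id)
{
        u16 base = mlx5_sf_start_function_id(dev);

        return function_id >= base &&
               function_id < base + mlx5_sf_max_functions(dev);
}
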
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#include <linux/mlx5/driver.h>
#include "mlx5_ifc_vhca_event.h"
#include "mlx5_core.h"
#include "vhca_event.h"
#include "ecpf.h"

struct mlx5_vhca_state_notifier {
        struct mlx5_core_dev *dev;
        struct mlx5_nb nb;
        struct blocking_notifier_head n_head;
};

struct mlx5_vhca_event_work {
        struct work_struct work;
        struct mlx5_vhca_state_notifier *notifier;
        struct mlx5_vhca_state_event event;
};

int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
                              bool ecpu, u32 *out, u32 outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};

        MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
        MLX5_SET(query_vhca_state_in, in, function_id, function_id);
        MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
                                      bool ecpu, u32 *in, u32 inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};

        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
        MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);

        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
{
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};

        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
        MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);

        return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
}

int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
{
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};

        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);

        return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
}

static void
mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
{
        u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
        int err;

        err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
        if (err)
                return;

        event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
                                         vhca_state_context.sw_function_id);
        event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
                                         vhca_state_context.vhca_state);

        mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);

        blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
}

static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
        struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
        struct mlx5_vhca_state_notifier *notifier = work->notifier;
        struct mlx5_core_dev *dev = notifier->dev;

        mlx5_vhca_event_notify(dev, &work->event);
}

static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
        struct mlx5_vhca_state_notifier *notifier =
                mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
        struct mlx5_vhca_event_work *work;
        struct mlx5_eqe *eqe = data;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;
        INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
        work->notifier = notifier;
        work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
        work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
        mlx5_events_work_enqueue(notifier->dev, &work->work);
        return NOTIFY_OK;
}

void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
{
        if (!mlx5_vhca_event_supported(dev))
                return;

        MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
        MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
        MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
        MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
        MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}

int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
        struct mlx5_vhca_state_notifier *notifier;

        if (!mlx5_vhca_event_supported(dev))
                return 0;

        notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
        if (!notifier)
                return -ENOMEM;

        dev->priv.vhca_state_notifier = notifier;
        notifier->dev = dev;
        BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
        MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
        return 0;
}

void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
{
        if (!mlx5_vhca_event_supported(dev))
                return;

        kfree(dev->priv.vhca_state_notifier);
        dev->priv.vhca_state_notifier = NULL;
}

void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
        struct mlx5_vhca_state_notifier *notifier;

        if (!dev->priv.vhca_state_notifier)
                return;

        notifier = dev->priv.vhca_state_notifier;
        mlx5_eq_notifier_register(dev, &notifier->nb);
}

void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
        struct mlx5_vhca_state_notifier *notifier;

        if (!dev->priv.vhca_state_notifier)
                return;

        notifier = dev->priv.vhca_state_notifier;
        mlx5_eq_notifier_unregister(dev, &notifier->nb);
}

int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
        if (!dev->priv.vhca_state_notifier)
                return -EOPNOTSUPP;
        return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
}

void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
}
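
Two details of the dispatch above are worth calling out. The EQ callback
mlx5_vhca_state_change_notifier() runs in atomic context, so it only copies
the EQE payload and defers to the events workqueue; mlx5_vhca_event_notify()
then queries the state, rearms the change event, and walks a blocking
notifier chain where subscribers may sleep. Since this patch only rearms
after an event fires, delivery appears to be one-shot per arming, and a
consumer presumably has to arm each function id it tracks once up front. A
hedged sketch of such initial arming; the loop and the choice of
mlx5_core_is_ecpf() for the ecpu argument are illustrative, not taken from
this series:

        u16 base = mlx5_sf_start_function_id(dev);
        u16 i;

        /* opt in to state-change events for every SF hardware function id */
        for (i = 0; i < mlx5_sf_max_functions(dev); i++)
                mlx5_vhca_event_arm(dev, base + i, mlx5_core_is_ecpf(dev));
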
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#ifndef __MLX5_VHCA_EVENT_H__
#define __MLX5_VHCA_EVENT_H__

#ifdef CONFIG_MLX5_SF

struct mlx5_vhca_state_event {
        u16 function_id;
        u16 sw_function_id;
        u8 new_vhca_state;
        bool ecpu;
};

static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN_MAX(dev, vhca_state);
}

void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
                              bool ecpu, u32 *out, u32 outlen);

#else

static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
{
}

static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
        return 0;
}

static inline void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
{
}

static inline void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
}

static inline void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
}

#endif

#endif
@@ -507,6 +507,7 @@ struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;

struct mlx5_rate_limit {
        u32 rate;

@@ -603,6 +604,9 @@ struct mlx5_priv {
        struct mlx5_bfreg_data bfregs;
        struct mlx5_uars_page *uar;
#ifdef CONFIG_MLX5_SF
        struct mlx5_vhca_state_notifier *vhca_state_notifier;
#endif
};

enum mlx5_device_state {
...