Commit 1bc60524 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'mlx5-next' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Leon Romanovsky says:

====================
This PR is collected from
https://lore.kernel.org/all/cover.1695296682.git.leon@kernel.org

This series from Patrisious extends mlx5 to support IPsec packet offload
in multiport devices (MPV, see [1] for more details).

These devices have a single flow-steering logic and two netdev interfaces,
which require extra logic to manage IPsec configurations, as those
configurations are performed per netdev.

[1] https://lore.kernel.org/linux-rdma/20180104152544.28919-1-leon@kernel.org/

* 'mlx5-next' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Handle IPsec steering upon master unbind/bind
  net/mlx5: Configure IPsec steering for ingress RoCEv2 MPV traffic
  net/mlx5: Configure IPsec steering for egress RoCEv2 MPV traffic
  net/mlx5: Add create alias flow table function to ipsec roce
  net/mlx5: Implement alias object allow and create functions
  net/mlx5: Add alias flow table bits
  net/mlx5: Store devcom pointer inside IPsec RoCE
  net/mlx5: Register mlx5e priv to devcom in MPV mode
  RDMA/mlx5: Send events from IB driver about device affiliation state
  net/mlx5: Introduce ifc bits for migration in a chunk mode

====================

Link: https://lore.kernel.org/r/20231002083832.19746-1-leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 35715ac1 82f9378c
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h> #include <linux/mlx5/eswitch.h>
#include <linux/mlx5/driver.h>
#include <linux/list.h> #include <linux/list.h>
#include <rdma/ib_smi.h> #include <rdma/ib_smi.h>
#include <rdma/ib_umem_odp.h> #include <rdma/ib_umem_odp.h>
...@@ -3175,6 +3176,13 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, ...@@ -3175,6 +3176,13 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex); lockdep_assert_held(&mlx5_ib_multiport_mutex);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
NULL);
mlx5_core_mp_event_replay(mpi->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
NULL);
mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
spin_lock(&port->mp.mpi_lock); spin_lock(&port->mp.mpi_lock);
...@@ -3226,6 +3234,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, ...@@ -3226,6 +3234,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi) struct mlx5_ib_multiport_info *mpi)
{ {
u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
u64 key;
int err; int err;
lockdep_assert_held(&mlx5_ib_multiport_mutex); lockdep_assert_held(&mlx5_ib_multiport_mutex);
...@@ -3254,6 +3263,14 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, ...@@ -3254,6 +3263,14 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
mlx5_ib_init_cong_debugfs(ibdev, port_num); mlx5_ib_init_cong_debugfs(ibdev, port_num);
key = ibdev->ib_dev.index;
mlx5_core_mp_event_replay(mpi->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
return true; return true;
unbind: unbind:
......
...@@ -525,6 +525,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -525,6 +525,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SAVE_VHCA_STATE: case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE: case MLX5_CMD_OP_LOAD_VHCA_STATE:
case MLX5_CMD_OP_SYNC_CRYPTO: case MLX5_CMD_OP_SYNC_CRYPTO:
case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
*status = MLX5_DRIVER_STATUS_ABORTED; *status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND; *synd = MLX5_DRIVER_SYND;
return -ENOLINK; return -ENOLINK;
...@@ -728,6 +729,7 @@ const char *mlx5_command_str(int command) ...@@ -728,6 +729,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS);
default: return "unknown command opcode"; default: return "unknown command opcode";
} }
} }
...@@ -2090,6 +2092,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, ...@@ -2090,6 +2092,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
} }
EXPORT_SYMBOL(mlx5_cmd_exec_cb); EXPORT_SYMBOL(mlx5_cmd_exec_cb);
/* Allow a peer VHCA to access a local object.
 * @dev:  mlx5 core device that owns the object
 * @attr: type/id of the object to expose plus the access key a peer must
 *        present when creating an alias to it
 *
 * Issues the ALLOW_OTHER_VHCA_ACCESS command to firmware.
 * Return: 0 on success or a negative errno from mlx5_cmd_exec().
 */
int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
				     struct mlx5_cmd_allow_other_vhca_access_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
	u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
	void *key;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);
	/* Copy the shared secret into the command payload. */
	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* Create an alias general object referring to an object owned by another VHCA.
 * @dev:        local mlx5 core device on which the alias is created
 * @alias_attr: target vhca_id/obj_id/obj_type plus the access key that the
 *              owner registered via mlx5_cmd_allow_other_vhca_access()
 * @obj_id:     on success, set to the id of the newly created alias object
 *
 * Return: 0 on success or a negative errno from mlx5_cmd_exec().
 */
int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
			      u32 *obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
	void *param;
	void *attr;
	void *key;
	int ret;

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);

	/* Mark the created object as an alias rather than a regular object. */
	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
	MLX5_SET(general_obj_create_param, param, alias_object, 1);

	/* Identify the remote object and prove access with the shared key. */
	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}
/* Destroy an alias object previously created by mlx5_cmd_alias_obj_create().
 * @dev:      mlx5 core device the alias lives on
 * @obj_id:   id returned by the create call
 * @obj_type: general-object type of the alias
 *
 * Return: 0 on success or a negative errno from mlx5_cmd_exec().
 */
int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
			       u16 obj_type)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static void destroy_msg_cache(struct mlx5_core_dev *dev) static void destroy_msg_cache(struct mlx5_core_dev *dev)
{ {
struct cmd_msg_cache *ch; struct cmd_msg_cache *ch;
......
...@@ -168,6 +168,13 @@ struct page_pool; ...@@ -168,6 +168,13 @@ struct page_pool;
#define mlx5e_state_dereference(priv, p) \ #define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
/* Events exchanged over the MLX5_DEVCOM_MPV devcom component between the
 * two netdevs of a multiport (MPV) device pair; dispatched by
 * mlx5e_devcom_event_mpv().
 */
enum mlx5e_devcom_events {
	MPV_DEVCOM_MASTER_UP,		/* master affiliated; peer marks comp ready */
	MPV_DEVCOM_MASTER_DOWN,		/* master going away (no comp-ready change) */
	MPV_DEVCOM_IPSEC_MASTER_UP,	/* connect slave IPsec RoCE steering */
	MPV_DEVCOM_IPSEC_MASTER_DOWN,	/* tear down slave IPsec RoCE steering */
};
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{ {
if (mlx5_lag_is_lacp_owner(mdev)) if (mlx5_lag_is_lacp_owner(mdev))
...@@ -936,6 +943,7 @@ struct mlx5e_priv { ...@@ -936,6 +943,7 @@ struct mlx5e_priv {
struct mlx5e_htb *htb; struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl; struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root; struct dentry *dfs_root;
struct mlx5_devcom_comp_dev *devcom;
}; };
struct mlx5e_dev { struct mlx5e_dev {
......
...@@ -850,6 +850,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) ...@@ -850,6 +850,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC); xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev; ipsec->mdev = priv->mdev;
init_completion(&ipsec->comp);
ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0, ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
priv->netdev->name); priv->netdev->name);
if (!ipsec->wq) if (!ipsec->wq)
...@@ -870,7 +871,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) ...@@ -870,7 +871,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
} }
ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv); ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
ret = mlx5e_accel_ipsec_fs_init(ipsec); ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
if (ret) if (ret)
goto err_fs_init; goto err_fs_init;
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <net/xfrm.h> #include <net/xfrm.h>
#include <linux/idr.h> #include <linux/idr.h>
#include "lib/aso.h" #include "lib/aso.h"
#include "lib/devcom.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
...@@ -221,12 +222,20 @@ struct mlx5e_ipsec_tx_create_attr { ...@@ -221,12 +222,20 @@ struct mlx5e_ipsec_tx_create_attr {
enum mlx5_flow_namespace_type chains_ns; enum mlx5_flow_namespace_type chains_ns;
}; };
/* Deferred-work context for MPV IPsec devcom events; queued on the slave's
 * IPsec workqueue and serviced by ipsec_mpv_work_handler().
 */
struct mlx5e_ipsec_mpv_work {
	int event;			/* MPV_DEVCOM_IPSEC_MASTER_{UP,DOWN} */
	struct work_struct work;
	struct mlx5e_priv *slave_priv;	/* device whose steering is updated */
	struct mlx5e_priv *master_priv;	/* device waiting on its ipsec->comp */
};
struct mlx5e_ipsec { struct mlx5e_ipsec {
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct xarray sadb; struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_hw_stats hw_stats; struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq; struct workqueue_struct *wq;
struct completion comp;
struct mlx5e_flow_steering *fs; struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4; struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6; struct mlx5e_ipsec_rx *rx_ipv6;
...@@ -238,6 +247,7 @@ struct mlx5e_ipsec { ...@@ -238,6 +247,7 @@ struct mlx5e_ipsec {
struct notifier_block netevent_nb; struct notifier_block netevent_nb;
struct mlx5_ipsec_fs *roce; struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1; u8 is_uplink_rep: 1;
struct mlx5e_ipsec_mpv_work mpv_work;
}; };
struct mlx5e_ipsec_esn_state { struct mlx5e_ipsec_esn_state {
...@@ -302,7 +312,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); ...@@ -302,7 +312,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry); int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
...@@ -328,6 +338,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, ...@@ -328,6 +338,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs); struct mlx5_accel_esp_xfrm_attrs *attrs);
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
static inline struct mlx5_core_dev * static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{ {
...@@ -363,6 +377,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) ...@@ -363,6 +377,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{ {
return 0; return 0;
} }
/* IPsec offload compiled out: MPV IPsec events are a no-op. */
static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
						struct mlx5e_priv *master_priv)
{
}
/* IPsec offload compiled out: nothing to broadcast, never blocks. */
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
#endif #endif
#endif /* __MLX5E_IPSEC_H__ */ #endif /* __MLX5E_IPSEC_H__ */
...@@ -229,6 +229,83 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev, ...@@ -229,6 +229,83 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev,
return err; return err;
} }
/* Connect RX IPsec packet-offload steering to RoCE when the MPV master comes
 * up: create the RoCE RX tables behind the current TTC default destination,
 * then retarget the status and SA rules at the new RoCE flow table.
 * Caller holds the matching rx->ft.mutex (see ipsec_mpv_work_handler()).
 */
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));

	/* NOTE(review): the return value of mlx5_ipsec_fs_roce_rx_create() is
	 * ignored — confirm mlx5_ipsec_fs_roce_ft_get() is safe on failure. */
	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
}
/* Reverse of handle_ipsec_rx_bringup(): point the status and SA rules back at
 * the TTC default destination, then destroy the RoCE RX tables.
 * Caller holds the matching rx->ft.mutex (see ipsec_mpv_work_handler()).
 */
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));
	/* Rules are detached in reverse order of bringup (sa before status). */
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
/* Work handler for MPV IPsec master up/down events, run on the slave's IPsec
 * workqueue (queued by mlx5e_ipsec_handle_mpv_event()). For each steering
 * domain (TX, RX v4, RX v6) it takes the table mutex and, only if the table
 * is currently in use (refcnt != 0), connects or disconnects the RoCE part.
 * Always completes master_priv->ipsec->comp so the sender can stop waiting.
 */
static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			/* NOTE(review): return value ignored — confirm that a
			 * failed RoCE TX create is acceptable in this path. */
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	/* Wake the master waiting in mlx5e_ipsec_send_event(). */
	complete(&work->master_priv->ipsec->comp);
}
static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family) static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{ {
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
...@@ -264,7 +341,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, ...@@ -264,7 +341,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
} }
mlx5_destroy_flow_table(rx->ft.status); mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
} }
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
...@@ -422,7 +499,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, ...@@ -422,7 +499,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
err_add: err_add:
mlx5_destroy_flow_table(rx->ft.status); mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status: err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
return err; return err;
} }
...@@ -562,7 +639,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_ ...@@ -562,7 +639,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce) struct mlx5_ipsec_fs *roce)
{ {
mlx5_ipsec_fs_roce_tx_destroy(roce); mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
if (tx->chains) { if (tx->chains) {
ipsec_chains_destroy(tx->chains); ipsec_chains_destroy(tx->chains);
} else { } else {
...@@ -665,7 +742,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, ...@@ -665,7 +742,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
} }
connect_roce: connect_roce:
err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol); err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
if (err) if (err)
goto err_roce; goto err_roce;
return 0; return 0;
...@@ -1888,7 +1965,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) ...@@ -1888,7 +1965,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
} }
} }
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
struct mlx5_devcom_comp_dev **devcom)
{ {
struct mlx5_core_dev *mdev = ipsec->mdev; struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_namespace *ns, *ns_esw; struct mlx5_flow_namespace *ns, *ns_esw;
...@@ -1940,7 +2018,9 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) ...@@ -1940,7 +2018,9 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
ipsec->tx_esw->ns = ns_esw; ipsec->tx_esw->ns = ns_esw;
xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev); ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
} else {
mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
} }
return 0; return 0;
...@@ -1987,3 +2067,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry) ...@@ -1987,3 +2067,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
return rx->allow_tunnel_mode; return rx->allow_tunnel_mode;
} }
/* Handle an MPV IPsec devcom event targeted at the slave device.
 * Re-arms the master's completion, then queues ipsec_mpv_work_handler() on
 * the slave's IPsec workqueue; the handler signals master_priv->ipsec->comp
 * when done. If the slave has no IPsec support, the completion is signalled
 * immediately so mlx5e_ipsec_send_event() does not block forever.
 */
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
				  struct mlx5e_priv *master_priv)
{
	struct mlx5e_ipsec_mpv_work *work;

	reinit_completion(&master_priv->ipsec->comp);

	if (!slave_priv->ipsec) {
		complete(&master_priv->ipsec->comp);
		return;
	}

	/* Single embedded work item per slave; events for the same slave are
	 * serialized by the workqueue. */
	work = &slave_priv->ipsec->mpv_work;

	INIT_WORK(&work->work, ipsec_mpv_work_handler);
	work->event = event;
	work->slave_priv = slave_priv;
	work->master_priv = master_priv;
	queue_work(slave_priv->ipsec->wq, &work->work);
}
/* Broadcast an IPsec MPV event over the devcom component and block until the
 * peer's work handler (or the no-IPsec fast path in
 * mlx5e_ipsec_handle_mpv_event()) signals priv->ipsec->comp.
 */
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
	if (!priv->ipsec)
		return; /* IPsec not supported */

	mlx5_devcom_send_event(priv->devcom, event, event, priv);
	wait_for_completion(&priv->ipsec->comp);
}
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "en.h" #include "en.h"
#include "ipsec.h" #include "ipsec.h"
#include "lib/crypto.h" #include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
enum { enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET, MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
...@@ -63,7 +64,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) ...@@ -63,7 +64,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
caps |= MLX5_IPSEC_CAP_ESPINUDP; caps |= MLX5_IPSEC_CAP_ESPINUDP;
} }
if (mlx5_get_roce_state(mdev) && if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX) MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
caps |= MLX5_IPSEC_CAP_ROCE; caps |= MLX5_IPSEC_CAP_ROCE;
......
...@@ -69,6 +69,7 @@ ...@@ -69,6 +69,7 @@
#include "en/htb.h" #include "en/htb.h"
#include "qos.h" #include "qos.h"
#include "en/trap.h" #include "en/trap.h"
#include "lib/devcom.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode) enum mlx5e_mpwrq_umr_mode umr_mode)
...@@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) ...@@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb); mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
} }
/* Devcom callback for the MLX5_DEVCOM_MPV component, invoked on the peer
 * when mlx5_devcom_send_event() is called on the other port.
 * @my_data:    this (receiving) side's struct mlx5e_priv
 * @event_data: the sender's struct mlx5e_priv
 * Always returns 0.
 */
static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
{
	struct mlx5e_priv *slave_priv = my_data;

	switch (event) {
	case MPV_DEVCOM_MASTER_UP:
		mlx5_devcom_comp_set_ready(slave_priv->devcom, true);
		break;
	case MPV_DEVCOM_MASTER_DOWN:
		/* no need for comp set ready false since we unregister after
		 * and it hurts cleanup flow.
		 */
		break;
	case MPV_DEVCOM_IPSEC_MASTER_UP:
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mlx5e_ipsec_handle_mpv_event(event, my_data, event_data);
		break;
	}

	return 0;
}
/* Register this netdev priv on the MPV devcom component, keyed by *data (the
 * IB device index carried in the MLX5_DRIVER_EVENT_AFFILIATION_DONE event —
 * see mlx5_ib_bind_slave_port()). A multiport master additionally replays
 * MASTER_UP and IPSEC_MASTER_UP to the newly joined peer.
 * Return: 0 on success, -EOPNOTSUPP if the component could not be registered.
 */
static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
{
	priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
						      MLX5_DEVCOM_MPV,
						      *data,
						      mlx5e_devcom_event_mpv,
						      priv);
	if (IS_ERR_OR_NULL(priv->devcom))
		return -EOPNOTSUPP;

	if (mlx5_core_is_mp_master(priv->mdev)) {
		mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
				       MPV_DEVCOM_MASTER_UP, priv);
		mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP);
	}

	return 0;
}
/* Undo mlx5e_devcom_init_mpv(): a multiport master first notifies the peer
 * (MASTER_DOWN and IPSEC_MASTER_DOWN, waiting for the IPsec teardown to
 * finish), then the component is unregistered. Safe to call when
 * registration failed (IS_ERR_OR_NULL check).
 */
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
	if (IS_ERR_OR_NULL(priv->devcom))
		return;

	if (mlx5_core_is_mp_master(priv->mdev)) {
		mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN,
				       MPV_DEVCOM_MASTER_DOWN, priv);
		mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN);
	}

	mlx5_devcom_unregister_component(priv->devcom);
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{ {
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
...@@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void * ...@@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void *
return NOTIFY_BAD; return NOTIFY_BAD;
} }
break; break;
case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
if (mlx5e_devcom_init_mpv(priv, data))
return NOTIFY_BAD;
break;
case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
mlx5e_devcom_cleanup_mpv(priv);
break;
default: default:
return NOTIFY_DONE; return NOTIFY_DONE;
} }
......
...@@ -114,9 +114,9 @@ ...@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
* IPsec RoCE policy * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/ */
#define KERNEL_NIC_PRIO_NUM_LEVELS 9 #define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1 #define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */ /* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
...@@ -137,7 +137,7 @@ ...@@ -137,7 +137,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 3 #define KERNEL_TX_IPSEC_NUM_LEVELS 4
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1 #define KERNEL_TX_MACSEC_NUM_PRIOS 1
...@@ -231,7 +231,7 @@ enum { ...@@ -231,7 +231,7 @@ enum {
}; };
#define RDMA_RX_IPSEC_NUM_PRIOS 1 #define RDMA_RX_IPSEC_NUM_PRIOS 1
#define RDMA_RX_IPSEC_NUM_LEVELS 2 #define RDMA_RX_IPSEC_NUM_LEVELS 4
#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS) #define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
...@@ -288,7 +288,7 @@ enum { ...@@ -288,7 +288,7 @@ enum {
#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
#define RDMA_TX_IPSEC_NUM_PRIOS 1 #define RDMA_TX_IPSEC_NUM_PRIOS 2
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
enum mlx5_devcom_component { enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS, MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_NUM_COMPONENTS, MLX5_DEVCOM_NUM_COMPONENTS,
}; };
......
...@@ -4,22 +4,28 @@ ...@@ -4,22 +4,28 @@
#ifndef __MLX5_LIB_IPSEC_H__ #ifndef __MLX5_LIB_IPSEC_H__
#define __MLX5_LIB_IPSEC_H__ #define __MLX5_LIB_IPSEC_H__
#include "lib/devcom.h"
struct mlx5_ipsec_fs; struct mlx5_ipsec_fs;
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family); mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family);
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
u32 family); u32 family, struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_namespace *ns, struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst, struct mlx5_flow_destination *default_dst,
u32 family, u32 level, u32 prio); u32 family, u32 level, u32 prio);
void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce); void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_table *pol_ft); struct mlx5_flow_table *pol_ft,
bool from_event);
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce); void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev); struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
struct mlx5_devcom_comp_dev **devcom);
bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev);
#endif /* __MLX5_LIB_IPSEC_H__ */ #endif /* __MLX5_LIB_IPSEC_H__ */
...@@ -361,6 +361,12 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev) ...@@ -361,6 +361,12 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
} }
EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay); EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
/* Replay a multiport (MP) driver event on this device's blocking notifier
 * chain. Used by the IB driver to broadcast affiliation changes
 * (MLX5_DRIVER_EVENT_AFFILIATION_DONE / _REMOVED) during slave port
 * bind/unbind so listeners such as mlx5e can react.
 */
void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data)
{
	mlx5_blocking_notifier_call_chain(dev, event, data);
}
EXPORT_SYMBOL(mlx5_core_mp_event_replay);
int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode) enum mlx5_cap_mode cap_mode)
{ {
......
...@@ -97,6 +97,22 @@ do { \ ...@@ -97,6 +97,22 @@ do { \
__func__, __LINE__, current->pid, \ __func__, __LINE__, current->pid, \
##__VA_ARGS__) ##__VA_ARGS__)
/* Byte length of the shared secret used for cross-VHCA object access. */
#define ACCESS_KEY_LEN 32
/* Presumably the bit offset of the flow-table type within a flow-table
 * object id — TODO(review): confirm at the (not shown) call sites. */
#define FT_ID_FT_TYPE_OFFSET 24

/* Arguments for exposing a local object to a peer VHCA
 * (see mlx5_cmd_allow_other_vhca_access()). */
struct mlx5_cmd_allow_other_vhca_access_attr {
	u16 obj_type;			/* type of the object being exposed */
	u32 obj_id;			/* id of the object being exposed */
	u8 access_key[ACCESS_KEY_LEN];	/* secret the peer must present */
};

/* Arguments for creating an alias to a remote VHCA's object
 * (see mlx5_cmd_alias_obj_create()). */
struct mlx5_cmd_alias_obj_create_attr {
	u32 obj_id;			/* id of the remote object to alias */
	u16 vhca_id;			/* VHCA owning the remote object */
	u16 obj_type;			/* general-object type of the alias */
	u8 access_key[ACCESS_KEY_LEN];	/* key registered by the owner */
};
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{ {
struct device *device = dev->device; struct device *device = dev->device;
...@@ -343,6 +359,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev); ...@@ -343,6 +359,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
struct mlx5_cmd_allow_other_vhca_access_attr *attr);
int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
struct mlx5_cmd_alias_obj_create_attr *alias_attr,
u32 *obj_id);
int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev) static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{ {
......
...@@ -367,6 +367,8 @@ enum mlx5_driver_event { ...@@ -367,6 +367,8 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
}; };
enum { enum {
......
...@@ -1029,6 +1029,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); ...@@ -1029,6 +1029,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
void mlx5_health_cleanup(struct mlx5_core_dev *dev); void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev);
......
...@@ -312,6 +312,7 @@ enum { ...@@ -312,6 +312,7 @@ enum {
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12, MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_MAX MLX5_CMD_OP_MAX
}; };
...@@ -1934,6 +1935,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1934,6 +1935,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 match_definer_format_supported[0x40]; u8 match_definer_format_supported[0x40];
}; };
enum {
MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
};
enum {
MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
};
struct mlx5_ifc_cmd_hca_cap_2_bits { struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80]; u8 reserved_at_0[0x80];
...@@ -1948,9 +1957,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { ...@@ -1948,9 +1957,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_c0[0x8]; u8 reserved_at_c0[0x8];
u8 migration_multi_load[0x1]; u8 migration_multi_load[0x1];
u8 migration_tracking_state[0x1]; u8 migration_tracking_state[0x1];
u8 reserved_at_ca[0x16]; u8 reserved_at_ca[0x6];
u8 migration_in_chunks[0x1];
u8 reserved_at_d1[0xf];
u8 cross_vhca_object_to_object_supported[0x20];
u8 allowed_object_for_other_vhca_access[0x40];
u8 reserved_at_e0[0xc0]; u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8]; u8 flow_table_type_2_type[0x8];
u8 reserved_at_1a8[0x3]; u8 reserved_at_1a8[0x3];
...@@ -6369,6 +6384,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits { ...@@ -6369,6 +6384,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
}; };
struct mlx5_ifc_allow_other_vhca_access_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x50];
u8 object_type_to_be_accessed[0x10];
u8 object_id_to_be_accessed[0x20];
u8 reserved_at_c0[0x40];
union {
u8 access_key_raw[0x100];
u8 access_key[8][0x20];
};
};
struct mlx5_ifc_allow_other_vhca_access_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_header_arg_bits { struct mlx5_ifc_modify_header_arg_bits {
u8 reserved_at_0[0x80]; u8 reserved_at_0[0x80];
...@@ -6391,6 +6428,24 @@ struct mlx5_ifc_create_match_definer_out_bits { ...@@ -6391,6 +6428,24 @@ struct mlx5_ifc_create_match_definer_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
}; };
struct mlx5_ifc_alias_context_bits {
u8 vhca_id_to_be_accessed[0x10];
u8 reserved_at_10[0xd];
u8 status[0x3];
u8 object_id_to_be_accessed[0x20];
u8 reserved_at_40[0x40];
union {
u8 access_key_raw[0x100];
u8 access_key[8][0x20];
};
u8 metadata[0x80];
};
struct mlx5_ifc_create_alias_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_alias_context_bits alias_ctx;
};
enum { enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
...@@ -11920,6 +11975,7 @@ enum { ...@@ -11920,6 +11975,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
}; };
enum { enum {
...@@ -12395,7 +12451,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits { ...@@ -12395,7 +12451,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits {
u8 op_mod[0x10]; u8 op_mod[0x10];
u8 incremental[0x1]; u8 incremental[0x1];
u8 reserved_at_41[0xf]; u8 chunk[0x1];
u8 reserved_at_42[0xe];
u8 vhca_id[0x10]; u8 vhca_id[0x10];
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
...@@ -12411,7 +12468,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits { ...@@ -12411,7 +12468,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits {
u8 required_umem_size[0x20]; u8 required_umem_size[0x20];
u8 reserved_at_a0[0x160]; u8 reserved_at_a0[0x20];
u8 remaining_total_size[0x40];
u8 reserved_at_100[0x100];
}; };
struct mlx5_ifc_save_vhca_state_in_bits { struct mlx5_ifc_save_vhca_state_in_bits {
...@@ -12443,7 +12504,7 @@ struct mlx5_ifc_save_vhca_state_out_bits { ...@@ -12443,7 +12504,7 @@ struct mlx5_ifc_save_vhca_state_out_bits {
u8 actual_image_size[0x20]; u8 actual_image_size[0x20];
u8 reserved_at_60[0x20]; u8 next_required_umem_size[0x20];
}; };
struct mlx5_ifc_load_vhca_state_in_bits { struct mlx5_ifc_load_vhca_state_in_bits {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment