Commit 1de8fda4 authored by Steffen Klassert

Merge branch 'mlx5 IPsec packet offload support (Part II)'

Leon Romanovsky says:

============
This is the second part of the packet offload implementation.
============
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
parents e8a292d6 37d244ad
......@@ -84,7 +84,8 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
#endif
};
......
......@@ -34,8 +34,6 @@
#ifndef __MLX5E_IPSEC_H__
#define __MLX5E_IPSEC_H__
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/mlx5/device.h>
#include <net/xfrm.h>
#include <linux/idr.h>
......@@ -76,6 +74,10 @@ struct mlx5_accel_esp_xfrm_attrs {
u8 type : 2;
u8 family;
u32 replay_window;
u32 authsize;
u32 reqid;
u64 hard_packet_limit;
u64 soft_packet_limit;
};
enum mlx5_ipsec_cap {
......@@ -86,6 +88,17 @@ enum mlx5_ipsec_cap {
struct mlx5e_priv;
struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_pkts;
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
u64 ipsec_tx_drop_bytes;
};
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
......@@ -99,23 +112,35 @@ struct mlx5e_ipsec_sw_stats {
struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_work {
struct work_struct work;
struct mlx5e_ipsec *ipsec;
u32 id;
};
struct mlx5e_ipsec_aso {
u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
struct mlx5_aso *aso;
/* IPsec ASO caches data on every query call,
 * so in nested calls we can use this boolean to avoid
 * redundant calls to mlx5e_ipsec_aso_query()
 */
*/
u8 use_cache : 1;
};
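The use_cache flag pairs with mlx5e_ipsec_aso_query() later in this series: after one successful query has filled aso->ctx, the flag is set so that nested helpers which would query again simply reuse the cached context instead of posting another WQE. A minimal sketch of that pattern, mirroring mlx5e_ipsec_handle_event() below; do_esn_and_lifetime_checks() is a hypothetical stand-in for the nested helpers:

static void example_handle_event(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_aso *aso = sa_entry->ipsec->aso;

	spin_lock(&sa_entry->x->lock);		/* aso_query asserts this lock */
	if (mlx5e_ipsec_aso_query(sa_entry, NULL))	/* fills aso->ctx once */
		goto unlock;

	aso->use_cache = true;			/* nested queries now hit the cache */
	do_esn_and_lifetime_checks(sa_entry);	/* hypothetical helper; may call
						 * mlx5e_ipsec_aso_query() again */
	aso->use_cache = false;
unlock:
	spin_unlock(&sa_entry->x->lock);
}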
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq;
struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
};
struct mlx5e_ipsec_esn_state {
......@@ -127,6 +152,7 @@ struct mlx5e_ipsec_esn_state {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
};
struct mlx5e_ipsec_modify_state_work {
......@@ -135,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work {
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
......@@ -149,17 +173,43 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_modify_state_work modify_work;
};
struct mlx5_accel_pol_xfrm_attrs {
union {
__be32 a4;
__be32 a6[4];
} saddr;
union {
__be32 a4;
__be32 a6[4];
} daddr;
u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
u32 reqid;
};
struct mlx5e_ipsec_pol_entry {
struct xfrm_policy *x;
struct mlx5e_ipsec *ipsec;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5_accel_pol_xfrm_attrs attrs;
};
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
......@@ -172,11 +222,27 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data);
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets);
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void *ipsec_stats);
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
return sa_entry->ipsec->mdev;
}
static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
return pol_entry->ipsec->mdev;
}
#else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
......
......@@ -6,6 +6,10 @@
#include "ipsec.h"
#include "lib/mlx5.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = 0;
......@@ -83,6 +87,20 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
if (attrs->hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
lower_32_bits(attrs->soft_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
}
}
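Read together with the event handler added below, these bits tie the device's packet counters to the xfrm lifetimes: the hard limit is loaded into remove_flow_pkt_cnt (a down-counter, see mlx5e_ipsec_aso_update_curlft() further down) with remove_flow_enable set so the flow can be retired when it expires, and the soft limit is loaded into remove_flow_soft_lft; when mlx5e_ipsec_handle_event() later finds any of the armed bits cleared, it calls xfrm_state_check_expire(). A small worked example with hypothetical limit values, showing only what the MLX5_SET() calls above would program:

/* Hypothetical values: an SA with a 1000-packet hard limit and a
 * 900-packet soft limit would program the ASO context as:
 *   remove_flow_pkt_cnt  = 1000, hard_lft_arm = 1, remove_flow_enable = 1
 *   remove_flow_soft_lft =  900, soft_lft_arm = 1
 */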
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
......@@ -246,6 +264,113 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_wqe_aso_ctrl_seg data = {};
data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
<< 4;
data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
data.bitwise_data = cpu_to_be64(BIT_ULL(54));
data.data_mask = data.bitwise_data;
mlx5e_ipsec_aso_query(sa_entry, &data);
}
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mode_param)
{
struct mlx5_accel_esp_xfrm_attrs attrs = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
sa_entry->esn_state.esn++;
sa_entry->esn_state.overlap = 0;
} else {
sa_entry->esn_state.overlap = 1;
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
struct mlx5_accel_esp_xfrm_attrs *attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_aso *aso;
struct mlx5e_ipsec *ipsec;
int ret;
sa_entry = xa_load(&work->ipsec->sadb, work->id);
if (!sa_entry)
goto out;
ipsec = sa_entry->ipsec;
aso = ipsec->aso;
attrs = &sa_entry->attrs;
spin_lock(&sa_entry->x->lock);
ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (ret)
goto unlock;
aso->use_cache = true;
if (attrs->esn_trigger &&
!MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
}
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
xfrm_state_check_expire(sa_entry->x);
aso->use_cache = false;
unlock:
spin_unlock(&sa_entry->x->lock);
out:
kfree(work);
}
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
struct mlx5_eqe_obj_change *object;
struct mlx5e_ipsec_work *work;
struct mlx5_eqe *eqe = data;
u16 type;
if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
return NOTIFY_DONE;
object = &eqe->data.obj_change;
type = be16_to_cpu(object->obj_type);
if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
return NOTIFY_DONE;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
work->ipsec = ipsec;
work->id = be32_to_cpu(object->obj_id);
queue_work(ipsec->wq, &work->work);
return NOTIFY_OK;
}
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
......@@ -273,6 +398,9 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
goto err_aso_create;
}
ipsec->nb.notifier_call = mlx5e_ipsec_event;
mlx5_notifier_register(mdev, &ipsec->nb);
ipsec->aso = aso;
return 0;
......@@ -293,8 +421,76 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
aso = ipsec->aso;
pdev = mlx5_core_dma_dev(mdev);
mlx5_notifier_unregister(mdev, &ipsec->nb);
mlx5_aso_destroy(aso->aso);
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
struct mlx5_wqe_aso_ctrl_seg *data)
{
if (!data)
return;
ctrl->data_mask_mode = data->data_mask_mode;
ctrl->condition_1_0_operand = data->condition_1_0_operand;
ctrl->condition_1_0_offset = data->condition_1_0_offset;
ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
ctrl->condition_0_data = data->condition_0_data;
ctrl->condition_0_mask = data->condition_0_mask;
ctrl->condition_1_data = data->condition_1_data;
ctrl->condition_1_mask = data->condition_1_mask;
ctrl->bitwise_data = data->bitwise_data;
ctrl->data_mask = data->data_mask;
}
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5e_hw_objs *res;
struct mlx5_aso_wqe *wqe;
u8 ds_cnt;
lockdep_assert_held(&sa_entry->x->lock);
if (aso->use_cache)
return 0;
res = &mdev->mlx5e_res.hw_objs;
memset(aso->ctx, 0, sizeof(aso->ctx));
wqe = mlx5_aso_get_wqe(aso->aso);
ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
ctrl = &wqe->aso_ctrl;
ctrl->va_l =
cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
ctrl->l_key = cpu_to_be32(res->mkey);
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
return mlx5_aso_poll_cq(aso->aso, false);
}
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
u64 hard_cnt;
hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
/* HW decreases the limit until it reaches zero to fire an event.
 * We need to fix the calculations, so the returned count is the total
 * number of passed packets and not how many are left.
 */
*/
*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}
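The subtraction inverts the down-counter programmed in mlx5e_ipsec_packet_setup(): remove_flow_pkt_cnt starts at the hard packet limit and decreases, so the hard limit minus the remaining count yields the packets already processed. A minimal standalone sketch of the same arithmetic (helper name and values are hypothetical):

static u64 example_passed_packets(u64 hard_packet_limit, u32 remaining_cnt)
{
	/* e.g. limit 1000, ASO now reads 250 remaining -> 750 packets passed */
	return hard_packet_limit - remaining_cnt;
}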
......@@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
struct mlx5e_priv *priv;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_ipsec *ipsec = priv->ipsec;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_offload *xo;
struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
priv = netdev_priv(netdev);
sp = secpath_set(skb);
if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return;
}
xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
if (unlikely(!xs)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
rcu_read_lock();
sa_entry = xa_load(&ipsec->sadb, sa_handle);
if (unlikely(!sa_entry)) {
rcu_read_unlock();
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return;
}
xfrm_state_hold(sa_entry->x);
rcu_read_unlock();
sp->xvec[sp->len++] = xs;
sp->xvec[sp->len++] = sa_entry->x;
sp->olen++;
xo = xfrm_offload(skb);
......@@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
......@@ -37,6 +37,17 @@
#include "en.h"
#include "ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
};
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
......@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
if (!priv->ipsec)
return 0;
return NUM_IPSEC_HW_COUNTERS;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
unsigned int i;
if (!priv->ipsec)
return idx;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ipsec_hw_stats_desc[i].format);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
{
int i;
if (!priv->ipsec)
return idx;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
mlx5e_ipsec_hw_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
......@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
......@@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_hw),
&MLX5E_STATS_GRP(ipsec_sw),
#endif
&MLX5E_STATS_GRP(tls),
......
......@@ -490,6 +490,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
......
......@@ -19,6 +19,7 @@
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
......@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
......
......@@ -111,8 +111,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
#define KERNEL_NIC_PRIO_NUM_LEVELS 8
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
......@@ -133,7 +133,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 2
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1
......
......@@ -15,6 +15,7 @@
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
#define ASO_CTRL_READ_EN BIT(0)
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
__be32 va_l; /* include read_enable */
......