Commit e8a292d6 authored by Steffen Klassert

Merge branch 'mlx5 IPsec packet offload support (Part I)'

Leon Romanovsky says:

============
This series follows the previously sent "Extend XFRM core to allow packet
offload configuration" series [1].

It is the first part, with the mlx5 refactoring that allows us to natively
extend the mlx5 IPsec logic to support both crypto and packet offload.
============
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
parents 89ae6573 a8e05293
@@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
 int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
 int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
 #endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
 #endif /* __MLX5_EN_H__ */
@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
 			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
 
 	aso_ctrl = &aso_wqe->aso_ctrl;
-	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
 	aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
 	aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
 					  MLX5_ASO_ALWAYS_TRUE << 4;
......
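The memset() removed above is not lost: mlx5_aso_get_wqe() now returns an
already-zeroed WQE, as the lib/aso.c hunk further down shows. A minimal caller
sketch under that assumption; "aso", "ds_cnt" and "obj_id" stand in for the
caller's own state:

	struct mlx5_aso_wqe *wqe;

	/* the WQE is memset() inside mlx5_aso_get_wqe(), so users such as
	 * the flow meter no longer need to clear aso_ctrl themselves
	 */
	wqe = mlx5_aso_get_wqe(aso);
	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);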
@@ -162,28 +162,21 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	/* esn */
 	if (sa_entry->esn_state.trigger) {
-		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+		attrs->esn_trigger = true;
 		attrs->esn = sa_entry->esn_state.esn;
-		if (sa_entry->esn_state.overlap)
-			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+		attrs->esn_overlap = sa_entry->esn_state.overlap;
+		attrs->replay_window = x->replay_esn->replay_window;
 	}
 
-	/* action */
-	attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
-			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
-			MLX5_ACCEL_ESP_ACTION_DECRYPT;
-	/* flags */
-	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
-			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
-			MLX5_ACCEL_ESP_FLAGS_TUNNEL;
-
+	attrs->dir = x->xso.dir;
 	/* spi */
 	attrs->spi = be32_to_cpu(x->id.spi);
 
 	/* source , destination ips */
 	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
 	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
-	attrs->is_ipv6 = (x->props.family != AF_INET);
+	attrs->family = x->props.family;
+	attrs->type = x->xso.type;
 }
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -257,6 +250,17 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 		netdev_info(netdev, "Unsupported xfrm offload type\n");
 		return -EINVAL;
 	}
+	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+		    x->replay_esn->replay_window != 64 &&
+		    x->replay_esn->replay_window != 128 &&
+		    x->replay_esn->replay_window != 256) {
+			netdev_info(netdev,
+				    "Unsupported replay window size %u\n",
+				    x->replay_esn->replay_window);
+			return -EINVAL;
+		}
+	}
 	return 0;
 }
@@ -303,7 +307,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	if (err)
 		goto err_xfrm;
 
-	err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
+	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
 	if (err)
 		goto err_hw_ctx;
...@@ -321,7 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) ...@@ -321,7 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
goto out; goto out;
err_add_rule: err_add_rule:
mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry); mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx: err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry); mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm: err_xfrm:
@@ -341,10 +345,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
-	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
 
 	cancel_work_sync(&sa_entry->modify_work.work);
-	mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
 	mlx5_ipsec_free_sa_ctx(sa_entry);
 	kfree(sa_entry);
 }
@@ -371,15 +374,26 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 	if (!ipsec->wq)
 		goto err_wq;
 
+	if (mlx5_ipsec_device_caps(priv->mdev) &
+	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
+		ret = mlx5e_ipsec_aso_init(ipsec);
+		if (ret)
+			goto err_aso;
+	}
+
 	ret = mlx5e_accel_ipsec_fs_init(ipsec);
 	if (ret)
 		goto err_fs_init;
 
+	ipsec->fs = priv->fs;
 	priv->ipsec = ipsec;
 	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
 	return;
 
 err_fs_init:
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		mlx5e_ipsec_aso_cleanup(ipsec);
+err_aso:
 	destroy_workqueue(ipsec->wq);
 err_wq:
 	kfree(ipsec);
@@ -395,6 +409,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
 		return;
 
 	mlx5e_accel_ipsec_fs_cleanup(ipsec);
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+		mlx5e_ipsec_aso_cleanup(ipsec);
 	destroy_workqueue(ipsec->wq);
 	kfree(ipsec);
 	priv->ipsec = NULL;
......
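The two mlx5e_ipsec_init()/mlx5e_ipsec_cleanup() hunks above imply an ordering
contract: the ASO context is created before the steering tables that use it and
destroyed after them, both gated on the same capability bit. Condensed to a
sketch (error handling elided):

	/* init */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_init(ipsec);
	mlx5e_accel_ipsec_fs_init(ipsec);

	/* cleanup, strictly the reverse order */
	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);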
@@ -39,22 +39,11 @@
 #include <linux/mlx5/device.h>
 #include <net/xfrm.h>
 #include <linux/idr.h>
+#include "lib/aso.h"
 
 #define MLX5E_IPSEC_SADB_RX_BITS 10
 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
 
-enum mlx5_accel_esp_flags {
-	MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0,    /* Default */
-	MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
-	MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
-	MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
-	MLX5_ACCEL_ESP_ACTION_DECRYPT,
-	MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
 struct aes_gcm_keymat {
 	u64   seq_iv;
@@ -66,7 +55,6 @@ struct aes_gcm_keymat {
 };
 
 struct mlx5_accel_esp_xfrm_attrs {
-	enum mlx5_accel_esp_action action;
 	u32   esn;
 	u32   spi;
 	u32   flags;
@@ -82,12 +70,18 @@ struct mlx5_accel_esp_xfrm_attrs {
 		__be32 a6[4];
 	} daddr;
 
-	u8 is_ipv6;
+	u8 dir : 2;
+	u8 esn_overlap : 1;
+	u8 esn_trigger : 1;
+	u8 type : 2;
+	u8 family;
+	u32 replay_window;
 };
 
 enum mlx5_ipsec_cap {
 	MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
 	MLX5_IPSEC_CAP_ESN = 1 << 1,
+	MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
 };
 
 struct mlx5e_priv;
@@ -102,17 +96,26 @@ struct mlx5e_ipsec_sw_stats {
 	atomic64_t ipsec_tx_drop_trailer;
 };
 
-struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_rx;
 struct mlx5e_ipsec_tx;
 
+struct mlx5e_ipsec_aso {
+	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+	dma_addr_t dma_addr;
+	struct mlx5_aso *aso;
+};
+
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
 	DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
 	spinlock_t sadb_rx_lock; /* Protects sadb_rx */
 	struct mlx5e_ipsec_sw_stats sw_stats;
 	struct workqueue_struct *wq;
-	struct mlx5e_accel_fs_esp *rx_fs;
-	struct mlx5e_ipsec_tx *tx_fs;
+	struct mlx5e_flow_steering *fs;
+	struct mlx5e_ipsec_rx *rx_ipv4;
+	struct mlx5e_ipsec_rx *rx_ipv6;
+	struct mlx5e_ipsec_tx *tx;
+	struct mlx5e_ipsec_aso *aso;
 };
 
 struct mlx5e_ipsec_esn_state {
 struct mlx5e_ipsec_esn_state {
@@ -123,7 +126,7 @@ struct mlx5e_ipsec_esn_state {
 struct mlx5e_ipsec_rule {
 	struct mlx5_flow_handle *rule;
-	struct mlx5_modify_hdr *set_modify_hdr;
+	struct mlx5_modify_hdr *modify_hdr;
 };
 
 struct mlx5e_ipsec_modify_state_work {
@@ -155,10 +158,8 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
-				  struct mlx5e_ipsec_sa_entry *sa_entry);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
-				   struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -168,6 +169,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
 void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 				const struct mlx5_accel_esp_xfrm_attrs *attrs);
 
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
+
 static inline struct mlx5_core_dev *
 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
......
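The 2-bit dir and type fields added to mlx5_accel_esp_xfrm_attrs above are
sized for the xfrm offload descriptor values they mirror. A hedged sketch of
how they are filled (this repeats the assignment in
mlx5e_ipsec_build_accel_xfrm_attrs(), shown only to spell out the value
ranges):

	attrs->dir = x->xso.dir;	/* XFRM_DEV_OFFLOAD_IN or XFRM_DEV_OFFLOAD_OUT */
	attrs->type = x->xso.type;	/* XFRM_DEV_OFFLOAD_CRYPTO or XFRM_DEV_OFFLOAD_PACKET */
	attrs->family = x->props.family;	/* AF_INET or AF_INET6, replaces is_ipv6 */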
@@ -9,53 +9,57 @@
 #define NUM_IPSEC_FTE BIT(15)
 
-enum accel_fs_esp_type {
-	ACCEL_FS_ESP4,
-	ACCEL_FS_ESP6,
-	ACCEL_FS_ESP_NUM_TYPES,
+struct mlx5e_ipsec_ft {
+	struct mutex mutex; /* Protect changes to this struct */
+	struct mlx5_flow_table *sa;
+	struct mlx5_flow_table *status;
+	u32 refcnt;
 };
 
-struct mlx5e_ipsec_rx_err {
-	struct mlx5_flow_table *ft;
+struct mlx5e_ipsec_miss {
+	struct mlx5_flow_group *group;
 	struct mlx5_flow_handle *rule;
-	struct mlx5_modify_hdr *copy_modify_hdr;
 };
 
-struct mlx5e_accel_fs_esp_prot {
-	struct mlx5_flow_table *ft;
-	struct mlx5_flow_group *miss_group;
-	struct mlx5_flow_handle *miss_rule;
-	struct mlx5_flow_destination default_dest;
-	struct mlx5e_ipsec_rx_err rx_err;
-	u32 refcnt;
-	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
-};
-
-struct mlx5e_accel_fs_esp {
-	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
+struct mlx5e_ipsec_rx {
+	struct mlx5e_ipsec_ft ft;
+	struct mlx5e_ipsec_miss sa;
+	struct mlx5e_ipsec_rule status;
 };
 
 struct mlx5e_ipsec_tx {
+	struct mlx5e_ipsec_ft ft;
 	struct mlx5_flow_namespace *ns;
-	struct mlx5_flow_table *ft;
-	struct mutex mutex; /* Protect IPsec TX steering */
-	u32 refcnt;
 };
 
 /* IPsec RX flow steering */
-static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+static enum mlx5_traffic_types family2tt(u32 family)
 {
-	if (i == ACCEL_FS_ESP4)
+	if (family == AF_INET)
 		return MLX5_TT_IPV4_IPSEC_ESP;
 	return MLX5_TT_IPV6_IPSEC_ESP;
 }
-static int rx_err_add_rule(struct mlx5e_priv *priv,
-			   struct mlx5e_accel_fs_esp_prot *fs_prot,
-			   struct mlx5e_ipsec_rx_err *rx_err)
+static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
+					       int level, int prio,
+					       int max_num_groups)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+
+	ft_attr.autogroup.num_reserved_entries = 1;
+	ft_attr.autogroup.max_num_groups = max_num_groups;
+	ft_attr.max_fte = NUM_IPSEC_FTE;
+	ft_attr.level = level;
+	ft_attr.prio = prio;
+
+	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
+static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+			     struct mlx5e_ipsec_rx *rx,
+			     struct mlx5_flow_destination *dest)
 {
 	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_modify_hdr *modify_hdr;
 	struct mlx5_flow_handle *fte;
@@ -79,8 +83,8 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
 	if (IS_ERR(modify_hdr)) {
 		err = PTR_ERR(modify_hdr);
-		netdev_err(priv->netdev,
-			   "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+		mlx5_core_err(mdev,
+			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
 		goto out_spec;
 	}
@@ -88,17 +92,16 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	flow_act.modify_hdr = modify_hdr;
-	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
-				  &fs_prot->default_dest, 1);
+	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 1);
 	if (IS_ERR(fte)) {
 		err = PTR_ERR(fte);
-		netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
 		goto out;
 	}
 
 	kvfree(spec);
-	rx_err->rule = fte;
-	rx_err->copy_modify_hdr = modify_hdr;
+	rx->status.rule = fte;
+	rx->status.modify_hdr = modify_hdr;
 	return 0;
 
 out:
@@ -108,13 +111,12 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
 	return err;
 }
 
-static int rx_fs_create(struct mlx5e_priv *priv,
-			struct mlx5e_accel_fs_esp_prot *fs_prot)
+static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+			     struct mlx5_flow_table *ft,
+			     struct mlx5e_ipsec_miss *miss,
+			     struct mlx5_flow_destination *dest)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_table *ft = fs_prot->ft;
-	struct mlx5_flow_group *miss_group;
-	struct mlx5_flow_handle *miss_rule;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_spec *spec;
 	u32 *flow_group_in;
@@ -130,359 +132,384 @@ static int rx_fs_create(struct mlx5e_priv *priv,
 	/* Create miss_group */
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
-	miss_group = mlx5_create_flow_group(ft, flow_group_in);
-	if (IS_ERR(miss_group)) {
-		err = PTR_ERR(miss_group);
-		netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
+	miss->group = mlx5_create_flow_group(ft, flow_group_in);
+	if (IS_ERR(miss->group)) {
+		err = PTR_ERR(miss->group);
+		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
+			      err);
 		goto out;
 	}
-	fs_prot->miss_group = miss_group;
 
 	/* Create miss rule */
-	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
-	if (IS_ERR(miss_rule)) {
-		mlx5_destroy_flow_group(fs_prot->miss_group);
-		err = PTR_ERR(miss_rule);
-		netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
+	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+	if (IS_ERR(miss->rule)) {
+		mlx5_destroy_flow_group(miss->group);
+		err = PTR_ERR(miss->rule);
+		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
+			      err);
 		goto out;
 	}
-	fs_prot->miss_rule = miss_rule;
 
 out:
 	kvfree(flow_group_in);
 	kvfree(spec);
 	return err;
 }
-static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
 {
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
-	struct mlx5e_accel_fs_esp *accel_esp;
-
-	accel_esp = priv->ipsec->rx_fs;
-
-	/* The netdev unreg already happened, so all offloaded rule are already removed */
-	fs_prot = &accel_esp->fs_prot[type];
+	mlx5_del_flow_rules(rx->sa.rule);
+	mlx5_destroy_flow_group(rx->sa.group);
+	mlx5_destroy_flow_table(rx->ft.sa);
 
-	mlx5_del_flow_rules(fs_prot->miss_rule);
-	mlx5_destroy_flow_group(fs_prot->miss_group);
-	mlx5_destroy_flow_table(fs_prot->ft);
-
-	mlx5_del_flow_rules(fs_prot->rx_err.rule);
-	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
-	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+	mlx5_del_flow_rules(rx->status.rule);
+	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+	mlx5_destroy_flow_table(rx->ft.status);
 }
-static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+		     struct mlx5e_ipsec_rx *rx, u32 family)
 {
-	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-	struct mlx5_flow_table_attr ft_attr = {};
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
-	struct mlx5e_accel_fs_esp *accel_esp;
+	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+	struct mlx5_flow_destination dest;
 	struct mlx5_flow_table *ft;
 	int err;
 
-	accel_esp = priv->ipsec->rx_fs;
-	fs_prot = &accel_esp->fs_prot[type];
-	fs_prot->default_dest =
-		mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
-
-	ft_attr.max_fte = 1;
-	ft_attr.autogroup.max_num_groups = 1;
-	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
-	ft_attr.prio = MLX5E_NIC_PRIO;
-	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+			     MLX5E_NIC_PRIO, 1);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
-	fs_prot->rx_err.ft = ft;
-	err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
+	rx->ft.status = ft;
+
+	dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+	err = ipsec_status_rule(mdev, rx, &dest);
 	if (err)
 		goto err_add;
 
 	/* Create FT */
-	ft_attr.max_fte = NUM_IPSEC_FTE;
-	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
-	ft_attr.prio = MLX5E_NIC_PRIO;
-	ft_attr.autogroup.num_reserved_entries = 1;
-	ft_attr.autogroup.max_num_groups = 1;
-	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
+			     1);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
 	}
-	fs_prot->ft = ft;
+	rx->ft.sa = ft;
 
-	err = rx_fs_create(priv, fs_prot);
+	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &dest);
 	if (err)
 		goto err_fs;
 
 	return 0;
 
 err_fs:
-	mlx5_destroy_flow_table(fs_prot->ft);
+	mlx5_destroy_flow_table(rx->ft.sa);
 err_fs_ft:
-	mlx5_del_flow_rules(fs_prot->rx_err.rule);
-	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+	mlx5_del_flow_rules(rx->status.rule);
+	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
-	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+	mlx5_destroy_flow_table(rx->ft.status);
 	return err;
 }
-static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
+					struct mlx5e_ipsec *ipsec, u32 family)
 {
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
 	struct mlx5_flow_destination dest = {};
-	struct mlx5e_accel_fs_esp *accel_esp;
+	struct mlx5e_ipsec_rx *rx;
 	int err = 0;
 
-	accel_esp = priv->ipsec->rx_fs;
-	fs_prot = &accel_esp->fs_prot[type];
-	mutex_lock(&fs_prot->prot_mutex);
-	if (fs_prot->refcnt)
+	if (family == AF_INET)
+		rx = ipsec->rx_ipv4;
+	else
+		rx = ipsec->rx_ipv6;
+
+	mutex_lock(&rx->ft.mutex);
+	if (rx->ft.refcnt)
 		goto skip;
 
 	/* create FT */
-	err = rx_create(priv, type);
+	err = rx_create(mdev, ipsec, rx, family);
 	if (err)
 		goto out;
 
 	/* connect */
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = fs_prot->ft;
-	mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
+	dest.ft = rx->ft.sa;
+	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
 
 skip:
-	fs_prot->refcnt++;
+	rx->ft.refcnt++;
 out:
-	mutex_unlock(&fs_prot->prot_mutex);
-	return err;
+	mutex_unlock(&rx->ft.mutex);
+	if (err)
+		return ERR_PTR(err);
+	return rx;
 }
-static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+		      u32 family)
 {
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
-	struct mlx5e_accel_fs_esp *accel_esp;
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+	struct mlx5e_ipsec_rx *rx;
 
-	accel_esp = priv->ipsec->rx_fs;
-	fs_prot = &accel_esp->fs_prot[type];
-	mutex_lock(&fs_prot->prot_mutex);
-	fs_prot->refcnt--;
-	if (fs_prot->refcnt)
+	if (family == AF_INET)
+		rx = ipsec->rx_ipv4;
+	else
+		rx = ipsec->rx_ipv6;
+
+	mutex_lock(&rx->ft.mutex);
+	rx->ft.refcnt--;
+	if (rx->ft.refcnt)
 		goto out;
 
 	/* disconnect */
-	mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
+	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
 
 	/* remove FT */
-	rx_destroy(priv, type);
+	rx_destroy(mdev, rx);
 
 out:
-	mutex_unlock(&fs_prot->prot_mutex);
+	mutex_unlock(&rx->ft.mutex);
 }
 /* IPsec TX flow steering */
-static int tx_create(struct mlx5e_priv *priv)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
 {
-	struct mlx5_flow_table_attr ft_attr = {};
-	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct mlx5_flow_table *ft;
-	int err;
 
-	ft_attr.max_fte = NUM_IPSEC_FTE;
-	ft_attr.autogroup.max_num_groups = 1;
-	ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
-	if (IS_ERR(ft)) {
-		err = PTR_ERR(ft);
-		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
-		return err;
-	}
-	ipsec->tx_fs->ft = ft;
+	ft = ipsec_ft_create(tx->ns, 0, 0, 1);
+	if (IS_ERR(ft))
+		return PTR_ERR(ft);
+
+	tx->ft.sa = ft;
 	return 0;
 }
-static int tx_ft_get(struct mlx5e_priv *priv)
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+					struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 	int err = 0;
 
-	mutex_lock(&tx_fs->mutex);
-	if (tx_fs->refcnt)
+	mutex_lock(&tx->ft.mutex);
+	if (tx->ft.refcnt)
 		goto skip;
 
-	err = tx_create(priv);
+	err = tx_create(mdev, tx);
 	if (err)
 		goto out;
 skip:
-	tx_fs->refcnt++;
+	tx->ft.refcnt++;
 out:
-	mutex_unlock(&tx_fs->mutex);
-	return err;
+	mutex_unlock(&tx->ft.mutex);
+	if (err)
+		return ERR_PTR(err);
+	return tx;
 }
-static void tx_ft_put(struct mlx5e_priv *priv)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 
-	mutex_lock(&tx_fs->mutex);
-	tx_fs->refcnt--;
-	if (tx_fs->refcnt)
+	mutex_lock(&tx->ft.mutex);
+	tx->ft.refcnt--;
+	if (tx->ft.refcnt)
 		goto out;
 
-	mlx5_destroy_flow_table(tx_fs->ft);
+	mlx5_destroy_flow_table(tx->ft.sa);
 out:
-	mutex_unlock(&tx_fs->mutex);
+	mutex_unlock(&tx->ft.mutex);
 }
-static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
-			     u32 ipsec_obj_id,
-			     struct mlx5_flow_spec *spec,
-			     struct mlx5_flow_act *flow_act)
-{
-	u8 ip_version = attrs->is_ipv6 ? 6 : 4;
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
-
-	/* ip_version */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
-
-	/* Non fragmented */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
-
-	/* ESP header */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
-
-	/* SPI number */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
-	MLX5_SET(fte_match_param, spec->match_value,
-		 misc_parameters.outer_esp_spi, attrs->spi);
-
-	if (ip_version == 4) {
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       &attrs->saddr.a4, 4);
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       &attrs->daddr.a4, 4);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-	} else {
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       &attrs->saddr.a6, 16);
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &attrs->daddr.a6, 16);
-		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       0xff, 16);
-		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, 16);
-	}
-
-	flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
-	flow_act->crypto.obj_id = ipsec_obj_id;
-	flow_act->flags |= FLOW_ACT_NO_APPEND;
-}
+static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
+			    __be32 *daddr)
+{
+	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+
+	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
+
+static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
+			    __be32 *daddr)
+{
+	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+
+	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+}
+
+static void setup_fte_esp(struct mlx5_flow_spec *spec)
+{
+	/* ESP header */
+	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+}
+
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+{
+	/* SPI number */
+	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+}
+
+static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+{
+	/* Non fragmented */
+	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+}
+
+static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
+{
+	/* Add IPsec indicator in metadata_reg_a */
+	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+	MLX5_SET(fte_match_param, spec->match_criteria,
+		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
+	MLX5_SET(fte_match_param, spec->match_value,
+		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
+static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+			       struct mlx5_flow_act *flow_act)
+{
+	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+	enum mlx5_flow_namespace_type ns_type;
+	struct mlx5_modify_hdr *modify_hdr;
+
+	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+	switch (dir) {
+	case XFRM_DEV_OFFLOAD_IN:
+		MLX5_SET(set_action_in, action, field,
+			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	MLX5_SET(set_action_in, action, data, val);
+	MLX5_SET(set_action_in, action, offset, 0);
+	MLX5_SET(set_action_in, action, length, 32);
+
+	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+	if (IS_ERR(modify_hdr)) {
+		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
+			      PTR_ERR(modify_hdr));
+		return PTR_ERR(modify_hdr);
+	}
+
+	flow_act->modify_hdr = modify_hdr;
+	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+	return 0;
+}
-static int rx_add_rule(struct mlx5e_priv *priv,
-		       struct mlx5e_ipsec_sa_entry *sa_entry)
+static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
-	u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
-	struct mlx5_modify_hdr *modify_hdr = NULL;
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
+	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
 	struct mlx5_flow_destination dest = {};
-	struct mlx5e_accel_fs_esp *accel_esp;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
-	enum accel_fs_esp_type type;
 	struct mlx5_flow_spec *spec;
-	int err = 0;
+	struct mlx5e_ipsec_rx *rx;
+	int err;
 
-	accel_esp = priv->ipsec->rx_fs;
-	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
-	fs_prot = &accel_esp->fs_prot[type];
-	err = rx_ft_get(priv, type);
-	if (err)
-		return err;
+	rx = rx_ft_get(mdev, ipsec, attrs->family);
+	if (IS_ERR(rx))
+		return PTR_ERR(rx);
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
 		err = -ENOMEM;
-		goto out_err;
+		goto err_alloc;
 	}
 
-	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+	if (attrs->family == AF_INET)
+		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+	else
+		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-	/* Set bit[31] ipsec marker */
-	/* Set bit[23-0] ipsec_obj_id */
-	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
-	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-	MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
-	MLX5_SET(set_action_in, action, offset, 0);
-	MLX5_SET(set_action_in, action, length, 32);
+	setup_fte_spi(spec, attrs->spi);
+	setup_fte_esp(spec);
+	setup_fte_no_frags(spec);
 
-	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
-					      1, action);
-	if (IS_ERR(modify_hdr)) {
-		err = PTR_ERR(modify_hdr);
-		netdev_err(priv->netdev,
-			   "fail to alloc ipsec set modify_header_id err=%d\n", err);
-		modify_hdr = NULL;
-		goto out_err;
-	}
+	err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
+				  XFRM_DEV_OFFLOAD_IN, &flow_act);
+	if (err)
+		goto err_mod_header;
 
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
-			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+	flow_act.flags |= FLOW_ACT_NO_APPEND;
+
+	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+			   MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	flow_act.modify_hdr = modify_hdr;
-	dest.ft = fs_prot->rx_err.ft;
-	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+	dest.ft = rx->ft.status;
+	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-			   attrs->action, err);
-		goto out_err;
+		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
+		goto err_add_flow;
 	}
+	kvfree(spec);
 
 	ipsec_rule->rule = rule;
-	ipsec_rule->set_modify_hdr = modify_hdr;
-	goto out;
+	ipsec_rule->modify_hdr = flow_act.modify_hdr;
+	return 0;
 
-out_err:
-	if (modify_hdr)
-		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
-	rx_ft_put(priv, type);
-
-out:
+err_add_flow:
+	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
 	kvfree(spec);
+err_alloc:
+	rx_ft_put(mdev, ipsec, attrs->family);
 	return err;
 }
-static int tx_add_rule(struct mlx5e_priv *priv,
-		       struct mlx5e_ipsec_sa_entry *sa_entry)
+static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
+	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
+	struct mlx5e_ipsec_tx *tx;
 	int err = 0;
 
-	err = tx_ft_get(priv);
-	if (err)
-		return err;
+	tx = tx_ft_get(mdev, ipsec);
+	if (IS_ERR(tx))
+		return PTR_ERR(tx);
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
@@ -490,23 +517,25 @@ static int tx_add_rule(struct mlx5e_priv *priv,
 		goto out;
 	}
 
-	setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
-			 &flow_act);
+	if (attrs->family == AF_INET)
+		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+	else
+		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-	/* Add IPsec indicator in metadata_reg_a */
-	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
-		 MLX5_ETH_WQE_FT_META_IPSEC);
-	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
-		 MLX5_ETH_WQE_FT_META_IPSEC);
+	setup_fte_spi(spec, attrs->spi);
+	setup_fte_esp(spec);
+	setup_fte_no_frags(spec);
+	setup_fte_reg_a(spec);
 
+	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+	flow_act.flags |= FLOW_ACT_NO_APPEND;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
 			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
-	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-			   sa_entry->attrs.action, err);
+		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
 		goto out;
 	}
@@ -515,65 +544,55 @@ static int tx_add_rule(struct mlx5e_priv *priv,
 out:
 	kvfree(spec);
 	if (err)
-		tx_ft_put(priv);
+		tx_ft_put(ipsec);
 	return err;
 }
 
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
-				  struct mlx5e_ipsec_sa_entry *sa_entry)
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
-		return tx_add_rule(priv, sa_entry);
+	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+		return tx_add_rule(sa_entry);
 
-	return rx_add_rule(priv, sa_entry);
+	return rx_add_rule(sa_entry);
 }
 
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
-				   struct mlx5e_ipsec_sa_entry *sa_entry)
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
 
 	mlx5_del_flow_rules(ipsec_rule->rule);
 
-	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
-		tx_ft_put(priv);
+	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
+		tx_ft_put(sa_entry->ipsec);
 		return;
 	}
 
-	mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
-	rx_ft_put(priv,
-		  sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+	rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
 }
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
-	struct mlx5e_accel_fs_esp *accel_esp;
-	enum accel_fs_esp_type i;
-
-	if (!ipsec->rx_fs)
+	if (!ipsec->tx)
 		return;
 
-	mutex_destroy(&ipsec->tx_fs->mutex);
-	WARN_ON(ipsec->tx_fs->refcnt);
-	kfree(ipsec->tx_fs);
+	mutex_destroy(&ipsec->tx->ft.mutex);
+	WARN_ON(ipsec->tx->ft.refcnt);
+	kfree(ipsec->tx);
 
-	accel_esp = ipsec->rx_fs;
-	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
-		fs_prot = &accel_esp->fs_prot[i];
-		mutex_destroy(&fs_prot->prot_mutex);
-		WARN_ON(fs_prot->refcnt);
-	}
-	kfree(ipsec->rx_fs);
+	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
+	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
+	kfree(ipsec->rx_ipv4);
+
+	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
+	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
+	kfree(ipsec->rx_ipv6);
 }
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_accel_fs_esp_prot *fs_prot;
-	struct mlx5e_accel_fs_esp *accel_esp;
 	struct mlx5_flow_namespace *ns;
-	enum accel_fs_esp_type i;
 	int err = -ENOMEM;
 
 	ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -581,26 +600,28 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	if (!ns)
 		return -EOPNOTSUPP;
 
-	ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
-	if (!ipsec->tx_fs)
+	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
+	if (!ipsec->tx)
 		return -ENOMEM;
 
-	ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
-	if (!ipsec->rx_fs)
-		goto err_rx;
+	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
+	if (!ipsec->rx_ipv4)
+		goto err_rx_ipv4;
 
-	mutex_init(&ipsec->tx_fs->mutex);
-	ipsec->tx_fs->ns = ns;
+	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
+	if (!ipsec->rx_ipv6)
+		goto err_rx_ipv6;
 
-	accel_esp = ipsec->rx_fs;
-	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
-		fs_prot = &accel_esp->fs_prot[i];
-		mutex_init(&fs_prot->prot_mutex);
-	}
+	mutex_init(&ipsec->tx->ft.mutex);
+	mutex_init(&ipsec->rx_ipv4->ft.mutex);
+	mutex_init(&ipsec->rx_ipv6->ft.mutex);
+	ipsec->tx->ns = ns;
 
 	return 0;
 
-err_rx:
-	kfree(ipsec->tx_fs);
+err_rx_ipv6:
+	kfree(ipsec->rx_ipv4);
+err_rx_ipv4:
+	kfree(ipsec->tx);
 	return err;
 }
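One detail the refactoring above makes less obvious: the RX SA rule still
stamps metadata register B with the same value the old inline comments
documented, only now through setup_modify_header(). Per the comments removed
from rx_add_rule(), the layout is:

	/* REG_B value programmed for RX SAs:
	 *   bit  31   - IPsec marker
	 *   bits 23:0 - ipsec_obj_id of the matching SA
	 */
	u32 reg_b = sa_entry->ipsec_obj_id | BIT(31);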
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
 
 #include "mlx5_core.h"
+#include "en.h"
 #include "ipsec.h"
 #include "lib/mlx5.h"
@@ -31,6 +32,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
 		caps |= MLX5_IPSEC_CAP_CRYPTO;
 
+	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+		caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
 	if (!caps)
 		return 0;
@@ -46,6 +53,38 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 }
 EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
 
+static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+				     struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	void *aso_ctx;
+
+	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+	if (attrs->esn_trigger) {
+		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
+
+		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+				 attrs->replay_window / 64);
+			MLX5_SET(ipsec_aso, aso_ctx, mode,
+				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+		}
+	}
+
+	/* ASO context */
+	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
+	MLX5_SET(ipsec_obj, obj, full_offload, 1);
+	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
+	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type-C register that flow
+	 * steering matches against. Note that this register was chosen
+	 * arbitrarily and can't be used elsewhere while IPsec packet
+	 * offload is active.
+	 */
+	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+}
+
 static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -54,6 +93,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
 	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
 	void *obj, *salt_p, *salt_iv_p;
+	struct mlx5e_hw_objs *res;
 	int err;
 
 	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
@@ -66,11 +106,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
 	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
 	/* esn */
-	if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+	if (attrs->esn_trigger) {
 		MLX5_SET(ipsec_obj, obj, esn_en, 1);
 		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-		if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-			MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
 	}
 
 	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -81,6 +120,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
 		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
 
+	res = &mdev->mlx5e_res.hw_objs;
+	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+
 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 	if (!err)
 		sa_entry->ipsec_obj_id =
@@ -152,7 +195,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
 	void *obj;
 	int err;
 
-	if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+	if (!attrs->esn_trigger)
 		return 0;
 
 	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -183,8 +226,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
 		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
 		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
 	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-	if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-		MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
 
 	/* general object fields set */
 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -203,3 +245,56 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
 }
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_aso *aso;
+	struct mlx5e_hw_objs *res;
+	struct device *pdev;
+	int err;
+
+	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
+	if (!aso)
+		return -ENOMEM;
+
+	res = &mdev->mlx5e_res.hw_objs;
+
+	pdev = mlx5_core_dma_dev(mdev);
+	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
+				       DMA_BIDIRECTIONAL);
+	err = dma_mapping_error(pdev, aso->dma_addr);
+	if (err)
+		goto err_dma;
+
+	aso->aso = mlx5_aso_create(mdev, res->pdn);
+	if (IS_ERR(aso->aso)) {
+		err = PTR_ERR(aso->aso);
+		goto err_aso_create;
+	}
+
+	ipsec->aso = aso;
+	return 0;
+
+err_aso_create:
+	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+			 DMA_BIDIRECTIONAL);
+err_dma:
+	kfree(aso);
+	return err;
+}
+
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_aso *aso;
+	struct device *pdev;
+
+	aso = ipsec->aso;
+	pdev = mlx5_core_dma_dev(mdev);
+
+	mlx5_aso_destroy(aso->aso);
+	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+			 DMA_BIDIRECTIONAL);
+	kfree(aso);
+}
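As a worked example of mlx5e_ipsec_packet_setup() above: an inbound
packet-offload SA with ESN on and a 128-entry replay window (one of the sizes
accepted by mlx5e_xfrm_validate_state()) would program the ASO context roughly
as follows, with field semantics per the mlx5_ifc layout below:

	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
	MLX5_SET(ipsec_aso, aso_ctx, window_sz, 128 / 64);	/* == 2 */
	MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_REPLAY_PROTECTION);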
@@ -353,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
 	cseg->general_id = cpu_to_be32(obj_id);
 }
 
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
 {
+	struct mlx5_aso_wqe *wqe;
 	u16 pi;
 
 	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
-	return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+	wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+	memset(wqe, 0, sizeof(*wqe));
+	return wqe;
 }
 
 void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
......
@@ -71,13 +71,14 @@ enum {
 };
 
 enum {
+	MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
 	MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
 	MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
 };
 
 struct mlx5_aso;
 
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
 void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
 			struct mlx5_aso_wqe *aso_wqe,
 			u32 obj_id, u32 opc_mode);
......
@@ -445,7 +445,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 max_modify_header_actions[0x8];
 	u8 max_ft_level[0x8];
 
-	u8 reserved_at_40[0x6];
+	u8 reformat_add_esp_trasport[0x1];
+	u8 reserved_at_41[0x2];
+	u8 reformat_del_esp_trasport[0x1];
+	u8 reserved_at_44[0x2];
 	u8 execute_aso[0x1];
 	u8 reserved_at_47[0x19];
@@ -638,8 +641,10 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
 	u8 reserved_at_1a0[0x8];
 
 	u8 macsec_syndrome[0x8];
+	u8 ipsec_syndrome[0x8];
+	u8 reserved_at_1b8[0x8];
 
-	u8 reserved_at_1b0[0x50];
+	u8 reserved_at_1c0[0x40];
 };
 
 struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -6384,6 +6389,9 @@ enum mlx5_reformat_ctx_type {
 	MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
 	MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
 	MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+	MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+	MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+	MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
 	MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
 	MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
 	MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -11563,6 +11571,41 @@ enum {
 	MLX5_IPSEC_OBJECT_ICV_LEN_16B,
 };
 
+enum {
+	MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+	MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+	MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+	MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+	MLX5_IPSEC_ASO_MODE = 0x0,
+	MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+	MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+	u8 valid[0x1];
+	u8 reserved_at_201[0x1];
+	u8 mode[0x2];
+	u8 window_sz[0x2];
+	u8 soft_lft_arm[0x1];
+	u8 hard_lft_arm[0x1];
+	u8 remove_flow_enable[0x1];
+	u8 esn_event_arm[0x1];
+	u8 reserved_at_20a[0x16];
+
+	u8 remove_flow_pkt_cnt[0x20];
+
+	u8 remove_flow_soft_lft[0x20];
+
+	u8 reserved_at_260[0x80];
+
+	u8 mode_parameter[0x20];
+
+	u8 replay_protection_window[0x100];
+};
+
 struct mlx5_ifc_ipsec_obj_bits {
 	u8 modify_field_select[0x40];
 	u8 full_offload[0x1];
@@ -11584,7 +11627,11 @@ struct mlx5_ifc_ipsec_obj_bits {
 	u8 implicit_iv[0x40];
 
-	u8 reserved_at_100[0x700];
+	u8 reserved_at_100[0x8];
+	u8 ipsec_aso_access_pd[0x18];
+	u8 reserved_at_120[0xe0];
+
+	struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
 };
 
 struct mlx5_ifc_create_ipsec_obj_in_bits {
......