Commit 50172733 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-10-12

Updates to mlx5 driver:
- Cleanup fix of uninitialized pointer read
- xfrm IPSec TX offload
====================
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents d5e6f064 5be01904
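As context for the diff below, here is a minimal, illustrative sketch (not the driver's actual code) of the xfrm TX-offload pattern the mlx5e changes hook into: before building its send descriptors, a driver checks the skb's security path and the xfrm state bound to the device. xfrm_offload(), skb_sec_path() and xfrm_input_state() are the in-kernel xfrm APIs; my_drv_tx_ipsec_prepare() is a hypothetical driver helper used only for illustration.

/* Illustrative only: how a NIC xmit path typically detects an skb that
 * carries xfrm crypto-offload state before programming the hardware.
 */
#include <linux/skbuff.h>
#include <net/xfrm.h>

static bool my_drv_tx_ipsec_prepare(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp = skb_sec_path(skb);
	struct xfrm_state *x;

	if (!xo || !sp || sp->len != 1)
		return false;	/* not an offloaded IPsec packet */

	x = xfrm_input_state(skb);
	if (!x || !x->xso.offload_handle)
		return false;	/* state not bound to this device */

	/* At this point the driver fills its WQE/descriptor fields
	 * (metadata, trailer, IV) from the xfrm state and offload info,
	 * as mlx5e_ipsec_handle_tx_skb() does in the diff below.
	 */
	return true;
}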
@@ -236,6 +236,7 @@ struct mlx5e_accel_fs_tcp;
 struct mlx5e_flow_steering {
 	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_namespace *egress_ns;
 #ifdef CONFIG_MLX5_EN_RXNFC
 	struct mlx5e_ethtool_steering ethtool;
 #endif
...
@@ -107,6 +107,9 @@ struct mlx5e_accel_tx_state {
 #ifdef CONFIG_MLX5_EN_TLS
 	struct mlx5e_accel_tx_tls_state tls;
 #endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+	struct mlx5e_accel_tx_ipsec_state ipsec;
+#endif
 };
 
 static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -125,22 +128,46 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 	}
 #endif
 
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
+		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
+			return false;
+	}
+#endif
+
 	return true;
 }
 
+static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
+#endif
+
+	return false;
+}
+
+static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+						  struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
+		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
+#endif
+
+	return 0;
+}
+
 /* Part of the eseg touched by TX offloads */
 #define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
 
 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
-				       struct mlx5e_txqsq *sq,
 				       struct sk_buff *skb,
 				       struct mlx5_wqe_eth_seg *eseg)
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
-	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-		if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, eseg, skb)))
-			return false;
-	}
+	if (xfrm_offload(skb))
+		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
 #endif
 
 #if IS_ENABLED(CONFIG_GENEVE)
@@ -153,11 +180,18 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
 					 struct mlx5e_tx_wqe *wqe,
-					 struct mlx5e_accel_tx_state *state)
+					 struct mlx5e_accel_tx_state *state,
+					 struct mlx5_wqe_inline_seg *inlseg)
 {
 #ifdef CONFIG_MLX5_EN_TLS
 	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
 #endif
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
+	    state->ipsec.xo && state->ipsec.tailen)
+		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
+#endif
 }
 
 static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
...
@@ -560,6 +560,9 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 		return;
 	}
 
+	if (mlx5_is_ipsec_device(mdev))
+		netdev->gso_partial_features |= NETIF_F_GSO_ESP;
+
 	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
 	netdev->features |= NETIF_F_GSO_ESP;
 	netdev->hw_features |= NETIF_F_GSO_ESP;
...
@@ -76,6 +76,7 @@ struct mlx5e_ipsec_stats {
 };
 
 struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_tx;
 
 struct mlx5e_ipsec {
 	struct mlx5e_priv *en_priv;
@@ -87,6 +88,7 @@ struct mlx5e_ipsec {
 	struct mlx5e_ipsec_stats stats;
 	struct workqueue_struct *wq;
 	struct mlx5e_accel_fs_esp *rx_fs;
+	struct mlx5e_ipsec_tx *tx_fs;
 };
 
 struct mlx5e_ipsec_esn_state {
...
@@ -34,6 +34,12 @@ struct mlx5e_accel_fs_esp {
 	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
 };
 
+struct mlx5e_ipsec_tx {
+	struct mlx5_flow_table *ft;
+	struct mutex mutex; /* Protect IPsec TX steering */
+	u32 refcnt;
+};
+
 /* IPsec RX flow steering */
 static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
 {
@@ -323,6 +329,77 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
 	mutex_unlock(&fs_prot->prot_mutex);
 }
 
+/* IPsec TX flow steering */
+static int tx_create(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	struct mlx5_flow_table *ft;
+	int err;
+
+	priv->fs.egress_ns =
+		mlx5_get_flow_namespace(priv->mdev,
+					MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+	if (!priv->fs.egress_ns)
+		return -EOPNOTSUPP;
+
+	ft_attr.max_fte = NUM_IPSEC_FTE;
+	ft_attr.autogroup.max_num_groups = 1;
+	ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
+		return err;
+	}
+	ipsec->tx_fs->ft = ft;
+	return 0;
+}
+
+static void tx_destroy(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+	if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
+		return;
+
+	mlx5_destroy_flow_table(ipsec->tx_fs->ft);
+	ipsec->tx_fs->ft = NULL;
+}
+
+static int tx_ft_get(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+	int err = 0;
+
+	mutex_lock(&tx_fs->mutex);
+	if (tx_fs->refcnt++)
+		goto out;
+
+	err = tx_create(priv);
+	if (err) {
+		tx_fs->refcnt--;
+		goto out;
+	}
+
+out:
+	mutex_unlock(&tx_fs->mutex);
+	return err;
+}
+
+static void tx_ft_put(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+
+	mutex_lock(&tx_fs->mutex);
+	if (--tx_fs->refcnt)
+		goto out;
+
+	tx_destroy(priv);
+
+out:
+	mutex_unlock(&tx_fs->mutex);
+}
+
 static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
 			     u32 ipsec_obj_id,
 			     struct mlx5_flow_spec *spec,
@@ -457,6 +534,54 @@ static int rx_add_rule(struct mlx5e_priv *priv,
 	return err;
 }
 
+static int tx_add_rule(struct mlx5e_priv *priv,
+		       struct mlx5_accel_esp_xfrm_attrs *attrs,
+		       u32 ipsec_obj_id,
+		       struct mlx5e_ipsec_rule *ipsec_rule)
+{
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	err = tx_ft_get(priv);
+	if (err)
+		return err;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+
+	/* Add IPsec indicator in metadata_reg_a */
+	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+		 MLX5_ETH_WQE_FT_META_IPSEC);
+	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+		 MLX5_ETH_WQE_FT_META_IPSEC);
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
+			   attrs->action, err);
+		goto out;
+	}
+
+	ipsec_rule->rule = rule;
+out:
+	kvfree(spec);
+	if (err)
+		tx_ft_put(priv);
+	return err;
+}
+
 static void rx_del_rule(struct mlx5e_priv *priv,
 			struct mlx5_accel_esp_xfrm_attrs *attrs,
 			struct mlx5e_ipsec_rule *ipsec_rule)
@@ -470,15 +595,27 @@ static void rx_del_rule(struct mlx5e_priv *priv,
 	rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
 }
 
+static void tx_del_rule(struct mlx5e_priv *priv,
+			struct mlx5e_ipsec_rule *ipsec_rule)
+{
+	mlx5_del_flow_rules(ipsec_rule->rule);
+	ipsec_rule->rule = NULL;
+
+	tx_ft_put(priv);
+}
+
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
 				  struct mlx5_accel_esp_xfrm_attrs *attrs,
 				  u32 ipsec_obj_id,
 				  struct mlx5e_ipsec_rule *ipsec_rule)
 {
-	if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
+	if (!priv->ipsec->rx_fs)
 		return -EOPNOTSUPP;
 
-	return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+		return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+	else
+		return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
 }
 
 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
@@ -488,7 +625,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
 	if (!priv->ipsec->rx_fs)
 		return;
 
-	rx_del_rule(priv, attrs, ipsec_rule);
+	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+		rx_del_rule(priv, attrs, ipsec_rule);
+	else
+		tx_del_rule(priv, ipsec_rule);
+}
+
+static void fs_cleanup_tx(struct mlx5e_priv *priv)
+{
+	mutex_destroy(&priv->ipsec->tx_fs->mutex);
+	WARN_ON(priv->ipsec->tx_fs->refcnt);
+	kfree(priv->ipsec->tx_fs);
+	priv->ipsec->tx_fs = NULL;
 }
 
 static void fs_cleanup_rx(struct mlx5e_priv *priv)
@@ -507,6 +655,17 @@ static void fs_cleanup_rx(struct mlx5e_priv *priv)
 	priv->ipsec->rx_fs = NULL;
 }
 
+static int fs_init_tx(struct mlx5e_priv *priv)
+{
+	priv->ipsec->tx_fs =
+		kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
+	if (!priv->ipsec->tx_fs)
+		return -ENOMEM;
+
+	mutex_init(&priv->ipsec->tx_fs->mutex);
+	return 0;
+}
+
 static int fs_init_rx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_accel_fs_esp_prot *fs_prot;
@@ -532,13 +691,24 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
 	if (!priv->ipsec->rx_fs)
 		return;
 
+	fs_cleanup_tx(priv);
 	fs_cleanup_rx(priv);
 }
 
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
 {
+	int err;
+
 	if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
 		return -EOPNOTSUPP;
 
-	return fs_init_rx(priv);
+	err = fs_init_tx(priv);
+	if (err)
+		return err;
+
+	err = fs_init_rx(priv);
+	if (err)
+		fs_cleanup_tx(priv);
+
+	return err;
 }
...
@@ -34,7 +34,7 @@
 #include <crypto/aead.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
-
+#include "accel/ipsec_offload.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ipsec.h"
 #include "accel/accel.h"
@@ -233,18 +233,94 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
 		  ntohs(mdata->content.tx.seq));
 }
 
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
-			       struct mlx5_wqe_eth_seg *eseg,
-			       struct sk_buff *skb)
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+			       struct mlx5_wqe_inline_seg *inlseg)
+{
+	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
+	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
+}
+
+static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
+				 struct sk_buff *skb,
+				 struct xfrm_state *x,
+				 struct xfrm_offload *xo,
+				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+	unsigned int blksize, clen, alen, plen;
+	struct crypto_aead *aead;
+	unsigned int tailen;
+
+	ipsec_st->x = x;
+	ipsec_st->xo = xo;
+	if (mlx5_is_ipsec_device(priv->mdev)) {
+		aead = x->data;
+		alen = crypto_aead_authsize(aead);
+		blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+		clen = ALIGN(skb->len + 2, blksize);
+		plen = max_t(u32, clen - skb->len, 4);
+		tailen = plen + alen;
+		ipsec_st->plen = plen;
+		ipsec_st->tailen = tailen;
+	}
+
+	return 0;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+			       struct mlx5_wqe_eth_seg *eseg)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
-	struct mlx5e_ipsec_metadata *mdata;
-	struct mlx5e_ipsec_sa_entry *sa_entry;
+	struct xfrm_encap_tmpl *encap;
 	struct xfrm_state *x;
 	struct sec_path *sp;
+	u8 l3_proto;
+
+	sp = skb_sec_path(skb);
+	if (unlikely(sp->len != 1))
+		return;
+
+	x = xfrm_input_state(skb);
+	if (unlikely(!x))
+		return;
+
+	if (unlikely(!x->xso.offload_handle ||
+		     (skb->protocol != htons(ETH_P_IP) &&
+		      skb->protocol != htons(ETH_P_IPV6))))
+		return;
+
+	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
 
-	if (!xo)
-		return true;
+	l3_proto = (x->props.family == AF_INET) ?
+		   ((struct iphdr *)skb_network_header(skb))->protocol :
+		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
+
+	if (mlx5_is_ipsec_device(priv->mdev)) {
+		eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+		eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+		encap = x->encap;
+		if (!encap) {
+			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+		} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
+		}
+	}
+}
+
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct sk_buff *skb,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	struct mlx5e_ipsec_sa_entry *sa_entry;
+	struct mlx5e_ipsec_metadata *mdata;
+	struct xfrm_state *x;
+	struct sec_path *sp;
 
 	sp = skb_sec_path(skb);
 	if (unlikely(sp->len != 1)) {
@@ -270,15 +346,21 @@ bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
 		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
 		goto drop;
 	}
-	mdata = mlx5e_ipsec_add_metadata(skb);
-	if (IS_ERR(mdata)) {
-		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
-		goto drop;
+
+	if (MLX5_CAP_GEN(priv->mdev, fpga)) {
+		mdata = mlx5e_ipsec_add_metadata(skb);
+		if (IS_ERR(mdata)) {
+			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+			goto drop;
+		}
 	}
-	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
+
 	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 	sa_entry->set_iv_op(skb, x, xo);
-	mlx5e_ipsec_set_metadata(skb, mdata, xo);
+	if (MLX5_CAP_GEN(priv->mdev, fpga))
+		mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
 
 	return true;
...
@@ -43,6 +43,13 @@
 #define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
 #define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
 
+struct mlx5e_accel_tx_ipsec_state {
+	struct xfrm_offload *xo;
+	struct xfrm_state *x;
+	u32 tailen;
+	u32 plen;
+};
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 
 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -55,16 +62,32 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
 			struct xfrm_offload *xo);
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
-			       struct mlx5_wqe_eth_seg *eseg,
-			       struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct sk_buff *skb,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st);
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+			       struct mlx5_wqe_inline_seg *inlseg);
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
 				       struct mlx5_cqe64 *cqe);
+
+static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+	return ipsec_st->tailen;
+}
+
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
 {
 	return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
 }
+
+static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+	return ipsec_st->x;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+			       struct mlx5_wqe_eth_seg *eseg);
+
 #else
 static inline
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
...
@@ -4569,7 +4569,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 err_free:
 	kfree(flow);
 	kvfree(parse_attr);
-	kfree(attr);
 	return err;
 }
...
@@ -144,9 +144,29 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
 	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
+/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+static void
+ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+			    struct mlx5_wqe_eth_seg *eseg)
+{
+	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+	if (skb->encapsulation) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+		sq->stats->csum_partial_inner++;
+	} else {
+		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+		sq->stats->csum_partial++;
+	}
+}
+
 static inline void
 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
 {
+	if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+		return;
+	}
+
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
 		if (skb->encapsulation) {
@@ -237,12 +257,14 @@ struct mlx5e_tx_attr {
 	u16 headlen;
 	u16 ihs;
 	__be16 mss;
+	u16 insz;
 	u8 opcode;
 };
 
 struct mlx5e_tx_wqe_attr {
 	u16 ds_cnt;
 	u16 ds_cnt_inl;
+	u16 ds_cnt_ids;
 	u8 num_wqebbs;
 };
 
@@ -299,6 +321,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		stats->packets++;
 	}
 
+	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
 	stats->bytes += attr->num_bytes;
 }
 
@@ -307,9 +330,13 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
 {
 	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
 	u16 ds_cnt_inl = 0;
+	u16 ds_cnt_ids = 0;
 
-	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;
+	if (attr->insz)
+		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
+					  MLX5_SEND_WQE_DS);
+	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
 	if (attr->ihs) {
 		u16 inl = attr->ihs - INL_HDR_START_SZ;
 
@@ -323,6 +350,7 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
 	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
 		.ds_cnt = ds_cnt,
 		.ds_cnt_inl = ds_cnt_inl,
+		.ds_cnt_ids = ds_cnt_ids,
 		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
 	};
 }
@@ -398,11 +426,11 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	if (attr->ihs) {
 		if (skb_vlan_tag_present(skb)) {
-			eseg->inline_hdr.sz = cpu_to_be16(attr->ihs + VLAN_HLEN);
+			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
 			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
 			stats->added_vlan_packets++;
 		} else {
-			eseg->inline_hdr.sz = cpu_to_be16(attr->ihs);
+			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
 			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
 		}
 		dseg += wqe_attr->ds_cnt_inl;
@@ -414,6 +442,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		stats->added_vlan_packets++;
 	}
 
+	dseg += wqe_attr->ds_cnt_ids;
 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
 					  attr->headlen, dseg);
 	if (unlikely(num_dma < 0))
@@ -430,7 +459,8 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
 {
-	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs;
+	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
+	       !attr->insz;
 }
 
 static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
@@ -580,7 +610,7 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
 				   struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
 {
-	if (unlikely(!mlx5e_accel_tx_eseg(priv, sq, skb, eseg)))
+	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
 		return false;
 
 	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
@@ -625,7 +655,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 
 	/* May update the WQE, but may not post other WQEs. */
-	mlx5e_accel_tx_finish(sq, wqe, &accel);
+	mlx5e_accel_tx_finish(sq, wqe, &accel,
+			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
 
 	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
 		return NETDEV_TX_OK;
...
@@ -776,6 +776,9 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
 		table_type = FS_FT_NIC_RX;
 		break;
 	case MLX5_FLOW_NAMESPACE_EGRESS:
+#ifdef CONFIG_MLX5_IPSEC
+	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+#endif
 		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
 		table_type = FS_FT_NIC_TX;
 		break;
...
@@ -126,6 +126,10 @@
 #define LAG_NUM_PRIOS 1
 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
 
+#define KERNEL_TX_IPSEC_NUM_PRIOS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
 struct node_caps {
 	size_t arr_sz;
 	long *caps;
@@ -180,13 +184,24 @@ static struct init_tree_node {
 
 static struct init_tree_node egress_root_fs = {
 	.type = FS_TYPE_NAMESPACE,
+#ifdef CONFIG_MLX5_IPSEC
+	.ar_size = 2,
+#else
 	.ar_size = 1,
+#endif
 	.children = (struct init_tree_node[]) {
 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
 			 FS_CHAINING_CAPS_EGRESS,
 			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
 				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
 						  BY_PASS_PRIO_NUM_LEVELS))),
+#ifdef CONFIG_MLX5_IPSEC
+		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS_EGRESS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
+						  KERNEL_TX_IPSEC_NUM_LEVELS))),
+#endif
 	}
 };
 
@@ -2165,8 +2180,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 		break;
 	}
 
-	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
+	if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
+	    type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
 		root_ns = steering->egress_root_ns;
+		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
 	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
 		root_ns = steering->rdma_rx_root_ns;
 		prio = RDMA_RX_BYPASS_PRIO;
...
@@ -76,6 +76,7 @@ enum mlx5_flow_namespace_type {
 	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
 	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
 	MLX5_FLOW_NAMESPACE_EGRESS,
+	MLX5_FLOW_NAMESPACE_EGRESS_KERNEL,
 	MLX5_FLOW_NAMESPACE_RDMA_RX,
 	MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
 	MLX5_FLOW_NAMESPACE_RDMA_TX,
...
@@ -245,6 +245,10 @@ enum {
 	MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
 };
 
+enum {
+	MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+};
+
 struct mlx5_wqe_eth_seg {
 	u8 swp_outer_l4_offset;
 	u8 swp_outer_l3_offset;
@@ -253,7 +257,7 @@ struct mlx5_wqe_eth_seg {
 	u8 cs_flags;
 	u8 swp_flags;
 	__be16 mss;
-	__be32 rsvd2;
+	__be32 flow_table_metadata;
 	union {
 		struct {
 			__be16 sz;
...