Commit f02bac9a authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Return bool from TLS and IPSEC offloads

TLS and IPSEC offloads currently return struct sk_buff *, but the value
is either NULL or the same skb that was passed as a parameter. Return
bool instead to provide stronger guarantees to the calling code (it no
longer has to handle a different SKB that, before this change, could
potentially have been returned) and to simplify restructuring this code
in the following commits.
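
The contract change is easy to see outside the driver. Below is a minimal,
standalone C sketch of the pattern (the `pkt` type and the handler names are
hypothetical stand-ins for `struct sk_buff` and the mlx5e handlers, for
illustration only):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };	/* hypothetical stand-in for struct sk_buff */

/* Old contract: return the packet on success, NULL on drop. The signature
 * alone allows the handler to hand back a *different* packet, so every
 * caller must reload its pointer from the return value. */
static struct pkt *handle_old(struct pkt *p, bool offload_ok)
{
	if (!offload_ok) {
		free(p);		/* handler consumes the packet on failure */
		return NULL;
	}
	return p;			/* in practice, always the same pointer */
}

/* New contract: the bool means only "continue transmitting or not"; the
 * caller's pointer is guaranteed to stay valid on the true path. */
static bool handle_new(struct pkt *p, bool offload_ok)
{
	if (!offload_ok) {
		free(p);		/* false means the packet was already freed */
		return false;
	}
	return true;
}

int main(void)
{
	struct pkt *a = malloc(sizeof(*a));
	struct pkt *b = malloc(sizeof(*b));

	a = handle_old(a, true);	/* must overwrite a; it could differ */
	if (a) {
		printf("old: still own packet %p\n", (void *)a);
		free(a);
	}

	if (handle_new(b, true)) {	/* b is untouched; no reload needed */
		printf("new: still own packet %p\n", (void *)b);
		free(b);
	}
	return 0;
}

With the bool contract the signature states the invariant the code already
obeyed, which is what makes the restructuring in the following commits safe.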
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 76cd622f
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,8 +102,7 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 	udp_hdr(skb)->len = htons(payload_len);
 }
 
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
-		      struct mlx5e_txqsq *sq,
-		      struct net_device *dev,
-		      struct mlx5e_tx_wqe **wqe,
+static inline bool mlx5e_accel_handle_tx(struct sk_buff *skb,
+					 struct mlx5e_txqsq *sq,
+					 struct net_device *dev,
+					 struct mlx5e_tx_wqe **wqe,
@@ -111,24 +110,22 @@ mlx5e_accel_handle_tx(struct sk_buff *skb,
 {
 #ifdef CONFIG_MLX5_EN_TLS
 	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
-		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
-		if (unlikely(!skb))
-			return NULL;
+		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi)))
+			return false;
 	}
 #endif
 
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
-		if (unlikely(!skb))
-			return NULL;
+		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb)))
+			return false;
 	}
 #endif
 
 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
 		mlx5e_udp_gso_handle_tx_skb(skb);
 
-	return skb;
+	return true;
 }
 
 #endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,7 +233,7 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
 		   ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct mlx5e_tx_wqe *wqe,
+			       struct sk_buff *skb)
 {
@@ -245,7 +245,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 	struct sec_path *sp;
 
 	if (!xo)
-		return skb;
+		return true;
 
 	sp = skb_sec_path(skb);
 	if (unlikely(sp->len != 1)) {
@@ -281,11 +281,11 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 	sa_entry->set_iv_op(skb, x, xo);
 	mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-	return skb;
+	return true;
 
 drop:
 	kfree_skb(skb);
-	return NULL;
+	return false;
 }
 
 static inline struct xfrm_state *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,7 +52,7 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
 			struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct mlx5e_tx_wqe *wqe,
+			       struct sk_buff *skb);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -95,10 +95,9 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			      u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 					   struct mlx5e_tx_wqe_info *wi,
 					   u32 *dma_fifo_cc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -413,10 +413,9 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	return MLX5E_KTLS_SYNC_FAIL;
 }
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			      u16 *pi)
 {
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
 	struct mlx5e_sq_stats *stats = sq->stats;
@@ -474,9 +473,9 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 	stats->tls_encrypted_bytes += datalen;
 
 out:
-	return skb;
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,11 +184,9 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 	nskb->queue_mapping = skb->queue_mapping;
 }
 
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
-		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
-		     struct mlx5e_tx_wqe **wqe,
-		     u16 *pi,
-		     struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+				 struct mlx5e_txqsq *sq, struct sk_buff *skb,
+				 struct mlx5e_tx_wqe **wqe, u16 *pi,
+				 struct mlx5e_tls *tls)
 {
 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
@@ -217,7 +215,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 		if (likely(payload <= -info.sync_len))
 			/* SKB payload doesn't require offload
 			 */
-			return skb;
+			return true;
 
 		atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
 		goto err_out;
@@ -250,17 +248,15 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
 	*pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
 	*wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
-	return skb;
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			     u16 *pi)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -270,41 +266,35 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
-		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
-		goto out;
-	}
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+		return mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 
 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
+		return true;
 
 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
 	if (!datalen)
-		goto out;
+		return true;
 
 	tls_ctx = tls_get_ctx(skb->sk);
 	if (unlikely(tls_ctx->netdev != netdev))
-		goto out;
+		return true;
 
 	skb_seq = ntohl(tcp_hdr(skb)->seq);
 	context = mlx5e_get_tls_tx_context(tls_ctx);
 	expected_seq = context->expected_seq;
 
-	if (unlikely(expected_seq != skb_seq)) {
-		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
-		goto out;
-	}
+	if (unlikely(expected_seq != skb_seq))
+		return mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
 
 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
 		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
 		dev_kfree_skb_any(skb);
-		skb = NULL;
-		goto out;
+		return false;
 	}
 
 	context->expected_seq = skb_seq + datalen;
-out:
-	return skb;
+	return true;
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,10 +40,8 @@
 #include "en.h"
 #include "en/txrx.h"
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi);
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			     u16 *pi);
 
 void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -394,8 +394,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 
 	/* might send skbs and update wqe and pi */
-	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
-	if (unlikely(!skb))
+	if (unlikely(!mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi)))
 		return NETDEV_TX_OK;
 
 	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());