Commit ed3c9a2f authored by Jakub Kicinski, committed by David S. Miller

net: tls: make the offload check helper take skb not socket

All callers of tls_is_sk_tx_device_offloaded() currently do
an equivalent of:

 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))

Have the helper accept skb and do the skb->sk check locally.
Two drivers have local static inlines with similar wrappers
already.

While at it change the ifdef condition from SOCK_VALIDATE_XMIT to
TLS_DEVICE. Only TLS_DEVICE selects SOCK_VALIDATE_XMIT, so the two
are equivalent. This also makes removing the duplicated IS_ENABLED()
check in funeth more obviously correct.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Acked-by: Dimitris Michailidis <dmichail@fungible.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 580b7fe5
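
For illustration, a minimal sketch of the caller-side pattern this patch changes, applied to a hypothetical driver xmit path (my_drv_ktls_xmit() and my_drv_queue_xmit() are placeholder names, not part of this patch):

/* Hypothetical driver start_xmit path, shown only to illustrate the
 * caller-side change; the two handlers below are placeholders.
 */
static netdev_tx_t my_drv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	/* Before this patch callers open-coded the socket check:
	 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
	 * The renamed helper takes the skb and performs the skb->sk
	 * check itself.
	 */
	if (tls_is_skb_tx_device_offloaded(skb))
		return my_drv_ktls_xmit(skb, dev);	/* placeholder kTLS TX path */
#endif
	return my_drv_queue_xmit(skb, dev);		/* placeholder regular TX path */
}
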
@@ -5442,7 +5442,7 @@ static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *sk
 {
 	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
-	/* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+	/* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
 	 * was true, if tls_device_down is running in parallel, but it's OK,
 	 * because bond_get_slave_by_dev has a NULL check.
 	 */
@@ -5461,7 +5461,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
 		return NETDEV_TX_OK;
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
-	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+	if (tls_is_skb_tx_device_offloaded(skb))
		return bond_tls_device_xmit(bond, skb, dev);
 #endif
...
@@ -1175,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 		txq = netdev_pick_tx(dev, skb, sb_dev);
 		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
 		    skb->encapsulation ||
-		    cxgb4_is_ktls_skb(skb) ||
+		    tls_is_skb_tx_device_offloaded(skb) ||
 		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
 			txq = txq % pi->nqsets;
...
@@ -497,11 +497,6 @@ struct cxgb4_uld_info {
 #endif
 };
-static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
-{
-	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 void cxgb4_uld_enable(struct adapter *adap);
 void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
...
@@ -1530,7 +1530,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif /* CHELSIO_IPSEC_INLINE */
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-	if (cxgb4_is_ktls_skb(skb) &&
+	if (tls_is_skb_tx_device_offloaded(skb) &&
 	    (skb->len - skb_tcp_all_headers(skb)))
 		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
...
@@ -1946,7 +1946,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	tls_ctx = tls_get_ctx(skb->sk);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't quit on NULL: if tls_device_down is running in parallel,
-	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
 	 * true. Rather continue processing this packet.
 	 */
 	if (unlikely(tls_netdev && tls_netdev != dev))
...
@@ -348,8 +348,7 @@ netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tls_len = 0;
 	unsigned int ndesc;
-	if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
-	    tls_is_sk_tx_device_offloaded(skb->sk)) {
+	if (tls_is_skb_tx_device_offloaded(skb)) {
 		skb = fun_tls_tx(skb, q, &tls_len);
 		if (unlikely(!skb))
 			goto dropped;
...
@@ -125,7 +125,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 #ifdef CONFIG_MLX5_EN_TLS
 	/* May send WQEs. */
-	if (mlx5e_ktls_skb_offloaded(skb))
+	if (tls_is_skb_tx_device_offloaded(skb))
 		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
 						       &state->tls)))
 			return false;
...
@@ -846,7 +846,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
 	tls_ctx = tls_get_ctx(skb->sk);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't WARN on NULL: if tls_device_down is running in parallel,
-	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
 	 * true. Rather continue processing this packet.
 	 */
 	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
...
@@ -49,11 +49,6 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
 	return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
 }
-static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
-{
-	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 static inline void
 mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
			  struct mlx5e_accel_tx_tls_state *state)
...
@@ -598,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	if (likely(!dp->ktls_tx))
 		return skb;
-	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+	if (!tls_is_skb_tx_device_offloaded(skb))
 		return skb;
 	datalen = skb->len - skb_tcp_all_headers(skb);
@@ -666,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
 	if (!tls_handle)
 		return;
-	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+	if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
 		return;
 	datalen = skb->len - skb_tcp_all_headers(skb);
...
@@ -370,10 +370,12 @@ struct sk_buff *
 tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			  struct sk_buff *skb);
-static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
 {
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
-	return sk_fullsock(sk) &&
+#ifdef CONFIG_TLS_DEVICE
+	struct sock *sk = skb->sk;
+
+	return sk && sk_fullsock(sk) &&
		(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
		&tls_validate_xmit_skb);
 #else
...
@@ -1219,7 +1219,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 	tls_device_attach(ctx, sk, netdev);
 	up_read(&device_offload_lock);
-	/* following this assignment tls_is_sk_tx_device_offloaded
+	/* following this assignment tls_is_skb_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
@@ -1372,7 +1372,7 @@ static int tls_device_down(struct net_device *netdev)
 	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
-		 * tls_is_sk_tx_device_offloaded will return false.
+		 * tls_is_skb_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
...