Commit 9ed431c1 authored by Jakub Kicinski, committed by David S. Miller

nfp: tls: make use of kernel-driven TX resync

When the TCP stream gets out of sync (the driver stops receiving
skbs with the expected TCP sequence numbers), request a TX resync
from the kernel.

We try to distinguish retransmissions from missed transmissions
by comparing the sequence number to the expected one - if it is
ahead of the expected one, we probably missed packets.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 50180074
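[Editor's note] The "probably missed packets" check described in the commit message relies on unsigned 32-bit wraparound: a retransmission (sequence number behind the expected one) subtracts to a huge unsigned value, while a genuine forward jump stays under a quarter of the sequence space. A minimal, self-contained illustration of that comparison follows; it is not taken from the driver, and the names are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: returns true when seq is ahead of next_seq by less
 * than a quarter of the 32-bit sequence space - treated as a missed
 * transmission. A retransmission (seq behind next_seq) wraps around to a
 * huge unsigned difference and is rejected.
 */
static bool seq_jumped_forward(uint32_t seq, uint32_t next_seq)
{
	return seq != next_seq && seq - next_seq < UINT32_MAX / 4;
}

int main(void)
{
	printf("%d\n", seq_jumped_forward(2000, 1000));       /* 1 - jumped ahead   */
	printf("%d\n", seq_jumped_forward(1000, 2000));       /* 0 - retransmission */
	printf("%d\n", seq_jumped_forward(100, 0xffffff00));  /* 1 - wrapped ahead  */
	return 0;
}

Anything at or beyond a quarter of the sequence space is treated as a wrapped (negative) difference, i.e. a retransmission.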
@@ -13,7 +13,6 @@ struct nfp_net_tls_offload_ctx {
 	 */
 
 	u32 next_seq;
-	bool out_of_sync;
 };
 
 #ifdef CONFIG_TLS_DEVICE
@@ -390,25 +390,30 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	struct nfp_net_tls_offload_ctx *ntls;
 	struct nfp_crypto_req_update *req;
 	struct sk_buff *skb;
+	gfp_t flags;
 
-	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
-		return;
-
-	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_ATOMIC);
+	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
+	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
 	if (!skb)
 		return;
 
-	ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
+	ntls = tls_driver_ctx(sk, direction);
 	req = (void *)skb->data;
 	req->ep_id = 0;
-	req->opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
 	memset(req->resv, 0, sizeof(req->resv));
 	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
 	req->tcp_seq = cpu_to_be32(seq);
 	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
 
-	nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
-			  sizeof(struct nfp_crypto_reply_simple));
+	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+		nfp_net_tls_communicate_simple(nn, skb, "sync",
+					       NFP_CCM_TYPE_CRYPTO_UPDATE);
+		ntls->next_seq = seq;
+	} else {
+		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+				  sizeof(struct nfp_crypto_reply_simple));
+	}
 }
 
 static const struct tlsdev_ops nfp_net_tls_ops = {
@@ -829,6 +829,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 {
 	struct nfp_net_tls_offload_ctx *ntls;
 	struct sk_buff *nskb;
+	bool resync_pending;
 	u32 datalen, seq;
 
 	if (likely(!dp->ktls_tx))
@@ -839,7 +840,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
 	seq = ntohl(tcp_hdr(skb)->seq);
 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
-	if (unlikely(ntls->next_seq != seq || ntls->out_of_sync)) {
+	resync_pending = tls_offload_tx_resync_pending(skb->sk);
+	if (unlikely(resync_pending || ntls->next_seq != seq)) {
 		/* Pure ACK out of order already */
 		if (!datalen)
 			return skb;
@@ -869,8 +871,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	}
 
 	/* jump forward, a TX may have gotten lost, need to sync TX */
-	if (!ntls->out_of_sync && seq - ntls->next_seq < U32_MAX / 4)
-		ntls->out_of_sync = true;
+	if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
+		tls_offload_tx_resync_request(nskb->sk);
 
 	*nr_frags = 0;
 	return nskb;
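[Editor's note] For context, a condensed sketch of the driver-side flow this patch adopts. It is not the nfp code itself: the my_drv_* names and struct my_drv_ctx are hypothetical, while tls_offload_tx_resync_pending(), tls_offload_tx_resync_request() and the .tls_dev_resync callback are the kernel-driven TX resync interfaces from net/tls. The xmit path asks the TLS stack for a resync when it detects a forward jump; the stack later invokes .tls_dev_resync with TLS_OFFLOAD_CTX_DIR_TX at a record boundary, passing the record's starting TCP sequence number and record number.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <net/tls.h>

struct my_drv_ctx {			/* hypothetical per-connection state */
	u32 next_seq;			/* next TX TCP seq we expect to see */
};

/* xmit path: the skb's seq is checked against what we expected - if it
 * jumped forward (likely a missed transmission, not a retransmission) and
 * no resync is already scheduled, ask the stack to resync us
 */
static void my_drv_tls_tx_check(struct my_drv_ctx *ctx, struct sk_buff *skb)
{
	u32 seq = ntohl(tcp_hdr(skb)->seq);

	if (seq == ctx->next_seq || tls_offload_tx_resync_pending(skb->sk))
		return;

	if (seq - ctx->next_seq < U32_MAX / 4)
		tls_offload_tx_resync_request(skb->sk);
}

/* .tls_dev_resync: called by the TLS stack at the start of a record */
static void my_drv_tls_resync(struct net_device *netdev, struct sock *sk,
			      u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction)
{
	struct my_drv_ctx *ctx = tls_driver_ctx(sk, direction);

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return;

	/* tell the device about the new record start (seq, rcd_sn) here
	 * (device-specific, may sleep), then resume seq tracking
	 */
	ctx->next_seq = seq;
}

In the nfp driver the TX branch of the resync handler can block (GFP_KERNEL allocation and a synchronous mailbox exchange), while the RX branch keeps the atomic, fire-and-forget posting, which is what the nfp_net_tls_resync() hunk above implements.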