Commit 138559b9 authored by Tariq Toukan, committed by Jakub Kicinski

net/tls: Fix wrong record sn in async mode of device resync

In async_resync mode, we log the TCP seq of records until the async request
is completed.  Later, in case one of the logged seqs matches the resync
request, we return it, together with its record serial number.  Before this
fix, we mistakenly returned the serial number of the current record
instead.

Fixes: ed9b7646 ("net/tls: Add asynchronous resync")
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Boris Pismenny <borisp@nvidia.com>
Link: https://lore.kernel.org/r/20201115131448.2702-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a5bbcbf2
...@@ -300,7 +300,8 @@ enum tls_offload_sync_type { ...@@ -300,7 +300,8 @@ enum tls_offload_sync_type {
#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13 #define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
struct tls_offload_resync_async { struct tls_offload_resync_async {
atomic64_t req; atomic64_t req;
u32 loglen; u16 loglen;
u16 rcd_delta;
u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
}; };
...@@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len) ...@@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
return (i == -1); return (i == -1);
} }
/* Subtract @n from the big-endian TLS record sequence number stored at @seq.
 *
 * @seq: pointer to the 8-byte big-endian record sequence number (rcd_sn)
 * @n:   number of records to rewind by (the async-resync delta)
 *
 * Used by the device-offload resync path to rewind rcd_sn by the number of
 * records that went by while an asynchronous resync request was pending, so
 * the serial number returned matches the logged seq rather than the current
 * record.
 * NOTE(review): no underflow guard — assumes rcd_sn >= n; confirm callers
 * (rcd_delta is bounded by USHRT_MAX) guarantee this.
 * NOTE(review): casts unsigned char * to __be64 * — presumably seq is
 * 8-byte aligned in struct tls_prot_info; verify, else this is a
 * misaligned access on strict-alignment architectures.
 */
static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
u64 rcd_sn;
__be64 *p;
/* Compile-time proof that the sequence number is exactly 64 bits,
 * which the single __be64 load/store below depends on.
 */
BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
p = (__be64 *)seq;
rcd_sn = be64_to_cpu(*p);
*p = cpu_to_be64(rcd_sn - n);
}
static inline struct tls_context *tls_get_ctx(const struct sock *sk) static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{ {
struct inet_connection_sock *icsk = inet_csk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
...@@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) ...@@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
rx_ctx->resync_async->loglen = 0; rx_ctx->resync_async->loglen = 0;
rx_ctx->resync_async->rcd_delta = 0;
} }
static inline void static inline void
......
...@@ -694,36 +694,51 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, ...@@ -694,36 +694,51 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
static bool static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
s64 resync_req, u32 *seq) s64 resync_req, u32 *seq, u16 *rcd_delta)
{ {
u32 is_async = resync_req & RESYNC_REQ_ASYNC; u32 is_async = resync_req & RESYNC_REQ_ASYNC;
u32 req_seq = resync_req >> 32; u32 req_seq = resync_req >> 32;
u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
u16 i;
*rcd_delta = 0;
if (is_async) { if (is_async) {
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
return false;
/* asynchronous stage: log all headers seq such that /* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request * req_seq <= seq <= end_seq, and wait for real resync request
*/ */
if (between(*seq, req_seq, req_end) && if (before(*seq, req_seq))
return false;
if (!after(*seq, req_end) &&
resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
resync_async->log[resync_async->loglen++] = *seq; resync_async->log[resync_async->loglen++] = *seq;
resync_async->rcd_delta++;
return false; return false;
} }
/* synchronous stage: check against the logged entries and /* synchronous stage: check against the logged entries and
* proceed to check the next entries if no match was found * proceed to check the next entries if no match was found
*/ */
while (resync_async->loglen) { for (i = 0; i < resync_async->loglen; i++)
if (req_seq == resync_async->log[resync_async->loglen - 1] && if (req_seq == resync_async->log[i] &&
atomic64_try_cmpxchg(&resync_async->req, atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
&resync_req, 0)) { *rcd_delta = resync_async->rcd_delta - i;
resync_async->loglen = 0;
*seq = req_seq; *seq = req_seq;
resync_async->loglen = 0;
resync_async->rcd_delta = 0;
return true; return true;
} }
resync_async->loglen--;
} resync_async->loglen = 0;
resync_async->rcd_delta = 0;
if (req_seq == *seq && if (req_seq == *seq &&
atomic64_try_cmpxchg(&resync_async->req, atomic64_try_cmpxchg(&resync_async->req,
...@@ -741,6 +756,7 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) ...@@ -741,6 +756,7 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
u32 sock_data, is_req_pending; u32 sock_data, is_req_pending;
struct tls_prot_info *prot; struct tls_prot_info *prot;
s64 resync_req; s64 resync_req;
u16 rcd_delta;
u32 req_seq; u32 req_seq;
if (tls_ctx->rx_conf != TLS_HW) if (tls_ctx->rx_conf != TLS_HW)
...@@ -786,8 +802,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) ...@@ -786,8 +802,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
return; return;
if (!tls_device_rx_resync_async(rx_ctx->resync_async, if (!tls_device_rx_resync_async(rx_ctx->resync_async,
resync_req, &seq)) resync_req, &seq, &rcd_delta))
return; return;
tls_bigint_subtract(rcd_sn, rcd_delta);
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment