Commit fd31f399 authored by Jakub Kicinski, committed by David S. Miller

tls: rx: decrypt into a fresh skb

We currently CoW Rx skbs whenever we can't decrypt to a user
space buffer. The skbs can be enormous (64kB) and CoW does
a linear alloc which has a strong chance of failing under
memory pressure. Even without pressure, skb_cow_data()
hard-codes GFP_ATOMIC, which is far more likely to fail
than the socket's own (sleepable) allocation mode.

Allocate a new frag'd skb and decrypt into it. We finally
take advantage of the decrypted skb getting returned via
darg.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cbbdee99
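For context, a minimal sketch (not part of the commit; the helper name is hypothetical) of the allocation strategy the commit moves to. alloc_skb_with_frags() builds the requested capacity out of page frags of at most 2^max_page_order pages each, so no contiguous 64kB region is needed, and it takes the caller's gfp mask instead of the GFP_ATOMIC that skb_cow_data() hard-codes:

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical helper: give an skb @len bytes of capacity held entirely
 * in page frags, no linear part.  Each frag allocation is capped at
 * PAGE_ALLOC_COSTLY_ORDER (order-3, i.e. 32kB with 4kB pages), which the
 * page allocator can usually satisfy even when one contiguous 64kB
 * (order-4) request would fail.
 */
static struct sk_buff *sketch_alloc_fragged(struct sock *sk, unsigned long len)
{
	int err;

	/* header_len == 0: everything lands in frags.  The gfp mask comes
	 * from the socket (GFP_KERNEL for TLS), so reclaim may run, unlike
	 * under skb_cow_data()'s GFP_ATOMIC.
	 */
	return alloc_skb_with_frags(0, len, PAGE_ALLOC_COSTLY_ORDER,
				    &err, sk->sk_allocation);
}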
@@ -39,6 +39,9 @@
 #include <linux/skmsg.h>
 #include <net/tls.h>
 
+#define TLS_PAGE_ORDER	(min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER,	\
+			       TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))
+
 #define __TLS_INC_STATS(net, field)				\
 	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
 #define TLS_INC_STATS(net, field)				\
...
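(For scale: with 4kB pages, TLS_MAX_PAYLOAD_SIZE (16kB) >> PAGE_SHIFT is 4, so TLS_PAGE_ORDER is min(3, 4) = PAGE_ALLOC_COSTLY_ORDER; with 64kB pages the shift gives 0 and plain single pages are used. Either way frag allocations never exceed the "costly" order.)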
@@ -1383,6 +1383,29 @@ static int tls_setup_from_iter(struct iov_iter *from,
 	return rc;
 }
 
+static struct sk_buff *
+tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
+		     unsigned int full_len)
+{
+	struct strp_msg *clr_rxm;
+	struct sk_buff *clr_skb;
+	int err;
+
+	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
+				       &err, sk->sk_allocation);
+	if (!clr_skb)
+		return NULL;
+
+	skb_copy_header(clr_skb, skb);
+	clr_skb->len = full_len;
+	clr_skb->data_len = full_len;
+
+	clr_rxm = strp_msg(clr_skb);
+	clr_rxm->offset = 0;
+
+	return clr_skb;
+}
+
 /* Decrypt handlers
  *
 * tls_decrypt_sg() and tls_decrypt_device() are decrypt handlers.
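Note that the fresh skb mirrors the layout of the ciphertext record: clr_rxm->offset starts at 0 and the plaintext is later written at prot->prepend_size (see the skb_to_sgvec() call below), so the caller can advance offset past the record header identically for the in-place and fresh-skb cases. len and data_len are set by hand because all full_len bytes live in page frags; there is no linear part to account for them.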
@@ -1410,34 +1433,40 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	int n_sgin, n_sgout, aead_size, err, pages = 0;
 	struct sk_buff *skb = tls_strp_msg(ctx);
-	struct strp_msg *rxm = strp_msg(skb);
-	struct tls_msg *tlm = tls_msg(skb);
+	const struct strp_msg *rxm = strp_msg(skb);
+	const struct tls_msg *tlm = tls_msg(skb);
 	struct aead_request *aead_req;
-	struct sk_buff *unused;
 	struct scatterlist *sgin = NULL;
 	struct scatterlist *sgout = NULL;
 	const int data_len = rxm->full_len - prot->overhead_size;
 	int tail_pages = !!prot->tail_size;
 	struct tls_decrypt_ctx *dctx;
+	struct sk_buff *clear_skb;
 	int iv_offset = 0;
 	u8 *mem;
 
+	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
+			 rxm->full_len - prot->prepend_size);
+	if (n_sgin < 1)
+		return n_sgin ?: -EBADMSG;
+
 	if (darg->zc && (out_iov || out_sg)) {
+		clear_skb = NULL;
+
 		if (out_iov)
 			n_sgout = 1 + tail_pages +
 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
 		else
 			n_sgout = sg_nents(out_sg);
-		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
-				 rxm->full_len - prot->prepend_size);
 	} else {
-		n_sgout = 0;
 		darg->zc = false;
-		n_sgin = skb_cow_data(skb, 0, &unused);
-	}
 
-	if (n_sgin < 1)
-		return -EBADMSG;
+		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
+		if (!clear_skb)
+			return -ENOMEM;
+
+		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
+	}
 
 	/* Increment to accommodate AAD */
 	n_sgin = n_sgin + 1;
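Two changes in this hunk are worth spelling out: n_sgin is now computed up front for both paths via skb_nsg(), with the zero and negative cases folded into a single return (n_sgin ?: -EBADMSG), and the fresh-skb output count is simply one slot for the AAD plus one per frag, since tls_alloc_clrtxt_skb() placed all of the data in frags.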
@@ -1449,8 +1478,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
 	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
 		      sk->sk_allocation);
-	if (!mem)
-		return -ENOMEM;
+	if (!mem) {
+		err = -ENOMEM;
+		goto exit_free_skb;
+	}
 
 	/* Segment the allocated memory */
 	aead_req = (struct aead_request *)mem;
@@ -1499,16 +1530,22 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 	if (err < 0)
 		goto exit_free;
 
-	if (n_sgout) {
-		if (out_iov) {
-			sg_init_table(sgout, n_sgout);
-			sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+	if (clear_skb) {
+		sg_init_table(sgout, n_sgout);
+		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
 
-			err = tls_setup_from_iter(out_iov, data_len,
-						  &pages, &sgout[1],
-						  (n_sgout - 1 - tail_pages));
-			if (err < 0)
-				goto fallback_to_reg_recv;
+		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
+				   data_len + prot->tail_size);
+		if (err < 0)
+			goto exit_free;
+	} else if (out_iov) {
+		sg_init_table(sgout, n_sgout);
+		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+
+		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
+					  (n_sgout - 1 - tail_pages));
+		if (err < 0)
+			goto exit_free_pages;
 
 		if (prot->tail_size) {
 			sg_unmark_end(&sgout[pages]);
@@ -1518,14 +1555,6 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 		}
 	} else if (out_sg) {
 		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
-	} else {
-		goto fallback_to_reg_recv;
-	}
-	} else {
-fallback_to_reg_recv:
-		sgout = sgin;
-		pages = 0;
-		darg->zc = false;
 	}
 
 	/* Prepare and submit AEAD request */
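In the clear_skb branch, skb_to_sgvec() maps the fresh skb's frags into the output scatterlist right after the AAD slot, starting at prot->prepend_size to preserve the record layout. This is also what retires the old fallback_to_reg_recv label: there is no longer any case where the output has to alias the input (sgout = sgin), so every path now either fills sgout explicitly or bails out with an error.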
@@ -1534,7 +1563,8 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 	if (err)
 		goto exit_free_pages;
 
-	darg->skb = tls_strp_msg(ctx);
+	darg->skb = clear_skb ?: tls_strp_msg(ctx);
+	clear_skb = NULL;
 
 	if (unlikely(darg->async)) {
 		err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
@@ -1552,6 +1582,8 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 		put_page(sg_page(&sgout[pages]));
 exit_free:
 	kfree(mem);
+exit_free_skb:
+	consume_skb(clear_skb);
 	return err;
 }
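On success, ownership of the clear-text skb moves to darg->skb and clear_skb is NULLed, so the shared exit path won't free it. Every failure path (including the kmalloc() failure, which jumps to exit_free_skb before mem exists) funnels through consume_skb(clear_skb); consume_skb(NULL) is a no-op, so the zero-copy path, which never allocates clear_skb, is equally safe.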
...