Commit e870456d authored by Stephan Mueller, committed by Herbert Xu

crypto: algif_skcipher - overhaul memory management

The updated memory management is described in the top part of the code.
As one benefit of the changed memory management, the AIO and synchronous
operations are now implemented in one common function. The AF_ALG
operation uses the async kernel crypto API interface for each cipher
operation. Thus, the only differences between the AIO and sync operation
types visible from user space are:

1. the callback function to be invoked when the asynchronous operation
   is completed

2. whether to wait for the completion of the kernel crypto API operation
   or not

In addition, the code structure is adjusted to match the structure of
algif_aead for easier code assessment.

The user space interface changed slightly as follows: the old AIO
operation returned zero upon success and < 0 in case of an error to user
space. As all other AF_ALG interfaces (including the sync skcipher
interface) returned the number of processed bytes upon success and < 0
in case of an error, the new skcipher interface (regardless of AIO or
sync) returns the number of processed bytes in case of success.
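
To illustrate the interface semantics described above, the following minimal
user-space sketch (not part of this patch; the cbc(aes) algorithm, the
variable names and the omission of error handling are purely illustrative)
queues one AES block via sendmsg() and then issues a synchronous read().
An AIO read submitted via io_submit() would now report the same byte count
in its completion event:

/*
 * Illustrative sketch only: encrypt one AES-CBC block of zeros via AF_ALG
 * and print the number of processed bytes reported by the kernel.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	unsigned char key[16] = { 0 };	/* all-zero demo key */
	unsigned char pt[16] = { 0 };	/* one block of plaintext */
	unsigned char ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg;
	int tfmfd, opfd;
	ssize_t n;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* Request encryption of the data carried in the iovec. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	sendmsg(opfd, &msg, 0);		/* fills the TX SGL, no crypto yet */
	n = read(opfd, ct, sizeof(ct));	/* RX buffer triggers the operation */

	/* Both the sync and the AIO path report the processed bytes. */
	printf("processed %zd bytes\n", n);

	close(opfd);
	close(tfmfd);
	return 0;
}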
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 10a515dd
@@ -10,6 +10,21 @@
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request is released. The extracted TX SGL parts are released together with
+ * the RX SGL release.
  */
 
 #include <crypto/scatterwalk.h>
@@ -24,109 +39,94 @@
 #include <linux/net.h>
 #include <net/sock.h>
 
-struct skcipher_sg_list {
+struct skcipher_tsgl {
 	struct list_head list;
 	int cur;
 	struct scatterlist sg[0];
 };
 
+struct skcipher_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+	size_t sg_num_bytes;
+};
+
+struct skcipher_async_req {
+	struct kiocb *iocb;
+	struct sock *sk;
+
+	struct skcipher_rsgl first_sgl;
+	struct list_head rsgl_list;
+
+	struct scatterlist *tsgl;
+	unsigned int tsgl_entries;
+
+	unsigned int areqlen;
+	struct skcipher_request req;
+};
+
 struct skcipher_tfm {
 	struct crypto_skcipher *skcipher;
 	bool has_key;
 };
 
 struct skcipher_ctx {
-	struct list_head tsgl;
-	struct af_alg_sgl rsgl;
+	struct list_head tsgl_list;
 
 	void *iv;
 
 	struct af_alg_completion completion;
 
-	atomic_t inflight;
 	size_t used;
+	size_t rcvused;
 
-	unsigned int len;
 	bool more;
 	bool merge;
 	bool enc;
 
-	struct skcipher_request req;
-};
-
-struct skcipher_async_rsgl {
-	struct af_alg_sgl sgl;
-	struct list_head list;
-};
-
-struct skcipher_async_req {
-	struct kiocb *iocb;
-	struct skcipher_async_rsgl first_sgl;
-	struct list_head list;
-	struct scatterlist *tsg;
-	atomic_t *inflight;
-	struct skcipher_request req;
+	unsigned int len;
 };
 
-#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \
 		      sizeof(struct scatterlist) - 1)
 
-static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
-{
-	struct skcipher_async_rsgl *rsgl, *tmp;
-	struct scatterlist *sgl;
-	struct scatterlist *sg;
-	int i, n;
-
-	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		if (rsgl != &sreq->first_sgl)
-			kfree(rsgl);
-	}
-	sgl = sreq->tsg;
-	n = sg_nents(sgl);
-	for_each_sg(sgl, sg, n, i)
-		put_page(sg_page(sg));
-
-	kfree(sreq->tsg);
-}
-
-static void skcipher_async_cb(struct crypto_async_request *req, int err)
-{
-	struct skcipher_async_req *sreq = req->data;
-	struct kiocb *iocb = sreq->iocb;
-
-	atomic_dec(sreq->inflight);
-	skcipher_free_async_sgls(sreq);
-	kzfree(sreq);
-	iocb->ki_complete(iocb, err, err);
-}
-
 static inline int skcipher_sndbuf(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
 
 	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
 			  ctx->used, 0);
 }
 
 static inline bool skcipher_writable(struct sock *sk)
 {
 	return PAGE_SIZE <= skcipher_sndbuf(sk);
 }
 
-static int skcipher_alloc_sgl(struct sock *sk)
+static inline int skcipher_rcvbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->rcvused, 0);
+}
+
+static inline bool skcipher_readable(struct sock *sk)
+{
+	return PAGE_SIZE <= skcipher_rcvbuf(sk);
+}
+
+static int skcipher_alloc_tsgl(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_sg_list *sgl;
+	struct skcipher_tsgl *sgl;
 	struct scatterlist *sg = NULL;
 
-	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
-	if (!list_empty(&ctx->tsgl))
+	sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
+	if (!list_empty(&ctx->tsgl_list))
 		sg = sgl->sg;
 
 	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
@@ -142,31 +142,66 @@ static int skcipher_alloc_sgl(struct sock *sk)
 		if (sg)
 			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
-		list_add_tail(&sgl->list, &ctx->tsgl);
+		list_add_tail(&sgl->list, &ctx->tsgl_list);
 	}
 
 	return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
+static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_sg_list *sgl;
+	struct skcipher_tsgl *sgl, *tmp;
+	unsigned int i;
+	unsigned int sgl_count = 0;
+
+	if (!bytes)
+		return 0;
+
+	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+		struct scatterlist *sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			sgl_count++;
+			if (sg[i].length >= bytes)
+				return sgl_count;
+
+			bytes -= sg[i].length;
+		}
+	}
+
+	return sgl_count;
+}
+
+static void skcipher_pull_tsgl(struct sock *sk, size_t used,
+			       struct scatterlist *dst)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_tsgl *sgl;
 	struct scatterlist *sg;
-	int i;
+	unsigned int i;
 
-	while (!list_empty(&ctx->tsgl)) {
-		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+	while (!list_empty(&ctx->tsgl_list)) {
+		sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl,
 				       list);
 		sg = sgl->sg;
 
 		for (i = 0; i < sgl->cur; i++) {
 			size_t plen = min_t(size_t, used, sg[i].length);
+			struct page *page = sg_page(sg + i);
 
-			if (!sg_page(sg + i))
+			if (!page)
 				continue;
 
+			/*
+			 * Assumption: caller created skcipher_count_tsgl(len)
+			 * SG entries in dst.
+			 */
+			if (dst)
+				sg_set_page(dst + i, page, plen, sg[i].offset);
+
 			sg[i].length -= plen;
 			sg[i].offset += plen;
@@ -175,14 +210,14 @@ static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 			if (sg[i].length)
 				return;
 
-			if (put)
-				put_page(sg_page(sg + i));
+			if (!dst)
+				put_page(page);
+
 			sg_assign_page(sg + i, NULL);
 		}
 
 		list_del(&sgl->list);
-		sock_kfree_s(sk, sgl,
-			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
+		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
 					    (MAX_SGL_ENTS + 1));
 	}
@@ -190,12 +225,33 @@ static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 	ctx->merge = 0;
 }
 
-static void skcipher_free_sgl(struct sock *sk)
+static void skcipher_free_areq_sgls(struct skcipher_async_req *areq)
 {
+	struct sock *sk = areq->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_rsgl *rsgl, *tmp;
+	struct scatterlist *tsgl;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+		ctx->rcvused -= rsgl->sg_num_bytes;
+		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
+		if (rsgl != &areq->first_sgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+
+	tsgl = areq->tsgl;
+	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+		if (!sg_page(sg))
+			continue;
+		put_page(sg_page(sg));
+	}
 
-	skcipher_pull_sgl(sk, ctx->used, 1);
+	if (areq->tsgl && areq->tsgl_entries)
+		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
 }
 
 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -302,7 +358,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	struct skcipher_tfm *skc = pask->private;
 	struct crypto_skcipher *tfm = skc->skcipher;
 	unsigned ivsize = crypto_skcipher_ivsize(tfm);
-	struct skcipher_sg_list *sgl;
+	struct skcipher_tsgl *sgl;
 	struct af_alg_control con = {};
 	long copied = 0;
 	bool enc = 0;
@@ -349,8 +405,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 		size_t plen;
 
 		if (ctx->merge) {
-			sgl = list_entry(ctx->tsgl.prev,
-					 struct skcipher_sg_list, list);
+			sgl = list_entry(ctx->tsgl_list.prev,
+					 struct skcipher_tsgl, list);
 			sg = sgl->sg + sgl->cur - 1;
 			len = min_t(unsigned long, len,
 				    PAGE_SIZE - sg->offset - sg->length);
@@ -379,11 +435,12 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 
 		len = min_t(unsigned long, len, skcipher_sndbuf(sk));
 
-		err = skcipher_alloc_sgl(sk);
+		err = skcipher_alloc_tsgl(sk);
 		if (err)
 			goto unlock;
 
-		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+		sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl,
+				 list);
 		sg = sgl->sg;
 		if (sgl->cur)
 			sg_unmark_end(sg + sgl->cur - 1);
@@ -435,7 +492,7 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_sg_list *sgl;
+	struct skcipher_tsgl *sgl;
 	int err = -EINVAL;
 
 	if (flags & MSG_SENDPAGE_NOTLAST)
@@ -454,12 +511,12 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 		goto unlock;
 	}
 
-	err = skcipher_alloc_sgl(sk);
+	err = skcipher_alloc_tsgl(sk);
 	if (err)
 		goto unlock;
 
 	ctx->merge = 0;
-	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+	sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
 
 	if (sgl->cur)
 		sg_unmark_end(sgl->sg + sgl->cur - 1);
@@ -480,25 +537,29 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	return err ?: size;
 }
 
-static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-	struct skcipher_sg_list *sgl;
-	struct scatterlist *sg;
-	int nents = 0;
+	struct skcipher_async_req *areq = req->data;
+	struct sock *sk = areq->sk;
+	struct kiocb *iocb = areq->iocb;
+	unsigned int resultlen;
 
-	list_for_each_entry(sgl, &ctx->tsgl, list) {
-		sg = sgl->sg;
+	lock_sock(sk);
 
-		while (!sg->length)
-			sg++;
+	/* Buffer size written by crypto operation. */
+	resultlen = areq->req.cryptlen;
 
-		nents += sg_nents(sg);
-	}
-	return nents;
+	skcipher_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areq->areqlen);
+	__sock_put(sk);
+
+	iocb->ki_complete(iocb, err ? err : resultlen, 0);
+
+	release_sock(sk);
 }
 
-static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
-				  int flags)
+static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+			     size_t ignored, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -507,215 +568,166 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	struct skcipher_ctx *ctx = ask->private;
 	struct skcipher_tfm *skc = pask->private;
 	struct crypto_skcipher *tfm = skc->skcipher;
-	struct skcipher_sg_list *sgl;
-	struct scatterlist *sg;
-	struct skcipher_async_req *sreq;
-	struct skcipher_request *req;
-	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int txbufs = 0, len = 0, tx_nents;
-	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
-	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-	int err = -ENOMEM;
-	bool mark = false;
-	char *iv;
-
-	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
-	if (unlikely(!sreq))
-		goto out;
-
-	req = &sreq->req;
-	iv = (char *)(req + 1) + reqsize;
-	sreq->iocb = msg->msg_iocb;
-	INIT_LIST_HEAD(&sreq->list);
-	sreq->inflight = &ctx->inflight;
-
-	lock_sock(sk);
-	tx_nents = skcipher_all_sg_nents(ctx);
-	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-	if (unlikely(!sreq->tsg))
-		goto unlock;
-	sg_init_table(sreq->tsg, tx_nents);
-	memcpy(iv, ctx->iv, ivsize);
-	skcipher_request_set_tfm(req, tfm);
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
-				      skcipher_async_cb, sreq);
-
-	while (iov_iter_count(&msg->msg_iter)) {
-		struct skcipher_async_rsgl *rsgl;
-		int used;
+	unsigned int bs = crypto_skcipher_blocksize(tfm);
+	unsigned int areqlen = sizeof(struct skcipher_async_req) +
+			       crypto_skcipher_reqsize(tfm);
+	struct skcipher_async_req *areq;
+	struct skcipher_rsgl *last_rsgl = NULL;
+	int err = 0;
+	size_t len = 0;
+
+	/* Allocate cipher request for current operation. */
+	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+	if (unlikely(!areq))
+		return -ENOMEM;
+	areq->areqlen = areqlen;
+	areq->sk = sk;
+	INIT_LIST_HEAD(&areq->rsgl_list);
+	areq->tsgl = NULL;
+	areq->tsgl_entries = 0;
+
+	/* convert iovecs of output buffers into RX SGL */
+	while (msg_data_left(msg)) {
+		struct skcipher_rsgl *rsgl;
+		size_t seglen;
+
+		/* limit the amount of readable buffers */
+		if (!skcipher_readable(sk))
+			break;
 
 		if (!ctx->used) {
 			err = skcipher_wait_for_data(sk, flags);
 			if (err)
 				goto free;
 		}
 
-		sgl = list_first_entry(&ctx->tsgl,
-				       struct skcipher_sg_list, list);
-		sg = sgl->sg;
-
-		while (!sg->length)
-			sg++;
-
-		used = min_t(unsigned long, ctx->used,
-			     iov_iter_count(&msg->msg_iter));
-		used = min_t(unsigned long, used, sg->length);
-
-		if (txbufs == tx_nents) {
-			struct scatterlist *tmp;
-			int x;
-			/* Ran out of tx slots in async request
-			 * need to expand */
-			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
-				      GFP_KERNEL);
-			if (!tmp) {
-				err = -ENOMEM;
-				goto free;
-			}
-
-			sg_init_table(tmp, tx_nents * 2);
-			for (x = 0; x < tx_nents; x++)
-				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
-					    sreq->tsg[x].length,
-					    sreq->tsg[x].offset);
-			kfree(sreq->tsg);
-			sreq->tsg = tmp;
-			tx_nents *= 2;
-			mark = true;
-		}
-
-		/* Need to take over the tx sgl from ctx
-		 * to the asynch req - these sgls will be freed later */
-		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
-			    sg->offset);
-
-		if (list_empty(&sreq->list)) {
-			rsgl = &sreq->first_sgl;
-			list_add_tail(&rsgl->list, &sreq->list);
+		seglen = min_t(size_t, ctx->used, msg_data_left(msg));
+
+		if (list_empty(&areq->rsgl_list)) {
+			rsgl = &areq->first_sgl;
 		} else {
-			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
+			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
 			if (!rsgl) {
 				err = -ENOMEM;
 				goto free;
 			}
-			list_add_tail(&rsgl->list, &sreq->list);
 		}
 
-		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
-		err = used;
-		if (used < 0)
+		rsgl->sgl.npages = 0;
+		list_add_tail(&rsgl->list, &areq->rsgl_list);
+
+		/* make one iovec available as scatterlist */
+		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+		if (err < 0)
 			goto free;
+
+		/* chain the new scatterlist with previous one */
 		if (last_rsgl)
 			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
 
 		last_rsgl = rsgl;
-		len += used;
-		skcipher_pull_sgl(sk, used, 0);
-		iov_iter_advance(&msg->msg_iter, used);
+		len += err;
+		ctx->rcvused += err;
+		rsgl->sg_num_bytes = err;
+		iov_iter_advance(&msg->msg_iter, err);
 	}
 
-	if (mark)
-		sg_mark_end(sreq->tsg + txbufs - 1);
+	/* Process only as much RX buffers for which we have TX data */
+	if (len > ctx->used)
+		len = ctx->used;
 
-	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-				   len, iv);
-	err = ctx->enc ? crypto_skcipher_encrypt(req) :
-			 crypto_skcipher_decrypt(req);
+	/*
+	 * If more buffers are to be expected to be processed, process only
+	 * full block size buffers.
+	 */
+	if (ctx->more || len < ctx->used)
+		len -= len % bs;
+
+	/*
+	 * Create a per request TX SGL for this request which tracks the
+	 * SG entries from the global TX SGL.
+	 */
+	areq->tsgl_entries = skcipher_count_tsgl(sk, len);
+	if (!areq->tsgl_entries)
+		areq->tsgl_entries = 1;
+	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+				  GFP_KERNEL);
+	if (!areq->tsgl) {
+		err = -ENOMEM;
+		goto free;
+	}
+	sg_init_table(areq->tsgl, areq->tsgl_entries);
+	skcipher_pull_tsgl(sk, len, areq->tsgl);
+
+	/* Initialize the crypto operation */
+	skcipher_request_set_tfm(&areq->req, tfm);
+	skcipher_request_set_crypt(&areq->req, areq->tsgl,
+				   areq->first_sgl.sgl.sg, len, ctx->iv);
+
+	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+		/* AIO operation */
+		areq->iocb = msg->msg_iocb;
+		skcipher_request_set_callback(&areq->req,
+					      CRYPTO_TFM_REQ_MAY_SLEEP,
+					      skcipher_async_cb, areq);
+		err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
+				 crypto_skcipher_decrypt(&areq->req);
+	} else {
+		/* Synchronous operation */
+		skcipher_request_set_callback(&areq->req,
+					      CRYPTO_TFM_REQ_MAY_SLEEP |
+					      CRYPTO_TFM_REQ_MAY_BACKLOG,
+					      af_alg_complete,
+					      &ctx->completion);
+		err = af_alg_wait_for_completion(ctx->enc ?
+					crypto_skcipher_encrypt(&areq->req) :
+					crypto_skcipher_decrypt(&areq->req),
+						 &ctx->completion);
+	}
+
+	/* AIO operation in progress */
 	if (err == -EINPROGRESS) {
-		atomic_inc(&ctx->inflight);
-		err = -EIOCBQUEUED;
-		sreq = NULL;
-		goto unlock;
+		sock_hold(sk);
+		return -EIOCBQUEUED;
 	}
 
 free:
-	skcipher_free_async_sgls(sreq);
-unlock:
-	skcipher_wmem_wakeup(sk);
-	release_sock(sk);
-	kzfree(sreq);
-out:
-	return err;
+	skcipher_free_areq_sgls(areq);
+	if (areq)
+		sock_kfree_s(sk, areq, areqlen);
+
+	return err ? err : len;
 }
 
-static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
-				 int flags)
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+			    size_t ignored, int flags)
 {
 	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct sock *psk = ask->parent;
-	struct alg_sock *pask = alg_sk(psk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_tfm *skc = pask->private;
-	struct crypto_skcipher *tfm = skc->skcipher;
-	unsigned bs = crypto_skcipher_blocksize(tfm);
-	struct skcipher_sg_list *sgl;
-	struct scatterlist *sg;
-	int err = -EAGAIN;
-	int used;
-	long copied = 0;
+	int ret = 0;
 
 	lock_sock(sk);
 	while (msg_data_left(msg)) {
-		if (!ctx->used) {
-			err = skcipher_wait_for_data(sk, flags);
-			if (err)
-				goto unlock;
-		}
-
-		used = min_t(unsigned long, ctx->used, msg_data_left(msg));
-
-		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
-		err = used;
-		if (err < 0)
-			goto unlock;
-
-		if (ctx->more || used < ctx->used)
-			used -= used % bs;
-
-		err = -EINVAL;
-		if (!used)
-			goto free;
-
-		sgl = list_first_entry(&ctx->tsgl,
-				       struct skcipher_sg_list, list);
-		sg = sgl->sg;
-
-		while (!sg->length)
-			sg++;
-
-		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
-					   ctx->iv);
-
-		err = af_alg_wait_for_completion(
-				ctx->enc ?
-					crypto_skcipher_encrypt(&ctx->req) :
-					crypto_skcipher_decrypt(&ctx->req),
-				&ctx->completion);
-
-free:
-		af_alg_free_sg(&ctx->rsgl);
-
-		if (err)
-			goto unlock;
-
-		copied += used;
-		skcipher_pull_sgl(sk, used, 1);
-		iov_iter_advance(&msg->msg_iter, used);
+		int err = _skcipher_recvmsg(sock, msg, ignored, flags);
+
+		/*
+		 * This error covers -EIOCBQUEUED which implies that we can
+		 * only handle one AIO request. If the caller wants to have
+		 * multiple AIO requests in parallel, he must make multiple
+		 * separate AIO calls.
+		 */
+		if (err <= 0) {
+			if (err == -EIOCBQUEUED)
+				ret = err;
+			goto out;
+		}
+
+		ret += err;
 	}
 
-	err = 0;
-
-unlock:
+out:
 	skcipher_wmem_wakeup(sk);
 	release_sock(sk);
 
-	return copied ?: err;
-}
-
-static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
-			    size_t ignored, int flags)
-{
-	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
-		skcipher_recvmsg_async(sock, msg, flags) :
-		skcipher_recvmsg_sync(sock, msg, flags);
+	return ret;
 }
 
 static unsigned int skcipher_poll(struct file *file, struct socket *sock,
@@ -895,26 +907,16 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
 	return err;
 }
 
-static void skcipher_wait(struct sock *sk)
-{
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	int ctr = 0;
-
-	while (atomic_read(&ctx->inflight) && ctr++ < 100)
-		msleep(100);
-}
-
 static void skcipher_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
 
-	if (atomic_read(&ctx->inflight))
-		skcipher_wait(sk);
-
-	skcipher_free_sgl(sk);
+	skcipher_pull_tsgl(sk, ctx->used, NULL);
 	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
@@ -926,7 +928,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_tfm *tfm = private;
 	struct crypto_skcipher *skcipher = tfm->skcipher;
-	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
+	unsigned int len = sizeof(*ctx);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
@@ -941,22 +943,17 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
 
-	INIT_LIST_HEAD(&ctx->tsgl);
+	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
+	ctx->rcvused = 0;
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
-	atomic_set(&ctx->inflight, 0);
 	af_alg_init_completion(&ctx->completion);
 
 	ask->private = ctx;
 
-	skcipher_request_set_tfm(&ctx->req, skcipher);
-	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
-				      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      af_alg_complete, &ctx->completion);
-
 	sk->sk_destruct = skcipher_sock_destruct;
 
 	return 0;
...