Commit 64648a5f authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - racy use of ctx->rcvused in af_alg

   - algif_aead crash in chacha20poly1305

   - freeing bogus pointer in pcrypt

   - build error on MIPS in mpi

   - memory leak in inside-secure

   - memory overwrite in inside-secure

   - NULL pointer dereference in inside-secure

   - state corruption in inside-secure

   - build error without CRYPTO_GF128MUL in chelsio

   - use after free in n2"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: inside-secure - do not use areq->result for partial results
  crypto: inside-secure - fix request allocations in invalidation path
  crypto: inside-secure - free requests even if their handling failed
  crypto: inside-secure - per request invalidation
  lib/mpi: Fix umul_ppmm() for MIPS64r6
  crypto: pcrypt - fix freeing pcrypt instances
  crypto: n2 - cure use after free
  crypto: af_alg - Fix race around ctx->rcvused by making it atomic_t
  crypto: chacha20poly1305 - validate the digest size
  crypto: chelsio - select CRYPTO_GF128MUL
parents d8887f1c 2973633e
@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 	unsigned int i;
 
 	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-		ctx->rcvused -= rsgl->sg_num_bytes;
+		atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
 		af_alg_free_sg(&rsgl->sgl);
 		list_del(&rsgl->list);
 		if (rsgl != &areq->first_rsgl)
@@ -1163,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
 		areq->last_rsgl = rsgl;
 		len += err;
-		ctx->rcvused += err;
+		atomic_add(err, &ctx->rcvused);
 		rsgl->sg_num_bytes = err;
 		iov_iter_advance(&msg->msg_iter, err);
 	}
@@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
-	ctx->rcvused = 0;
+	atomic_set(&ctx->rcvused, 0);
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
@@ -390,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
-	ctx->rcvused = 0;
+	atomic_set(&ctx->rcvused, 0);
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
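Note on the three hunks above (one logical change): rcvused was a plain size_t, so the +=/-= updates in the recvmsg path and the areq teardown were non-atomic read-modify-write sequences that could race against each other and corrupt the accounting. A minimal hedged sketch of the pattern, using an illustrative demo_ctx rather than the real struct af_alg_ctx:

#include <linux/atomic.h>

struct demo_ctx {
	atomic_t rcvused;		/* was: size_t rcvused */
};

static void demo_account(struct demo_ctx *ctx, int nbytes)
{
	/* was: ctx->rcvused += nbytes;  -- a racy load/add/store */
	atomic_add(nbytes, &ctx->rcvused);
}

static void demo_unaccount(struct demo_ctx *ctx, int nbytes)
{
	/* was: ctx->rcvused -= nbytes; */
	atomic_sub(nbytes, &ctx->rcvused);
}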
@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 							    algt->mask));
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
+	poly_hash = __crypto_hash_alg_common(poly);
+
+	err = -EINVAL;
+	if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+		goto out_put_poly;
 
 	err = -ENOMEM;
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 	ctx = aead_instance_ctx(inst);
 	ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 
-	poly_hash = __crypto_hash_alg_common(poly);
 	err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
 				      aead_crypto_instance(inst));
 	if (err)
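Background for the two chachapoly_create() hunks: the template accepted any hash algorithm as its Poly1305 component, and a digest longer than POLY1305_DIGEST_SIZE (16 bytes) overran the tag buffer, which is how algif_aead could crash it. The fix is fail-early validation, done before the instance is allocated so the error path only drops the hash reference. A hedged sketch of the check in isolation (the demo_* name is illustrative, not part of the kernel):

#include <crypto/hash.h>
#include <crypto/poly1305.h>

/* Illustrative helper: reject a "poly" candidate whose digest cannot
 * serve as a Poly1305 tag. */
static int demo_check_poly(struct hash_alg_common *poly_hash)
{
	if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;	/* e.g. sha256's 32-byte digest */
	return 0;
}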
@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
 	crypto_free_aead(ctx->child);
 }
 
+static void pcrypt_free(struct aead_instance *inst)
+{
+	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->spawn);
+	kfree(inst);
+}
+
 static int pcrypt_init_instance(struct crypto_instance *inst,
 				struct crypto_alg *alg)
 {
@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.encrypt = pcrypt_aead_encrypt;
 	inst->alg.decrypt = pcrypt_aead_decrypt;
 
+	inst->free = pcrypt_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_aead;
@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
 	return -EINVAL;
 }
 
-static void pcrypt_free(struct crypto_instance *inst)
-{
-	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_aead(&ctx->spawn);
-	kfree(inst);
-}
-
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
 					unsigned long val, void *data)
 {
@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 static struct crypto_template pcrypt_tmpl = {
 	.name = "pcrypt",
 	.create = pcrypt_create,
-	.free = pcrypt_free,
 	.module = THIS_MODULE,
 };
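Background for the pcrypt hunks: the old template-level free() received a struct crypto_instance *, but pcrypt's instances are allocated as struct aead_instance, which embeds the crypto_instance at a nonzero offset; kfree() on the inner pointer therefore freed an address kmalloc() never returned. Registering the callback on the aead_instance, typed accordingly, keeps the pointer arithmetic consistent. A hedged sketch of the layout problem, with illustrative stand-in structs:

#include <linux/slab.h>

struct demo_inner { long alg; };	/* stands in for crypto_instance */

struct demo_outer {			/* stands in for aead_instance */
	void (*free)(struct demo_outer *inst);
	struct demo_inner base;		/* lives at a nonzero offset */
};

static void demo_free_wrong(struct demo_inner *inst)
{
	kfree(inst);	/* bogus: &outer->base, not the allocation start */
}

static void demo_free_right(struct demo_outer *inst)
{
	kfree(inst);	/* matches what was actually allocated */
}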
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	select CRYPTO_AUTHENC
+	select CRYPTO_GF128MUL
 	---help---
 	  The Chelsio Crypto Co-processor driver for T6 adapters.
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		ndesc = ctx->handle_result(priv, ring, sreq->req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
+			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			return;
 		}
@@ -14,6 +14,7 @@
 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
 	unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+	bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
 				  struct crypto_async_request *async,
 				  struct safexcel_command_desc *cdesc,
@@ -126,7 +131,7 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
 	return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 				  struct crypto_async_request *async,
 				  bool *should_complete, int *ret)
 {
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	request->req = &req->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_aes_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 				    int ring, struct safexcel_request *request,
 				    int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
-
 	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+			 int ring, struct safexcel_request *request,
+			 int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int ret;
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, request,
+					       commands, results);
+	else
+		ret = safexcel_aes_send(async, ring, request,
+					commands, results);
+	return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      safexcel_inv_complete, &result);
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_cipher_send_inv;
+	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
 			enum safexcel_cipher_direction dir, u32 mode)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
+	sreq->needs_inv = false;
 	ctx->direction = dir;
 	ctx->mode = mode;
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_cipher_send_inv;
+		if (ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
-		ctx->base.send = safexcel_aes_send;
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
 						 EIP197_GFP_FLAGS(req->base),
 						 &ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
 			     alg.skcipher.base);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
 
 	return 0;
 }
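Taken together, the safexcel_cipher.c hunks replace per-tfm function-pointer juggling (ctx->base.send and ctx->base.handle_result repointed on every request) with per-request state: the transform context keeps one stable send/handle_result pair, installed once at cra_init, and each request carries its own needs_inv flag that the dispatchers branch on. That removes the state corruption where two in-flight requests on the same tfm could observe each other's pointer swaps. A minimal hedged sketch of the dispatch shape (demo_* names are illustrative):

struct demo_req {
	bool needs_inv;			/* owned by this request alone */
};

static int demo_send_inv(struct demo_req *req)  { return 0; }	/* stub */
static int demo_send_data(struct demo_req *req) { return 0; }	/* stub */

/* One stable entry point per transform; the per-request flag picks
 * the path, so concurrent requests cannot clobber each other. */
static int demo_send(struct demo_req *req)
{
	if (req->needs_inv)
		return demo_send_inv(req);	/* context invalidation */
	return demo_send_data(req);		/* normal cipher work */
}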
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
 	bool last_req;
 	bool finish;
 	bool hmac;
+	bool needs_inv;
 
 	u8 state_sz;    /* expected sate size, only set once */
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
 	u64 len;
 	u64 processed;
@@ -119,7 +120,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	}
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 				  struct crypto_async_request *async,
 				  bool *should_complete, int *ret)
 {
@@ -127,7 +128,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-	int cache_len, result_sz = sreq->state_sz;
+	int cache_len;
 
 	*ret = 0;
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->finish)
-		result_sz = crypto_ahash_digestsize(ahash);
-	memcpy(sreq->state, areq->result, result_sz);
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
 
 	dma_unmap_sg(priv->dev, areq->src,
 		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	return 1;
 }
 
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-			       struct safexcel_request *request, int *commands,
-			       int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   struct safexcel_request *request,
+				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
 					      req->state_sz, DMA_FROM_DEVICE);
 	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
 		ret = -EINVAL;
@@ -292,7 +293,6 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
 	req->processed += len;
 
 	request->req = &areq->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_ahash_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	return 1;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 				   int ring, struct safexcel_request *request,
 				   int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
 	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, struct safexcel_request *request,
+			       int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, request,
+					      commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, request,
+					      commands, results);
+	return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_ahash_send_inv;
+	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
-	ctx->base.send = safexcel_ahash_send;
+	req->needs_inv = false;
 
 	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
 		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_ahash_send_inv;
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
 				     struct safexcel_alg_template, alg.ahash);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct safexcel_ahash_req));
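Two distinct fixes land in the hash hunks. The per-request needs_inv restructuring mirrors the cipher side above. Separately, intermediate digests are no longer DMA'd into areq->result, the caller's buffer, which may be absent or undersized between updates and whose alignment the hardware cannot rely on; they go into the driver-private, word-aligned req->state and are copied out only when the hash finishes. A hedged sketch of that ownership rule (demo_* names are illustrative):

#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>

/* Driver-owned state buffer: always present, sized for the largest
 * supported digest, and explicitly aligned for word access. */
struct demo_hash_req {
	bool finish;
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
};

static void demo_complete(struct demo_hash_req *sreq, u8 *result,
			  unsigned int digestsize)
{
	/* Partial results stay in sreq->state; only the final digest
	 * is copied to the caller-supplied result pointer. */
	if (sreq->finish)
		memcpy(result, sreq->state, digestsize);
}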
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
 					  CWQ_ENTRY_SIZE, 0, NULL);
 	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
 		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
 		return -ENOMEM;
 	}
 
 	return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
 {
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }
 
 static long spu_queue_register_workfn(void *arg)
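The n2 use after free: queue_cache_init() and queue_cache_destroy() run again on a later probe/remove cycle, and the module-scope queue_cache[] pointers kept referring to caches that had already been destroyed, so the next init skipped allocation (pointer still non-NULL) and handed out freed caches. Clearing the pointers makes the cycle idempotent. A hedged, self-contained sketch of the pattern (demo_* names are illustrative):

#include <linux/slab.h>

static struct kmem_cache *demo_cache;	/* module scope, reused across probes */

static int demo_cache_init(void)
{
	if (!demo_cache)	/* a stale non-NULL pointer would skip this */
		demo_cache = kmem_cache_create("demo", 64, 0, 0, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static void demo_cache_destroy(void)
{
	kmem_cache_destroy(demo_cache);	/* NULL-safe */
	demo_cache = NULL;		/* the key fix: drop the stale pointer */
}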
@@ -18,6 +18,7 @@
 #include <linux/if_alg.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 
 #include <crypto/aead.h>
@@ -150,7 +151,7 @@ struct af_alg_ctx {
 	struct crypto_wait wait;
 
 	size_t used;
-	size_t rcvused;
+	atomic_t rcvused;
 
 	bool more;
 	bool merge;
@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
 	struct af_alg_ctx *ctx = ask->private;
 
 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
-		     ctx->rcvused, 0);
+		     atomic_read(&ctx->rcvused), 0);
 }
 
 /**
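af_alg_rcvbuf() computes how much more data recvmsg may account: at least one page of sk_rcvbuf is usable, minus whatever rcvused already holds, clamped at zero. A hedged re-statement of the same arithmetic in isolation, useful for sanity-checking the bounds:

#include <linux/atomic.h>
#include <linux/kernel.h>	/* max_t */
#include <asm/page.h>		/* PAGE_SIZE, PAGE_MASK */

static int demo_rcvbuf(int sk_rcvbuf, atomic_t *rcvused)
{
	/* at least one page of the receive buffer is usable ... */
	int budget = max_t(int, sk_rcvbuf & PAGE_MASK, PAGE_SIZE);

	/* ... minus what is already accounted, never negative */
	return max_t(int, budget - atomic_read(rcvused), 0);
}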
@@ -671,7 +671,23 @@ do {									\
 **************  MIPS/64  **************
 ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)						\
+do {									\
+	__asm__ ("dmulu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w0))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+	__asm__ ("dmuhu %0,%1,%2"					\
+		 : "=d" ((UDItype)(w1))					\
+		 : "d" ((UDItype)(u)),					\
+		   "d" ((UDItype)(v)));					\
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)						\
 do {									\
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
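umul_ppmm(w1, w0, u, v) splits the full 128-bit product of two 64-bit operands across w1 (high half) and w0 (low half). MIPS64r6 dropped the legacy dmult/mflo/mfhi sequence, and with the generic __int128 fallback GCC emits a __multi3 libcall that the kernel never links, hence the dmulu/dmuhu inline asm. A hedged usage sketch, relying only on the UDItype that longlong.h already defines; for example (2^64 - 1) * 2 = 2^65 - 2, so the high word is 1 and the low word is 0xfffffffffffffffe:

static int demo_umul_ppmm_check(void)
{
	UDItype hi, lo;

	/* 0xffffffffffffffff * 2 = 0x1_fffffffffffffffe */
	umul_ppmm(hi, lo, (UDItype)~0ULL, (UDItype)2);
	return (hi == 1 && lo == (UDItype)~1ULL) ? 0 : -1;
}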