Commit 32f44d62 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (35 commits)
  hwrng: timeriomem - Fix potential oops (request_mem_region/__devinit)
  crypto: api - Use formatting of module name
  crypto: testmgr - Allow hash test vectors longer than a page
  crypto: testmgr - Check all test vector lengths
  crypto: hifn_795x - fix __dev{init,exit} markings
  crypto: tcrypt - Do not exit on success in fips mode
  crypto: compress - Return produced bytes in crypto_{,de}compress_{update,final}
  hwrng: via_rng - Support VIA Nano hardware RNG on X86_64 builds
  hwrng: via_rng - Support VIA Nano hardware RNG
  hwrng: via_rng - The VIA Hardware RNG driver is for the CPU, not Chipset
  crypto: testmgr - Skip algs not flagged fips_allowed in fips mode
  crypto: testmgr - Mark algs allowed in fips mode
  crypto: testmgr - Add ctr(aes) test vectors
  crypto: testmgr - Dynamically allocate xbuf and axbuf
  crypto: testmgr - Print self-test pass notices in fips mode
  crypto: testmgr - Catch base cipher self-test failures in fips mode
  crypto: testmgr - Add ansi_cprng test vectors
  crypto: testmgr - Add infrastructure for ansi_cprng self-tests
  crypto: testmgr - Add self-tests for rfc4309(ccm(aes))
  crypto: testmgr - Handle AEAD test vectors expected to fail verification
  ...
parents f3ad1165 08ced854
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_FPU) += fpu.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
......
@@ -21,6 +21,22 @@
#include <asm/i387.h>
#include <asm/aes.h>
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif
#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif
#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};
@@ -137,6 +153,41 @@ static struct crypto_alg aesni_alg = {
	}
};
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_dec(ctx, dst, src);
}
static struct crypto_alg __aesni_alg = {
.cra_name = "__aes-aesni",
.cra_driver_name = "__driver-aes-aesni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = __aes_encrypt,
.cia_decrypt = __aes_decrypt
}
}
};
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
@@ -277,8 +328,16 @@ static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;
-	return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
@@ -411,6 +470,163 @@ static struct crypto_alg ablk_cbc_alg = {
	},
};
#ifdef HAS_CTR
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_ctr_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
.cra_init = ablk_ctr_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
.geniv = "chainiv",
},
},
};
#endif
#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_lrw_alg = {
.cra_name = "lrw(aes)",
.cra_driver_name = "lrw-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
.cra_init = ablk_lrw_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_pcbc_alg = {
.cra_name = "pcbc(aes)",
.cra_driver_name = "pcbc-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
.cra_init = ablk_pcbc_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_xts_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
.cra_init = ablk_xts_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
static int __init aesni_init(void)
{
	int err;
@@ -421,6 +637,8 @@ static int __init aesni_init(void)
	}
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
@@ -429,9 +647,41 @@
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_ctr_alg)))
goto ablk_ctr_err;
#endif
#ifdef HAS_LRW
if ((err = crypto_register_alg(&ablk_lrw_alg)))
goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
if ((err = crypto_register_alg(&ablk_pcbc_alg)))
goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
if ((err = crypto_register_alg(&ablk_xts_alg)))
goto ablk_xts_err;
#endif
	return err;
#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
#endif
crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
@@ -439,6 +689,8 @@ static int __init aesni_init(void)
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
@@ -446,10 +698,23 @@ static int __init aesni_init(void)

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
}
......
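As a usage sketch (not part of the patch): the modes registered by the glue code above are reached through the normal crypto API by algorithm name. Asking for "ctr(aes)" resolves to ctr-aes-aesni (priority 400), which internally dispatches through cryptd(fpu(ctr(__driver-aes-aesni))). The function name and error handling below are illustrative assumptions, not code from this commit:

/* Illustrative sketch only -- not part of this diff. */
#include <linux/err.h>
#include <linux/crypto.h>

static int ctr_aes_usage_sketch(const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	int err;

	/* Resolves to the highest-priority "ctr(aes)" provider; with this
	 * patch applied and AES-NI present, that is ctr-aes-aesni. */
	tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);

	/* ... build an ablkcipher_request and call crypto_ablkcipher_encrypt() ... */

	crypto_free_ablkcipher(tfm);
	return err;
}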
/*
* FPU: Wrapper for blkcipher touching fpu
*
* Copyright (c) Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/i387.h>
struct crypto_fpu_ctx {
struct crypto_blkcipher *child;
};
static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_blkcipher *child = ctx->child;
int err;
crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_blkcipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
int err;
struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
struct crypto_blkcipher *child = ctx->child;
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
.flags = desc_in->flags,
};
kernel_fpu_begin();
err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
kernel_fpu_end();
return err;
}
static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
int err;
struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
struct crypto_blkcipher *child = ctx->child;
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
.flags = desc_in->flags,
};
kernel_fpu_begin();
err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
kernel_fpu_end();
return err;
}
static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_blkcipher *cipher;
cipher = crypto_spawn_blkcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(ctx->child);
}
static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = crypto_alloc_instance("fpu", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = alg->cra_flags;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = alg->cra_type;
inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
inst->alg.cra_init = crypto_fpu_init_tfm;
inst->alg.cra_exit = crypto_fpu_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_fpu_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_fpu_tmpl = {
.name = "fpu",
.alloc = crypto_fpu_alloc,
.free = crypto_fpu_free,
.module = THIS_MODULE,
};
static int __init crypto_fpu_module_init(void)
{
return crypto_register_template(&crypto_fpu_tmpl);
}
static void __exit crypto_fpu_module_exit(void)
{
crypto_unregister_template(&crypto_fpu_tmpl);
}
module_init(crypto_fpu_module_init);
module_exit(crypto_fpu_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FPU block cipher wrapper");
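To show how the new "fpu" template is reached, here is a minimal sketch (not part of the patch) of instantiating it by name, mirroring what the aesni-intel glue above does; the wrapper function name is an illustrative assumption:

/* Illustrative sketch only -- not part of this diff. */
#include <linux/err.h>
#include <crypto/cryptd.h>

static int fpu_template_sketch(void)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	/* Instantiating "fpu(...)" makes every encrypt/decrypt of the inner
	 * blkcipher run between kernel_fpu_begin() and kernel_fpu_end(). */
	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_free_ablkcipher(cryptd_tfm);
	return 0;
}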
@@ -241,6 +241,11 @@ config CRYPTO_XTS
	  key size 256, 384 or 512 bits. This implementation currently
	  can't handle a sectorsize which is not a multiple of 16 bytes.

config CRYPTO_FPU
	tristate
	select CRYPTO_BLKCIPHER
	select CRYPTO_MANAGER

comment "Hash modes"

config CRYPTO_HMAC
@@ -486,6 +491,7 @@ config CRYPTO_AES_NI_INTEL
	select CRYPTO_AES_X86_64
	select CRYPTO_CRYPTD
	select CRYPTO_ALGAPI
	select CRYPTO_FPU
	help
	  Use Intel AES-NI instructions for AES algorithm.
@@ -505,6 +511,10 @@ config CRYPTO_AES_NI_INTEL
	  See <http://csrc.nist.gov/encryption/aes/> for more information.
	  In addition to AES cipher algorithm support, the acceleration
	  for some popular block cipher modes is supported too, including
	  ECB, CBC, CTR, LRW, PCBC and XTS.
config CRYPTO_ANUBIS
	tristate "Anubis cipher algorithm"
	select CRYPTO_ALGAPI
......
@@ -280,29 +280,13 @@ static struct notifier_block cryptomgr_notifier = {

static int __init cryptomgr_init(void)
{
-	int err;
-	err = testmgr_init();
-	if (err)
-		return err;
-	err = crypto_register_notifier(&cryptomgr_notifier);
-	if (err)
-		goto free_testmgr;
-	return 0;
-free_testmgr:
-	testmgr_exit();
-	return err;
	return crypto_register_notifier(&cryptomgr_notifier);
}

static void __exit cryptomgr_exit(void)
{
	int err = crypto_unregister_notifier(&cryptomgr_notifier);
	BUG_ON(err);
-	testmgr_exit();
}

subsys_initcall(cryptomgr_init);
......
@@ -217,14 +217,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
	alg = crypto_alg_lookup(name, type, mask);
	if (!alg) {
-		char tmp[CRYPTO_MAX_ALG_NAME];
-		request_module(name);
		request_module("%s", name);
		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
-		      CRYPTO_ALG_NEED_FALLBACK) &&
-		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
-			request_module(tmp);
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("%s-all", name);
		alg = crypto_alg_lookup(name, type, mask);
	}
@@ -580,20 +577,17 @@ EXPORT_SYMBOL_GPL(crypto_alloc_tfm);

void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;
-	int size;
	if (unlikely(!mem))
		return;
	alg = tfm->__crt_alg;
-	size = ksize(mem);
	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
-	memset(mem, 0, size);
-	kfree(mem);
	kzfree(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
......
@@ -586,20 +586,24 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
-	struct crypto_ablkcipher *tfm;
	struct crypto_tfm *tfm;
	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
-	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
-	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
-		crypto_free_ablkcipher(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}
-	return __cryptd_ablkcipher_cast(tfm);
	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
......
@@ -121,9 +121,6 @@ int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
-int __init testmgr_init(void);
-void testmgr_exit(void);

static inline void crypto_alg_put(struct crypto_alg *alg)
{
	if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
......
@@ -26,6 +26,7 @@
#include <linux/string.h>
#include <crypto/compress.h>
#include <crypto/internal/compress.h>
#include "internal.h"
......
@@ -27,6 +27,7 @@
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
#include "internal.h"
/*
 * Need slab memory for testing (size in number of pages).
@@ -396,16 +397,16 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	struct scatterlist sg[TVMEMSIZE];
	struct crypto_hash *tfm;
	struct hash_desc desc;
-	char output[1024];
	static char output[1024];
	int i;
	int ret;
-	printk("\ntesting speed of %s\n", algo);
	printk(KERN_INFO "\ntesting speed of %s\n", algo);
	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
-		printk("failed to load transform for %s: %ld\n", algo,
		printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}
@@ -414,7 +415,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	desc.flags = 0;
	if (crypto_hash_digestsize(tfm) > sizeof(output)) {
-		printk("digestsize(%u) > outputbuffer(%zu)\n",
		printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
		       crypto_hash_digestsize(tfm), sizeof(output));
		goto out;
	}
@@ -427,12 +428,14 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
-			printk("template (%u) too big for tvmem (%lu)\n",
			printk(KERN_ERR
			       "template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			goto out;
		}
-		printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ",
		printk(KERN_INFO "test%3u "
		       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
		       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
		if (sec)
@@ -443,7 +446,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
					    speed[i].plen, output);
		if (ret) {
-			printk("hashing failed ret=%d\n", ret);
			printk(KERN_ERR "hashing failed ret=%d\n", ret);
			break;
		}
	}
@@ -466,239 +469,255 @@ static void test_available(void)
static inline int tcrypt_test(const char *alg)
{
-	return alg_test(alg, alg, 0, 0);
	int ret;
	ret = alg_test(alg, alg, 0, 0);
	/* non-fips algs return -EINVAL in fips mode */
	if (fips_enabled && ret == -EINVAL)
		ret = 0;
	return ret;
}
static void do_test(int m) static int do_test(int m)
{ {
int i; int i;
int ret = 0;
switch (m) { switch (m) {
case 0: case 0:
for (i = 1; i < 200; i++) for (i = 1; i < 200; i++)
do_test(i); ret += do_test(i);
break; break;
case 1: case 1:
tcrypt_test("md5"); ret += tcrypt_test("md5");
break; break;
case 2: case 2:
tcrypt_test("sha1"); ret += tcrypt_test("sha1");
break; break;
case 3: case 3:
tcrypt_test("ecb(des)"); ret += tcrypt_test("ecb(des)");
tcrypt_test("cbc(des)"); ret += tcrypt_test("cbc(des)");
break; break;
case 4: case 4:
tcrypt_test("ecb(des3_ede)"); ret += tcrypt_test("ecb(des3_ede)");
tcrypt_test("cbc(des3_ede)"); ret += tcrypt_test("cbc(des3_ede)");
break; break;
case 5: case 5:
tcrypt_test("md4"); ret += tcrypt_test("md4");
break; break;
case 6: case 6:
tcrypt_test("sha256"); ret += tcrypt_test("sha256");
break; break;
case 7: case 7:
tcrypt_test("ecb(blowfish)"); ret += tcrypt_test("ecb(blowfish)");
tcrypt_test("cbc(blowfish)"); ret += tcrypt_test("cbc(blowfish)");
break; break;
case 8: case 8:
tcrypt_test("ecb(twofish)"); ret += tcrypt_test("ecb(twofish)");
tcrypt_test("cbc(twofish)"); ret += tcrypt_test("cbc(twofish)");
break; break;
case 9: case 9:
tcrypt_test("ecb(serpent)"); ret += tcrypt_test("ecb(serpent)");
break; break;
case 10: case 10:
tcrypt_test("ecb(aes)"); ret += tcrypt_test("ecb(aes)");
tcrypt_test("cbc(aes)"); ret += tcrypt_test("cbc(aes)");
tcrypt_test("lrw(aes)"); ret += tcrypt_test("lrw(aes)");
tcrypt_test("xts(aes)"); ret += tcrypt_test("xts(aes)");
tcrypt_test("rfc3686(ctr(aes))"); ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
break; break;
case 11: case 11:
tcrypt_test("sha384"); ret += tcrypt_test("sha384");
break; break;
case 12: case 12:
tcrypt_test("sha512"); ret += tcrypt_test("sha512");
break; break;
case 13: case 13:
tcrypt_test("deflate"); ret += tcrypt_test("deflate");
break; break;
case 14: case 14:
tcrypt_test("ecb(cast5)"); ret += tcrypt_test("ecb(cast5)");
break; break;
case 15: case 15:
tcrypt_test("ecb(cast6)"); ret += tcrypt_test("ecb(cast6)");
break; break;
case 16: case 16:
tcrypt_test("ecb(arc4)"); ret += tcrypt_test("ecb(arc4)");
break; break;
case 17: case 17:
tcrypt_test("michael_mic"); ret += tcrypt_test("michael_mic");
break; break;
case 18: case 18:
tcrypt_test("crc32c"); ret += tcrypt_test("crc32c");
break; break;
case 19: case 19:
tcrypt_test("ecb(tea)"); ret += tcrypt_test("ecb(tea)");
break; break;
case 20: case 20:
tcrypt_test("ecb(xtea)"); ret += tcrypt_test("ecb(xtea)");
break; break;
case 21: case 21:
tcrypt_test("ecb(khazad)"); ret += tcrypt_test("ecb(khazad)");
break; break;
case 22: case 22:
tcrypt_test("wp512"); ret += tcrypt_test("wp512");
break; break;
case 23: case 23:
tcrypt_test("wp384"); ret += tcrypt_test("wp384");
break; break;
case 24: case 24:
tcrypt_test("wp256"); ret += tcrypt_test("wp256");
break; break;
case 25: case 25:
tcrypt_test("ecb(tnepres)"); ret += tcrypt_test("ecb(tnepres)");
break; break;
case 26: case 26:
tcrypt_test("ecb(anubis)"); ret += tcrypt_test("ecb(anubis)");
tcrypt_test("cbc(anubis)"); ret += tcrypt_test("cbc(anubis)");
break; break;
case 27: case 27:
tcrypt_test("tgr192"); ret += tcrypt_test("tgr192");
break; break;
case 28: case 28:
tcrypt_test("tgr160"); ret += tcrypt_test("tgr160");
break; break;
case 29: case 29:
tcrypt_test("tgr128"); ret += tcrypt_test("tgr128");
break; break;
case 30: case 30:
tcrypt_test("ecb(xeta)"); ret += tcrypt_test("ecb(xeta)");
break; break;
case 31: case 31:
tcrypt_test("pcbc(fcrypt)"); ret += tcrypt_test("pcbc(fcrypt)");
break; break;
case 32: case 32:
tcrypt_test("ecb(camellia)"); ret += tcrypt_test("ecb(camellia)");
tcrypt_test("cbc(camellia)"); ret += tcrypt_test("cbc(camellia)");
break; break;
case 33: case 33:
tcrypt_test("sha224"); ret += tcrypt_test("sha224");
break; break;
case 34: case 34:
tcrypt_test("salsa20"); ret += tcrypt_test("salsa20");
break; break;
case 35: case 35:
tcrypt_test("gcm(aes)"); ret += tcrypt_test("gcm(aes)");
break; break;
case 36: case 36:
tcrypt_test("lzo"); ret += tcrypt_test("lzo");
break; break;
case 37: case 37:
tcrypt_test("ccm(aes)"); ret += tcrypt_test("ccm(aes)");
break; break;
case 38: case 38:
tcrypt_test("cts(cbc(aes))"); ret += tcrypt_test("cts(cbc(aes))");
break; break;
case 39: case 39:
tcrypt_test("rmd128"); ret += tcrypt_test("rmd128");
break; break;
case 40: case 40:
tcrypt_test("rmd160"); ret += tcrypt_test("rmd160");
break; break;
case 41: case 41:
tcrypt_test("rmd256"); ret += tcrypt_test("rmd256");
break; break;
case 42: case 42:
tcrypt_test("rmd320"); ret += tcrypt_test("rmd320");
break; break;
case 43: case 43:
tcrypt_test("ecb(seed)"); ret += tcrypt_test("ecb(seed)");
break; break;
case 44: case 44:
tcrypt_test("zlib"); ret += tcrypt_test("zlib");
break;
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break; break;
case 100: case 100:
tcrypt_test("hmac(md5)"); ret += tcrypt_test("hmac(md5)");
break; break;
case 101: case 101:
tcrypt_test("hmac(sha1)"); ret += tcrypt_test("hmac(sha1)");
break; break;
case 102: case 102:
tcrypt_test("hmac(sha256)"); ret += tcrypt_test("hmac(sha256)");
break; break;
case 103: case 103:
tcrypt_test("hmac(sha384)"); ret += tcrypt_test("hmac(sha384)");
break; break;
case 104: case 104:
tcrypt_test("hmac(sha512)"); ret += tcrypt_test("hmac(sha512)");
break; break;
case 105: case 105:
tcrypt_test("hmac(sha224)"); ret += tcrypt_test("hmac(sha224)");
break; break;
case 106: case 106:
tcrypt_test("xcbc(aes)"); ret += tcrypt_test("xcbc(aes)");
break; break;
case 107: case 107:
tcrypt_test("hmac(rmd128)"); ret += tcrypt_test("hmac(rmd128)");
break; break;
case 108: case 108:
tcrypt_test("hmac(rmd160)"); ret += tcrypt_test("hmac(rmd160)");
break;
case 150:
ret += tcrypt_test("ansi_cprng");
break; break;
case 200: case 200:
...@@ -862,6 +881,8 @@ static void do_test(int m) ...@@ -862,6 +881,8 @@ static void do_test(int m)
test_available(); test_available();
break; break;
} }
return ret;
} }
static int __init tcrypt_mod_init(void)
@@ -875,15 +896,21 @@ static int __init tcrypt_mod_init(void)
		goto err_free_tv;
	}
-	do_test(mode);
	err = do_test(mode);
	if (err) {
		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
		goto err_free_tv;
	}
-	/* We intentionaly return -EAGAIN to prevent keeping
-	 * the module. It does all its work from init()
-	 * and doesn't offer any runtime functionality
	/* We intentionaly return -EAGAIN to prevent keeping the module,
	 * unless we're running in fips mode. It does all its work from
	 * init() and doesn't offer any runtime functionality, but in
	 * the fips case, checking for a successful load is helpful.
	 * => we don't need it in the memory, do we?
	 * -- mludvig
	 */
-	err = -EAGAIN;
	if (!fips_enabled)
		err = -EAGAIN;
err_free_tv:
	for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
......
@@ -19,6 +19,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/rng.h>
#include "internal.h"
#include "testmgr.h"
@@ -84,10 +85,16 @@ struct hash_test_suite {
	unsigned int count;
};
struct cprng_test_suite {
struct cprng_testvec *vecs;
unsigned int count;
};
struct alg_test_desc {
	const char *alg;
	int (*test)(const struct alg_test_desc *desc, const char *driver,
		    u32 type, u32 mask);
	int fips_allowed;	/* set if alg is allowed in fips mode */
	union {
		struct aead_test_suite aead;
@@ -95,14 +102,12 @@ struct alg_test_desc {
		struct comp_test_suite comp;
		struct pcomp_test_suite pcomp;
		struct hash_test_suite hash;
		struct cprng_test_suite cprng;
	} suite;
};

static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
-static char *xbuf[XBUFSIZE];
-static char *axbuf[XBUFSIZE];

static void hexdump(unsigned char *buf, unsigned int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
@@ -121,6 +126,33 @@ static void tcrypt_complete(struct crypto_async_request *req, int err)
	complete(&res->completion);
}
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
buf[i] = (void *)__get_free_page(GFP_KERNEL);
if (!buf[i])
goto err_free_buf;
}
return 0;
err_free_buf:
while (i-- > 0)
free_page((unsigned long)buf[i]);
return -ENOMEM;
}
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++)
free_page((unsigned long)buf[i]);
}
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
unsigned int tcount) unsigned int tcount)
{ {
...@@ -130,8 +162,12 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -130,8 +162,12 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
char result[64]; char result[64];
struct ahash_request *req; struct ahash_request *req;
struct tcrypt_result tresult; struct tcrypt_result tresult;
int ret;
void *hash_buff; void *hash_buff;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
init_completion(&tresult.completion); init_completion(&tresult.completion);
...@@ -139,17 +175,25 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -139,17 +175,25 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
if (!req) { if (!req) {
printk(KERN_ERR "alg: hash: Failed to allocate request for " printk(KERN_ERR "alg: hash: Failed to allocate request for "
"%s\n", algo); "%s\n", algo);
ret = -ENOMEM;
goto out_noreq; goto out_noreq;
} }
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult); tcrypt_complete, &tresult);
j = 0;
for (i = 0; i < tcount; i++) { for (i = 0; i < tcount; i++) {
if (template[i].np)
continue;
j++;
memset(result, 0, 64); memset(result, 0, 64);
hash_buff = xbuf[0]; hash_buff = xbuf[0];
ret = -EINVAL;
if (WARN_ON(template[i].psize > PAGE_SIZE))
goto out;
memcpy(hash_buff, template[i].plaintext, template[i].psize); memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize); sg_init_one(&sg[0], hash_buff, template[i].psize);
...@@ -159,7 +203,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -159,7 +203,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
template[i].ksize); template[i].ksize);
if (ret) { if (ret) {
printk(KERN_ERR "alg: hash: setkey failed on " printk(KERN_ERR "alg: hash: setkey failed on "
"test %d for %s: ret=%d\n", i + 1, algo, "test %d for %s: ret=%d\n", j, algo,
-ret); -ret);
goto out; goto out;
} }
...@@ -181,14 +225,14 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -181,14 +225,14 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
/* fall through */ /* fall through */
default: default:
printk(KERN_ERR "alg: hash: digest failed on test %d " printk(KERN_ERR "alg: hash: digest failed on test %d "
"for %s: ret=%d\n", i + 1, algo, -ret); "for %s: ret=%d\n", j, algo, -ret);
goto out; goto out;
} }
if (memcmp(result, template[i].digest, if (memcmp(result, template[i].digest,
crypto_ahash_digestsize(tfm))) { crypto_ahash_digestsize(tfm))) {
printk(KERN_ERR "alg: hash: Test %d failed for %s\n", printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
i + 1, algo); j, algo);
hexdump(result, crypto_ahash_digestsize(tfm)); hexdump(result, crypto_ahash_digestsize(tfm));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
...@@ -203,7 +247,11 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -203,7 +247,11 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
temp = 0; temp = 0;
sg_init_table(sg, template[i].np); sg_init_table(sg, template[i].np);
ret = -EINVAL;
for (k = 0; k < template[i].np; k++) { for (k = 0; k < template[i].np; k++) {
if (WARN_ON(offset_in_page(IDX[k]) +
template[i].tap[k] > PAGE_SIZE))
goto out;
sg_set_buf(&sg[k], sg_set_buf(&sg[k],
memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]), offset_in_page(IDX[k]),
...@@ -265,6 +313,8 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, ...@@ -265,6 +313,8 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
out: out:
ahash_request_free(req); ahash_request_free(req);
out_noreq: out_noreq:
testmgr_free_buf(xbuf);
out_nobuf:
return ret; return ret;
} }
...@@ -273,7 +323,7 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -273,7 +323,7 @@ static int test_aead(struct crypto_aead *tfm, int enc,
{ {
const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
unsigned int i, j, k, n, temp; unsigned int i, j, k, n, temp;
int ret = 0; int ret = -ENOMEM;
char *q; char *q;
char *key; char *key;
struct aead_request *req; struct aead_request *req;
...@@ -285,6 +335,13 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -285,6 +335,13 @@ static int test_aead(struct crypto_aead *tfm, int enc,
void *input; void *input;
void *assoc; void *assoc;
char iv[MAX_IVLEN]; char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
if (testmgr_alloc_buf(xbuf))
goto out_noxbuf;
if (testmgr_alloc_buf(axbuf))
goto out_noaxbuf;
if (enc == ENCRYPT) if (enc == ENCRYPT)
e = "encryption"; e = "encryption";
...@@ -297,7 +354,6 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -297,7 +354,6 @@ static int test_aead(struct crypto_aead *tfm, int enc,
if (!req) { if (!req) {
printk(KERN_ERR "alg: aead: Failed to allocate request for " printk(KERN_ERR "alg: aead: Failed to allocate request for "
"%s\n", algo); "%s\n", algo);
ret = -ENOMEM;
goto out; goto out;
} }
...@@ -314,6 +370,11 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -314,6 +370,11 @@ static int test_aead(struct crypto_aead *tfm, int enc,
input = xbuf[0]; input = xbuf[0];
assoc = axbuf[0]; assoc = axbuf[0];
ret = -EINVAL;
if (WARN_ON(template[i].ilen > PAGE_SIZE ||
template[i].alen > PAGE_SIZE))
goto out;
memcpy(input, template[i].input, template[i].ilen); memcpy(input, template[i].input, template[i].ilen);
memcpy(assoc, template[i].assoc, template[i].alen); memcpy(assoc, template[i].assoc, template[i].alen);
if (template[i].iv) if (template[i].iv)
...@@ -363,6 +424,16 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -363,6 +424,16 @@ static int test_aead(struct crypto_aead *tfm, int enc,
switch (ret) { switch (ret) {
case 0: case 0:
if (template[i].novrfy) {
/* verification was supposed to fail */
printk(KERN_ERR "alg: aead: %s failed "
"on test %d for %s: ret was 0, "
"expected -EBADMSG\n",
e, j, algo);
/* so really, we got a bad message */
ret = -EBADMSG;
goto out;
}
break; break;
case -EINPROGRESS: case -EINPROGRESS:
case -EBUSY: case -EBUSY:
...@@ -372,6 +443,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -372,6 +443,10 @@ static int test_aead(struct crypto_aead *tfm, int enc,
INIT_COMPLETION(result.completion); INIT_COMPLETION(result.completion);
break; break;
} }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
continue;
/* fall through */ /* fall through */
default: default:
printk(KERN_ERR "alg: aead: %s failed on test " printk(KERN_ERR "alg: aead: %s failed on test "
...@@ -459,7 +534,11 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -459,7 +534,11 @@ static int test_aead(struct crypto_aead *tfm, int enc,
} }
sg_init_table(asg, template[i].anp); sg_init_table(asg, template[i].anp);
ret = -EINVAL;
for (k = 0, temp = 0; k < template[i].anp; k++) { for (k = 0, temp = 0; k < template[i].anp; k++) {
if (WARN_ON(offset_in_page(IDX[k]) +
template[i].atap[k] > PAGE_SIZE))
goto out;
sg_set_buf(&asg[k], sg_set_buf(&asg[k],
memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
offset_in_page(IDX[k]), offset_in_page(IDX[k]),
...@@ -481,6 +560,16 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -481,6 +560,16 @@ static int test_aead(struct crypto_aead *tfm, int enc,
switch (ret) { switch (ret) {
case 0: case 0:
if (template[i].novrfy) {
/* verification was supposed to fail */
printk(KERN_ERR "alg: aead: %s failed "
"on chunk test %d for %s: ret "
"was 0, expected -EBADMSG\n",
e, j, algo);
/* so really, we got a bad message */
ret = -EBADMSG;
goto out;
}
break; break;
case -EINPROGRESS: case -EINPROGRESS:
case -EBUSY: case -EBUSY:
...@@ -490,6 +579,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -490,6 +579,10 @@ static int test_aead(struct crypto_aead *tfm, int enc,
INIT_COMPLETION(result.completion); INIT_COMPLETION(result.completion);
break; break;
} }
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
continue;
/* fall through */ /* fall through */
default: default:
printk(KERN_ERR "alg: aead: %s failed on " printk(KERN_ERR "alg: aead: %s failed on "
...@@ -546,6 +639,10 @@ static int test_aead(struct crypto_aead *tfm, int enc, ...@@ -546,6 +639,10 @@ static int test_aead(struct crypto_aead *tfm, int enc,
out: out:
aead_request_free(req); aead_request_free(req);
testmgr_free_buf(axbuf);
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
return ret; return ret;
} }
...@@ -554,10 +651,14 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, ...@@ -554,10 +651,14 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
{ {
const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm)); const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
unsigned int i, j, k; unsigned int i, j, k;
int ret;
char *q; char *q;
const char *e; const char *e;
void *data; void *data;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
if (enc == ENCRYPT) if (enc == ENCRYPT)
e = "encryption"; e = "encryption";
...@@ -571,6 +672,10 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, ...@@ -571,6 +672,10 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
j++; j++;
ret = -EINVAL;
if (WARN_ON(template[i].ilen > PAGE_SIZE))
goto out;
data = xbuf[0]; data = xbuf[0];
memcpy(data, template[i].input, template[i].ilen); memcpy(data, template[i].input, template[i].ilen);
...@@ -611,6 +716,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, ...@@ -611,6 +716,8 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = 0; ret = 0;
out: out:
testmgr_free_buf(xbuf);
out_nobuf:
return ret; return ret;
} }
...@@ -620,7 +727,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, ...@@ -620,7 +727,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
const char *algo = const char *algo =
crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)); crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
unsigned int i, j, k, n, temp; unsigned int i, j, k, n, temp;
int ret;
char *q; char *q;
struct ablkcipher_request *req; struct ablkcipher_request *req;
struct scatterlist sg[8]; struct scatterlist sg[8];
...@@ -628,6 +734,11 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, ...@@ -628,6 +734,11 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
struct tcrypt_result result; struct tcrypt_result result;
void *data; void *data;
char iv[MAX_IVLEN]; char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
if (enc == ENCRYPT) if (enc == ENCRYPT)
e = "encryption"; e = "encryption";
...@@ -640,7 +751,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, ...@@ -640,7 +751,6 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
if (!req) { if (!req) {
printk(KERN_ERR "alg: skcipher: Failed to allocate request " printk(KERN_ERR "alg: skcipher: Failed to allocate request "
"for %s\n", algo); "for %s\n", algo);
ret = -ENOMEM;
goto out; goto out;
} }
...@@ -657,6 +767,10 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, ...@@ -657,6 +767,10 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
if (!(template[i].np)) { if (!(template[i].np)) {
j++; j++;
ret = -EINVAL;
if (WARN_ON(template[i].ilen > PAGE_SIZE))
goto out;
data = xbuf[0]; data = xbuf[0];
memcpy(data, template[i].input, template[i].ilen); memcpy(data, template[i].input, template[i].ilen);
...@@ -825,6 +939,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, ...@@ -825,6 +939,8 @@ static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
out: out:
ablkcipher_request_free(req); ablkcipher_request_free(req);
testmgr_free_buf(xbuf);
out_nobuf:
return ret; return ret;
} }
...@@ -837,7 +953,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, ...@@ -837,7 +953,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
int ret; int ret;
for (i = 0; i < ctcount; i++) { for (i = 0; i < ctcount; i++) {
int ilen, dlen = COMP_BUF_SIZE; int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(result, 0, sizeof (result)); memset(result, 0, sizeof (result));
...@@ -869,7 +986,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, ...@@ -869,7 +986,8 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
} }
for (i = 0; i < dtcount; i++) { for (i = 0; i < dtcount; i++) {
int ilen, dlen = COMP_BUF_SIZE; int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(result, 0, sizeof (result)); memset(result, 0, sizeof (result));
...@@ -914,24 +1032,25 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -914,24 +1032,25 @@ static int test_pcomp(struct crypto_pcomp *tfm,
const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm)); const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
unsigned int i; unsigned int i;
char result[COMP_BUF_SIZE]; char result[COMP_BUF_SIZE];
int error; int res;
for (i = 0; i < ctcount; i++) { for (i = 0; i < ctcount; i++) {
struct comp_request req; struct comp_request req;
unsigned int produced = 0;
error = crypto_compress_setup(tfm, ctemplate[i].params, res = crypto_compress_setup(tfm, ctemplate[i].params,
ctemplate[i].paramsize); ctemplate[i].paramsize);
if (error) { if (res) {
pr_err("alg: pcomp: compression setup failed on test " pr_err("alg: pcomp: compression setup failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
error = crypto_compress_init(tfm); res = crypto_compress_init(tfm);
if (error) { if (res) {
pr_err("alg: pcomp: compression init failed on test " pr_err("alg: pcomp: compression init failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
memset(result, 0, sizeof(result)); memset(result, 0, sizeof(result));
...@@ -941,32 +1060,37 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -941,32 +1060,37 @@ static int test_pcomp(struct crypto_pcomp *tfm,
req.next_out = result; req.next_out = result;
req.avail_out = ctemplate[i].outlen / 2; req.avail_out = ctemplate[i].outlen / 2;
error = crypto_compress_update(tfm, &req); res = crypto_compress_update(tfm, &req);
if (error && (error != -EAGAIN || req.avail_in)) { if (res < 0 && (res != -EAGAIN || req.avail_in)) {
pr_err("alg: pcomp: compression update failed on test " pr_err("alg: pcomp: compression update failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
if (res > 0)
produced += res;
/* Add remaining input data */ /* Add remaining input data */
req.avail_in += (ctemplate[i].inlen + 1) / 2; req.avail_in += (ctemplate[i].inlen + 1) / 2;
error = crypto_compress_update(tfm, &req); res = crypto_compress_update(tfm, &req);
if (error && (error != -EAGAIN || req.avail_in)) { if (res < 0 && (res != -EAGAIN || req.avail_in)) {
pr_err("alg: pcomp: compression update failed on test " pr_err("alg: pcomp: compression update failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
if (res > 0)
produced += res;
/* Provide remaining output space */ /* Provide remaining output space */
req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2; req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
error = crypto_compress_final(tfm, &req); res = crypto_compress_final(tfm, &req);
if (error) { if (res < 0) {
pr_err("alg: pcomp: compression final failed on test " pr_err("alg: pcomp: compression final failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
produced += res;
if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) { if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
pr_err("alg: comp: Compression test %d failed for %s: " pr_err("alg: comp: Compression test %d failed for %s: "
...@@ -976,6 +1100,13 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -976,6 +1100,13 @@ static int test_pcomp(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
if (produced != ctemplate[i].outlen) {
pr_err("alg: comp: Compression test %d failed for %s: "
"returned len = %u (expected %d)\n", i + 1,
algo, produced, ctemplate[i].outlen);
return -EINVAL;
}
if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) { if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
pr_err("alg: pcomp: Compression test %d failed for " pr_err("alg: pcomp: Compression test %d failed for "
"%s\n", i + 1, algo); "%s\n", i + 1, algo);
...@@ -986,21 +1117,21 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -986,21 +1117,21 @@ static int test_pcomp(struct crypto_pcomp *tfm,
for (i = 0; i < dtcount; i++) { for (i = 0; i < dtcount; i++) {
struct comp_request req; struct comp_request req;
unsigned int produced = 0;
error = crypto_decompress_setup(tfm, dtemplate[i].params, res = crypto_decompress_setup(tfm, dtemplate[i].params,
dtemplate[i].paramsize); dtemplate[i].paramsize);
if (error) { if (res) {
pr_err("alg: pcomp: decompression setup failed on " pr_err("alg: pcomp: decompression setup failed on "
"test %d for %s: error=%d\n", i + 1, algo, "test %d for %s: error=%d\n", i + 1, algo, res);
error); return res;
return error;
} }
error = crypto_decompress_init(tfm); res = crypto_decompress_init(tfm);
if (error) { if (res) {
pr_err("alg: pcomp: decompression init failed on test " pr_err("alg: pcomp: decompression init failed on test "
"%d for %s: error=%d\n", i + 1, algo, error); "%d for %s: error=%d\n", i + 1, algo, res);
return error; return res;
} }
memset(result, 0, sizeof(result)); memset(result, 0, sizeof(result));
...@@ -1010,35 +1141,38 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -1010,35 +1141,38 @@ static int test_pcomp(struct crypto_pcomp *tfm,
req.next_out = result; req.next_out = result;
req.avail_out = dtemplate[i].outlen / 2; req.avail_out = dtemplate[i].outlen / 2;
error = crypto_decompress_update(tfm, &req); res = crypto_decompress_update(tfm, &req);
if (error && (error != -EAGAIN || req.avail_in)) { if (res < 0 && (res != -EAGAIN || req.avail_in)) {
pr_err("alg: pcomp: decompression update failed on " pr_err("alg: pcomp: decompression update failed on "
"test %d for %s: error=%d\n", i + 1, algo, "test %d for %s: error=%d\n", i + 1, algo, res);
error); return res;
return error;
} }
if (res > 0)
produced += res;
/* Add remaining input data */ /* Add remaining input data */
req.avail_in += (dtemplate[i].inlen + 1) / 2; req.avail_in += (dtemplate[i].inlen + 1) / 2;
error = crypto_decompress_update(tfm, &req); res = crypto_decompress_update(tfm, &req);
if (error && (error != -EAGAIN || req.avail_in)) { if (res < 0 && (res != -EAGAIN || req.avail_in)) {
pr_err("alg: pcomp: decompression update failed on " pr_err("alg: pcomp: decompression update failed on "
"test %d for %s: error=%d\n", i + 1, algo, "test %d for %s: error=%d\n", i + 1, algo, res);
error); return res;
return error;
} }
if (res > 0)
produced += res;
/* Provide remaining output space */ /* Provide remaining output space */
req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2; req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
error = crypto_decompress_final(tfm, &req); res = crypto_decompress_final(tfm, &req);
if (error && (error != -EAGAIN || req.avail_in)) { if (res < 0 && (res != -EAGAIN || req.avail_in)) {
pr_err("alg: pcomp: decompression final failed on " pr_err("alg: pcomp: decompression final failed on "
"test %d for %s: error=%d\n", i + 1, algo, "test %d for %s: error=%d\n", i + 1, algo, res);
error); return res;
return error;
} }
if (res > 0)
produced += res;
if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) { if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
pr_err("alg: comp: Decompression test %d failed for " pr_err("alg: comp: Decompression test %d failed for "
...@@ -1048,6 +1182,13 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -1048,6 +1182,13 @@ static int test_pcomp(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
if (produced != dtemplate[i].outlen) {
pr_err("alg: comp: Decompression test %d failed for "
"%s: returned len = %u (expected %d)\n", i + 1,
algo, produced, dtemplate[i].outlen);
return -EINVAL;
}
if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) { if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
pr_err("alg: pcomp: Decompression test %d failed for " pr_err("alg: pcomp: Decompression test %d failed for "
"%s\n", i + 1, algo); "%s\n", i + 1, algo);
...@@ -1059,6 +1200,68 @@ static int test_pcomp(struct crypto_pcomp *tfm, ...@@ -1059,6 +1200,68 @@ static int test_pcomp(struct crypto_pcomp *tfm,
return 0; return 0;
} }
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
int err, i, j, seedsize;
u8 *seed;
char result[32];
seedsize = crypto_rng_seedsize(tfm);
seed = kmalloc(seedsize, GFP_KERNEL);
if (!seed) {
printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
"for %s\n", algo);
return -ENOMEM;
}
for (i = 0; i < tcount; i++) {
memset(result, 0, 32);
memcpy(seed, template[i].v, template[i].vlen);
memcpy(seed + template[i].vlen, template[i].key,
template[i].klen);
memcpy(seed + template[i].vlen + template[i].klen,
template[i].dt, template[i].dtlen);
err = crypto_rng_reset(tfm, seed, seedsize);
if (err) {
printk(KERN_ERR "alg: cprng: Failed to reset rng "
"for %s\n", algo);
goto out;
}
for (j = 0; j < template[i].loops; j++) {
err = crypto_rng_get_bytes(tfm, result,
template[i].rlen);
if (err != template[i].rlen) {
printk(KERN_ERR "alg: cprng: Failed to obtain "
"the correct amount of random data for "
"%s (requested %d, got %d)\n", algo,
template[i].rlen, err);
goto out;
}
}
err = memcmp(result, template[i].result,
template[i].rlen);
if (err) {
printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
i, algo);
hexdump(result, template[i].rlen);
err = -EINVAL;
goto out;
}
}
out:
kfree(seed);
return err;
}
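test_cprng() seeds the generator with V || key || DT and then reads rlen bytes per loop. A minimal sketch of driving the same RNG API outside the test harness; the all-zero seed and the 48-byte layout (AES-128 ansi_cprng) are assumptions for illustration only:

#include <linux/err.h>
#include <linux/string.h>
#include <crypto/rng.h>

/* Illustrative only: seed an "ansi_cprng" instance and read 16 bytes. */
static int sketch_get_random(u8 *out)
{
	struct crypto_rng *rng;
	u8 seed[48];	/* V (16) || key (16) || DT (16) for AES-128 */
	int err;

	rng = crypto_alloc_rng("ansi_cprng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Real users must fill the seed with proper entropy. */
	memset(seed, 0, sizeof(seed));
	err = crypto_rng_reset(rng, seed, crypto_rng_seedsize(rng));
	if (!err)
		err = crypto_rng_get_bytes(rng, out, 16);

	crypto_free_rng(rng);
	return err < 0 ? err : 0;
}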
static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask) u32 type, u32 mask)
{ {
...@@ -1258,11 +1461,42 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, ...@@ -1258,11 +1461,42 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
return err; return err;
} }
static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
struct crypto_rng *rng;
int err;
rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
return PTR_ERR(rng);
}
err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
crypto_free_rng(rng);
return err;
}
/* Please keep this list sorted by algorithm name. */ /* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = { static const struct alg_test_desc alg_test_descs[] = {
{ {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.fips_allowed = 1,
.suite = {
.cprng = {
.vecs = ansi_cprng_aes_tv_template,
.count = ANSI_CPRNG_AES_TEST_VECTORS
}
}
}, {
.alg = "cbc(aes)", .alg = "cbc(aes)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
...@@ -1338,6 +1572,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1338,6 +1572,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "cbc(des3_ede)", .alg = "cbc(des3_ede)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
...@@ -1368,6 +1603,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1368,6 +1603,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "ccm(aes)", .alg = "ccm(aes)",
.test = alg_test_aead, .test = alg_test_aead,
.fips_allowed = 1,
.suite = { .suite = {
.aead = { .aead = {
.enc = { .enc = {
...@@ -1383,12 +1619,29 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1383,12 +1619,29 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "crc32c", .alg = "crc32c",
.test = alg_test_crc32c, .test = alg_test_crc32c,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = crc32c_tv_template, .vecs = crc32c_tv_template,
.count = CRC32C_TEST_VECTORS .count = CRC32C_TEST_VECTORS
} }
} }
}, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_ctr_enc_tv_template,
.count = AES_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ctr_dec_tv_template,
.count = AES_CTR_DEC_TEST_VECTORS
}
}
}
}, { }, {
.alg = "cts(cbc(aes))", .alg = "cts(cbc(aes))",
.test = alg_test_skcipher, .test = alg_test_skcipher,
...@@ -1422,6 +1675,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1422,6 +1675,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "ecb(aes)", .alg = "ecb(aes)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
...@@ -1527,6 +1781,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1527,6 +1781,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "ecb(des)", .alg = "ecb(des)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
...@@ -1542,6 +1797,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1542,6 +1797,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "ecb(des3_ede)", .alg = "ecb(des3_ede)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
...@@ -1677,6 +1933,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1677,6 +1933,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "gcm(aes)", .alg = "gcm(aes)",
.test = alg_test_aead, .test = alg_test_aead,
.fips_allowed = 1,
.suite = { .suite = {
.aead = { .aead = {
.enc = { .enc = {
...@@ -1719,6 +1976,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1719,6 +1976,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "hmac(sha1)", .alg = "hmac(sha1)",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = hmac_sha1_tv_template, .vecs = hmac_sha1_tv_template,
...@@ -1728,6 +1986,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1728,6 +1986,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "hmac(sha224)", .alg = "hmac(sha224)",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = hmac_sha224_tv_template, .vecs = hmac_sha224_tv_template,
...@@ -1737,6 +1996,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1737,6 +1996,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "hmac(sha256)", .alg = "hmac(sha256)",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = hmac_sha256_tv_template, .vecs = hmac_sha256_tv_template,
...@@ -1746,6 +2006,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1746,6 +2006,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "hmac(sha384)", .alg = "hmac(sha384)",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = hmac_sha384_tv_template, .vecs = hmac_sha384_tv_template,
...@@ -1755,6 +2016,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1755,6 +2016,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "hmac(sha512)", .alg = "hmac(sha512)",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = hmac_sha512_tv_template, .vecs = hmac_sha512_tv_template,
...@@ -1836,15 +2098,32 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1836,15 +2098,32 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "rfc3686(ctr(aes))", .alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher, .test = alg_test_skcipher,
.fips_allowed = 1,
.suite = { .suite = {
.cipher = { .cipher = {
.enc = { .enc = {
.vecs = aes_ctr_enc_tv_template, .vecs = aes_ctr_rfc3686_enc_tv_template,
.count = AES_CTR_ENC_TEST_VECTORS .count = AES_CTR_3686_ENC_TEST_VECTORS
}, },
.dec = { .dec = {
.vecs = aes_ctr_dec_tv_template, .vecs = aes_ctr_rfc3686_dec_tv_template,
.count = AES_CTR_DEC_TEST_VECTORS .count = AES_CTR_3686_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "rfc4309(ccm(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs = aes_ccm_rfc4309_enc_tv_template,
.count = AES_CCM_4309_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ccm_rfc4309_dec_tv_template,
.count = AES_CCM_4309_DEC_TEST_VECTORS
} }
} }
} }
...@@ -1898,6 +2177,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1898,6 +2177,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "sha1", .alg = "sha1",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = sha1_tv_template, .vecs = sha1_tv_template,
...@@ -1907,6 +2187,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1907,6 +2187,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "sha224", .alg = "sha224",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = sha224_tv_template, .vecs = sha224_tv_template,
...@@ -1916,6 +2197,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1916,6 +2197,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "sha256", .alg = "sha256",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = sha256_tv_template, .vecs = sha256_tv_template,
...@@ -1925,6 +2207,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1925,6 +2207,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "sha384", .alg = "sha384",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = sha384_tv_template, .vecs = sha384_tv_template,
...@@ -1934,6 +2217,7 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1934,6 +2217,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, { }, {
.alg = "sha512", .alg = "sha512",
.test = alg_test_hash, .test = alg_test_hash,
.fips_allowed = 1,
.suite = { .suite = {
.hash = { .hash = {
.vecs = sha512_tv_template, .vecs = sha512_tv_template,
...@@ -2077,60 +2361,36 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) ...@@ -2077,60 +2361,36 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
if (i < 0) if (i < 0)
goto notest; goto notest;
return alg_test_cipher(alg_test_descs + i, driver, type, mask); if (fips_enabled && !alg_test_descs[i].fips_allowed)
goto non_fips_alg;
rc = alg_test_cipher(alg_test_descs + i, driver, type, mask);
goto test_done;
} }
i = alg_find_test(alg); i = alg_find_test(alg);
if (i < 0) if (i < 0)
goto notest; goto notest;
if (fips_enabled && !alg_test_descs[i].fips_allowed)
goto non_fips_alg;
rc = alg_test_descs[i].test(alg_test_descs + i, driver, rc = alg_test_descs[i].test(alg_test_descs + i, driver,
type, mask); type, mask);
test_done:
if (fips_enabled && rc) if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg); panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
if (fips_enabled && !rc)
printk(KERN_INFO "alg: self-tests for %s (%s) passed\n",
driver, alg);
return rc; return rc;
notest: notest:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
return 0; return 0;
non_fips_alg:
return -EINVAL;
} }
EXPORT_SYMBOL_GPL(alg_test); EXPORT_SYMBOL_GPL(alg_test);
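alg_test() is normally invoked by the crypto manager when an algorithm instance is registered; the hedged sketch below only illustrates the return-value behaviour added above (driver and algorithm names are hypothetical):

/*
 * Illustrative only: what a caller (normally cryptomgr) sees from the
 * alg_test() entry point above.  The driver/alg names are hypothetical.
 */
static int sketch_run_selftest(void)
{
	int rc = alg_test("ctr(aes-generic)", "ctr(aes)", 0, 0);

	/*
	 * !fips_enabled:              rc is the self-test result (0 on pass).
	 * fips_enabled, not allowed:  rc == -EINVAL, the algorithm is refused.
	 * fips_enabled, test fails:   alg_test() panics before returning.
	 * fips_enabled, test passes:  a "self-tests ... passed" notice is logged.
	 */
	return rc;
}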
int __init testmgr_init(void)
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
xbuf[i] = (void *)__get_free_page(GFP_KERNEL);
if (!xbuf[i])
goto err_free_xbuf;
}
for (i = 0; i < XBUFSIZE; i++) {
axbuf[i] = (void *)__get_free_page(GFP_KERNEL);
if (!axbuf[i])
goto err_free_axbuf;
}
return 0;
err_free_axbuf:
for (i = 0; i < XBUFSIZE && axbuf[i]; i++)
free_page((unsigned long)axbuf[i]);
err_free_xbuf:
for (i = 0; i < XBUFSIZE && xbuf[i]; i++)
free_page((unsigned long)xbuf[i]);
return -ENOMEM;
}
void testmgr_exit(void)
{
int i;
for (i = 0; i < XBUFSIZE; i++)
free_page((unsigned long)axbuf[i]);
for (i = 0; i < XBUFSIZE; i++)
free_page((unsigned long)xbuf[i]);
}
...@@ -62,6 +62,7 @@ struct aead_testvec { ...@@ -62,6 +62,7 @@ struct aead_testvec {
int np; int np;
int anp; int anp;
unsigned char fail; unsigned char fail;
unsigned char novrfy; /* ccm dec verification failure expected */
unsigned char wk; /* weak key flag */ unsigned char wk; /* weak key flag */
unsigned char klen; unsigned char klen;
unsigned short ilen; unsigned short ilen;
...@@ -69,6 +70,18 @@ struct aead_testvec { ...@@ -69,6 +70,18 @@ struct aead_testvec {
unsigned short rlen; unsigned short rlen;
}; };
struct cprng_testvec {
char *key;
char *dt;
char *v;
char *result;
unsigned char klen;
unsigned short dtlen;
unsigned short vlen;
unsigned short rlen;
unsigned short loops;
};
static char zeroed_string[48]; static char zeroed_string[48];
/* /*
...@@ -2841,12 +2854,16 @@ static struct cipher_testvec cast6_dec_tv_template[] = { ...@@ -2841,12 +2854,16 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
#define AES_LRW_DEC_TEST_VECTORS 8 #define AES_LRW_DEC_TEST_VECTORS 8
#define AES_XTS_ENC_TEST_VECTORS 4 #define AES_XTS_ENC_TEST_VECTORS 4
#define AES_XTS_DEC_TEST_VECTORS 4 #define AES_XTS_DEC_TEST_VECTORS 4
#define AES_CTR_ENC_TEST_VECTORS 7 #define AES_CTR_ENC_TEST_VECTORS 3
#define AES_CTR_DEC_TEST_VECTORS 6 #define AES_CTR_DEC_TEST_VECTORS 3
#define AES_CTR_3686_ENC_TEST_VECTORS 7
#define AES_CTR_3686_DEC_TEST_VECTORS 6
#define AES_GCM_ENC_TEST_VECTORS 9 #define AES_GCM_ENC_TEST_VECTORS 9
#define AES_GCM_DEC_TEST_VECTORS 8 #define AES_GCM_DEC_TEST_VECTORS 8
#define AES_CCM_ENC_TEST_VECTORS 7 #define AES_CCM_ENC_TEST_VECTORS 7
#define AES_CCM_DEC_TEST_VECTORS 7 #define AES_CCM_DEC_TEST_VECTORS 7
#define AES_CCM_4309_ENC_TEST_VECTORS 7
#define AES_CCM_4309_DEC_TEST_VECTORS 10
static struct cipher_testvec aes_enc_tv_template[] = { static struct cipher_testvec aes_enc_tv_template[] = {
{ /* From FIPS-197 */ { /* From FIPS-197 */
...@@ -3983,6 +4000,164 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = { ...@@ -3983,6 +4000,164 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
static struct cipher_testvec aes_ctr_enc_tv_template[] = { static struct cipher_testvec aes_ctr_enc_tv_template[] = {
{ /* From NIST Special Publication 800-38A, Appendix F.5 */
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.klen = 16,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.ilen = 64,
.result = "\x87\x4d\x61\x91\xb6\x20\xe3\x26"
"\x1b\xef\x68\x64\x99\x0d\xb6\xce"
"\x98\x06\xf6\x6b\x79\x70\xfd\xff"
"\x86\x17\x18\x7b\xb9\xff\xfd\xff"
"\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e"
"\x5b\x4f\x09\x02\x0d\xb0\x3e\xab"
"\x1e\x03\x1d\xda\x2f\xbe\x03\xd1"
"\x79\x21\x70\xa0\xf3\x00\x9c\xee",
.rlen = 64,
}, {
.key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
"\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
"\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
.klen = 24,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.ilen = 64,
.result = "\x1a\xbc\x93\x24\x17\x52\x1c\xa2"
"\x4f\x2b\x04\x59\xfe\x7e\x6e\x0b"
"\x09\x03\x39\xec\x0a\xa6\xfa\xef"
"\xd5\xcc\xc2\xc6\xf4\xce\x8e\x94"
"\x1e\x36\xb2\x6b\xd1\xeb\xc6\x70"
"\xd1\xbd\x1d\x66\x56\x20\xab\xf7"
"\x4f\x78\xa7\xf6\xd2\x98\x09\x58"
"\x5a\x97\xda\xec\x58\xc6\xb0\x50",
.rlen = 64,
}, {
.key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
.klen = 32,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.ilen = 64,
.result = "\x60\x1e\xc3\x13\x77\x57\x89\xa5"
"\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28"
"\xf4\x43\xe3\xca\x4d\x62\xb5\x9a"
"\xca\x84\xe9\x90\xca\xca\xf5\xc5"
"\x2b\x09\x30\xda\xa2\x3d\xe9\x4c"
"\xe8\x70\x17\xba\x2d\x84\x98\x8d"
"\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6"
"\x13\xc2\xdd\x08\x45\x79\x41\xa6",
.rlen = 64,
}
};
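These CTR vectors come from NIST SP 800-38A, Appendix F.5: each ciphertext block is the plaintext XORed with E_K(counter), where the counter starts at the iv value above and is incremented once per block. A sketch of that step with the single-block cipher API; it is illustrative and not the kernel ctr template's actual implementation:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/algapi.h>	/* crypto_inc(), crypto_xor() */

/* Illustrative only: one CTR pass over a whole number of AES blocks. */
static int sketch_aes_ctr(const u8 *key, unsigned int keylen,
			  u8 ctr[16], const u8 *in, u8 *out,
			  unsigned int nblocks)
{
	struct crypto_cipher *aes;
	unsigned int i;
	int err;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	err = crypto_cipher_setkey(aes, key, keylen);
	if (err)
		goto out;

	for (i = 0; i < nblocks; i++) {
		u8 ks[16];

		crypto_cipher_encrypt_one(aes, ks, ctr);	/* E_K(counter)   */
		memcpy(out, in, 16);
		crypto_xor(out, ks, 16);			/* C = P ^ E_K()  */
		crypto_inc(ctr, 16);				/* next counter   */
		in += 16;
		out += 16;
	}
out:
	crypto_free_cipher(aes);
	return err;
}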
static struct cipher_testvec aes_ctr_dec_tv_template[] = {
{ /* From NIST Special Publication 800-38A, Appendix F.5 */
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.klen = 16,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x87\x4d\x61\x91\xb6\x20\xe3\x26"
"\x1b\xef\x68\x64\x99\x0d\xb6\xce"
"\x98\x06\xf6\x6b\x79\x70\xfd\xff"
"\x86\x17\x18\x7b\xb9\xff\xfd\xff"
"\x5a\xe4\xdf\x3e\xdb\xd5\xd3\x5e"
"\x5b\x4f\x09\x02\x0d\xb0\x3e\xab"
"\x1e\x03\x1d\xda\x2f\xbe\x03\xd1"
"\x79\x21\x70\xa0\xf3\x00\x9c\xee",
.ilen = 64,
.result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
}, {
.key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
"\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
"\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
.klen = 24,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x1a\xbc\x93\x24\x17\x52\x1c\xa2"
"\x4f\x2b\x04\x59\xfe\x7e\x6e\x0b"
"\x09\x03\x39\xec\x0a\xa6\xfa\xef"
"\xd5\xcc\xc2\xc6\xf4\xce\x8e\x94"
"\x1e\x36\xb2\x6b\xd1\xeb\xc6\x70"
"\xd1\xbd\x1d\x66\x56\x20\xab\xf7"
"\x4f\x78\xa7\xf6\xd2\x98\x09\x58"
"\x5a\x97\xda\xec\x58\xc6\xb0\x50",
.ilen = 64,
.result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
}, {
.key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
.klen = 32,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
.input = "\x60\x1e\xc3\x13\x77\x57\x89\xa5"
"\xb7\xa7\xf5\x04\xbb\xf3\xd2\x28"
"\xf4\x43\xe3\xca\x4d\x62\xb5\x9a"
"\xca\x84\xe9\x90\xca\xca\xf5\xc5"
"\x2b\x09\x30\xda\xa2\x3d\xe9\x4c"
"\xe8\x70\x17\xba\x2d\x84\x98\x8d"
"\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6"
"\x13\xc2\xdd\x08\x45\x79\x41\xa6",
.ilen = 64,
.result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
}
};
static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
{ /* From RFC 3686 */ { /* From RFC 3686 */
.key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
"\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
...@@ -5114,7 +5289,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = { ...@@ -5114,7 +5289,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
}, },
}; };
static struct cipher_testvec aes_ctr_dec_tv_template[] = { static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
{ /* From RFC 3686 */ { /* From RFC 3686 */
.key = "\xae\x68\x52\xf8\x12\x10\x67\xcc" .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
"\x4b\xf7\xa5\x76\x55\x77\xf3\x9e" "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
...@@ -5825,6 +6000,470 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = { ...@@ -5825,6 +6000,470 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = {
}, },
}; };
/*
 * rfc4309 refers to section 8 of rfc3610 for test vectors, but those all
 * use a 13-byte nonce, whereas we only support an 11-byte nonce. Likewise,
 * the test vectors in Special Publication 800-38C use nonce lengths our
 * implementation doesn't support. The following are taken from FIPS CAVS
 * fax files on hand at Red Hat.
 *
 * nb: the actual key length is (klen - 3); the last 3 bytes are really part
 * of the nonce, which is combined with the iv, but they must be supplied as
 * part of the key here.
*/
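Put differently, the CCM nonce for these vectors is 11 bytes: the last 3 bytes of the key material (the RFC 4309 salt) followed by the 8-byte per-request iv, while the real AES key is the first (klen - 3) bytes. A small sketch of that composition, with hypothetical field names:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative only: build the 11-byte CCM nonce used by rfc4309(ccm(aes))
 * from a test vector's key/iv fields as laid out above.
 */
static void sketch_rfc4309_nonce(const u8 *key, unsigned int klen,
				 const u8 iv[8], u8 nonce[11])
{
	memcpy(nonce, key + klen - 3, 3);	/* salt: last 3 "key" bytes */
	memcpy(nonce + 3, iv, 8);		/* per-request IV */
	/* The real AES key is the first (klen - 3) bytes of 'key'. */
}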
static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
{
.key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
"\x2e\x01\xd1\xfc\x5d\x82\x66\x2e"
"\x96\xac\x59",
.klen = 19,
.iv = "\x30\x07\xa1\xe2\xa2\xc7\x55\x24",
.alen = 0,
.input = "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
"\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e"
"\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c"
"\x35\x2e\xad\xe0\x62\xf9\x91\xa1",
.ilen = 32,
.result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
"\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1"
"\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a"
"\x57\x2b\xbe\x5d\x98\xa6\xb1\x32"
"\xda\x24\xea\xd9\xa1\x39\x98\xfd"
"\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
.rlen = 48,
}, {
.key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
"\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3"
"\x4f\xa3\x19",
.klen = 19,
.iv = "\xd3\x01\x5a\xd8\x30\x60\x15\x56",
.assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
"\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
"\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
"\x0a\x63\x09\x78\xbc\x2c\x55\xde",
.alen = 32,
.input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
"\xa9\x28\x63\xba\x12\xa3\x14\x85"
"\x57\x1e\x06\xc9\x7b\x21\xef\x76"
"\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
.ilen = 32,
.result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
"\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
"\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
"\x3b\xb5\xce\x53\xef\xde\xbb\x02"
"\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
"\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
.rlen = 48,
}, {
.key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
"\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
"\x53\x14\x73\x66\x8d\x88\xf6\x80"
"\xa0\x20\x35",
.klen = 27,
.iv = "\x26\xf2\x21\x8d\x50\x20\xda\xe2",
.assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
"\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
"\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
"\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
.alen = 32,
.ilen = 0,
.result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
"\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
.rlen = 16,
}, {
.key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
"\xef\x7a\xd3\xce\xfc\x84\x60\x62"
"\xca\xb4\x40\xaf\x5f\xc9\xc9\x01"
"\xd6\x3c\x8c",
.klen = 27,
.iv = "\x86\x84\xb6\xcd\xef\x09\x2e\x94",
.assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91"
"\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
"\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
"\xe3\x00\x73\x69\x84\x69\x87\x79",
.alen = 32,
.input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c"
"\x43\x69\x3a\x2d\x8e\x70\xad\x7e"
"\xe0\xe5\x46\x09\x80\x89\x13\xb2"
"\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b",
.ilen = 32,
.result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62"
"\x5d\x51\xc2\x16\xd8\xbd\x06\x9f"
"\x9b\x6a\x09\x70\xc1\x51\x83\xc2"
"\x66\x88\x1d\x4f\x9a\xda\xe0\x1e"
"\xc7\x79\x11\x58\xe5\x6b\x20\x40"
"\x7a\xea\x46\x42\x8b\xe4\x6f\xe1",
.rlen = 48,
}, {
.key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
"\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
"\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
"\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b"
"\x1e\x29\x91",
.klen = 35,
.iv = "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe",
.assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
"\x78\x2b\x94\x02\x29\x0f\x42\x27"
"\x6b\x75\xcb\x98\x34\x08\x7e\x79"
"\xe4\x3e\x49\x0d\x84\x8b\x22\x87",
.alen = 32,
.input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f"
"\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66"
"\xbf\x17\x99\x62\x4a\x39\x27\x1f"
"\x1d\xdc\x24\xae\x19\x2f\x98\x4c",
.ilen = 32,
.result = "\x19\xb8\x61\x33\x45\x2b\x43\x96"
"\x6f\x51\xd0\x20\x30\x7d\x9b\xc6"
"\x26\x3d\xf8\xc9\x65\x16\xa8\x9f"
"\xf0\x62\x17\x34\xf2\x1e\x8d\x75"
"\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d",
.rlen = 40,
}, {
.key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
"\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
"\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
"\x09\x75\x9a\x9b\x3c\x9b\x27\x39"
"\xf9\xd9\x4e",
.klen = 35,
.iv = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
.assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
"\x13\x02\x01\x0c\x83\x4c\x96\x35"
"\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
"\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
.alen = 32,
.input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6"
"\x83\x72\x07\x4f\xcf\xfa\x66\x89"
"\x5f\xca\xb1\xba\xd5\x8f\x2c\x27"
"\x30\xdb\x75\x09\x93\xd4\x65\xe4",
.ilen = 32,
.result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d"
"\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d"
"\xcc\x63\x44\x25\x07\x78\x4f\x9e"
"\x96\xb8\x88\xeb\xbc\x48\x1f\x06"
"\x39\xaf\x39\xac\xd8\x4a\x80\x39"
"\x7b\x72\x8a\xf7",
.rlen = 44,
}, {
.key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
"\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
"\xf9\x8f\xad\xe3\x02\x13\x83\x77"
"\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b"
"\x24\xa7\x8b",
.klen = 35,
.iv = "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5",
.assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
"\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
"\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
"\x16\x0c\x40\x90\x4b\xc7\x36\x73",
.alen = 32,
.input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92"
"\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d"
"\x6c\xde\xbc\xf1\x90\xea\x6a\xb2"
"\x35\x86\x36\xaf\x5c\xfe\x4b\x3a",
.ilen = 32,
.result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13"
"\xef\xbb\x80\x21\x04\x6c\x58\x09"
"\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7"
"\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf"
"\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
"\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
.rlen = 48,
},
};
static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
{
.key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
"\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
"\xc6\xfb\x7d",
.klen = 19,
.iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
.alen = 0,
.input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
.ilen = 8,
.result = "\x00",
.rlen = 0,
.novrfy = 1,
}, {
.key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
"\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
"\xaf\x94\x87",
.klen = 19,
.iv = "\x78\x35\x82\x81\x7f\x88\x94\x68",
.alen = 0,
.input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
.ilen = 8,
.result = "\x00",
.rlen = 0,
}, {
.key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
"\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
"\xc6\xfb\x7d",
.klen = 19,
.iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
.assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f"
"\x88\x94\x68\xb1\x78\x6b\x2b\xd6"
"\x04\x1f\x4e\xed\x78\xd5\x33\x66"
"\xd8\x94\x99\x91\x81\x54\x62\x57",
.alen = 32,
.input = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55"
"\xd0\xd1\x3d\x1a\xa3\x6d\xe4\x0a"
"\x86\xb0\x87\x6b\x62\x33\x8c\x34"
"\xce\xab\x57\xcc\x79\x0b\xe0\x6f"
"\x5c\x3e\x48\x1f\x6c\x46\xf7\x51"
"\x8b\x84\x83\x2a\xc1\x05\xb8\xc5",
.ilen = 48,
.result = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb"
"\x33\xe4\x73\xce\xd2\xfb\x95\x79"
"\xe8\xb4\xb5\x77\x11\x10\x62\x6f"
"\x6a\x82\xd1\x13\xec\xf5\xd0\x48",
.rlen = 32,
.novrfy = 1,
}, {
.key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
"\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
"\x05\xe0\xc9",
.klen = 19,
.iv = "\x0f\xed\x34\xea\x97\xd4\x3b\xdf",
.assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81"
"\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1"
"\xd8\x5c\x42\x68\xe0\x6c\xda\x89"
"\x05\xac\x56\xac\x1b\x2a\xd3\x86",
.alen = 32,
.input = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c"
"\xad\x83\x52\x6d\x71\x03\x25\x1c"
"\xed\x81\x3a\x9a\x16\x7d\x19\x80"
"\x72\x04\x72\xd0\xf6\xff\x05\x0f"
"\xb7\x14\x30\x00\x32\x9e\xa0\xa6"
"\x9e\x5a\x18\xa1\xb8\xfe\xdb\xd3",
.ilen = 48,
.result = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60"
"\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7"
"\xf2\xae\x61\x05\x8f\x82\x24\x3f"
"\x9c\x67\x91\xe1\x38\x4f\xe4\x0c",
.rlen = 32,
}, {
.key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
"\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
"\xa4\x48\x93\x39\x26\x71\x4a\xc6"
"\xee\x49\x83",
.klen = 27,
.iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
.assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
"\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
"\xa4\xf0\x13\x05\xd1\x77\x99\x67"
"\x11\xc4\xc6\xdb\x00\x56\x36\x61",
.alen = 32,
.input = "\x71\x99\xfa\xf4\x44\x12\x68\x9b",
.ilen = 8,
.result = "\x00",
.rlen = 0,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
"\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
"\x29\xa0\xba\x9e\x48\x78\xd1\xba"
"\xee\x49\x83",
.klen = 27,
.iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
.assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
"\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
"\xa4\xf0\x13\x05\xd1\x77\x99\x67"
"\x11\xc4\xc6\xdb\x00\x56\x36\x61",
.alen = 32,
.input = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
"\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
"\x66\xca\x61\x1e\x96\x7a\x61\xb3"
"\x1c\x16\x45\x52\xba\x04\x9c\x9f"
"\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
.ilen = 40,
.result = "\x85\x34\x66\x42\xc8\x92\x0f\x36"
"\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
"\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
"\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
.rlen = 32,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
"\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
"\x29\xa0\xba\x9e\x48\x78\xd1\xba"
"\xd1\xfc\x57",
.klen = 27,
.iv = "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f",
.assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
"\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
"\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
"\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
.alen = 32,
.input = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
"\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
"\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
"\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
"\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
"\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
.ilen = 48,
.result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
"\x99\x2a\xa8\xca\x04\x25\x45\x90"
"\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
"\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
.rlen = 32,
.novrfy = 1,
}, {
.key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
"\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
"\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
"\x0e\x85\xbc\x33\xad\x0f\x2b\xff"
"\xee\x49\x83",
.klen = 35,
.iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
.alen = 0,
.input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
.ilen = 8,
.result = "\x00",
.rlen = 0,
}, {
.key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
"\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
"\xa4\x48\x93\x39\x26\x71\x4a\xc6"
"\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb"
"\x85\x34\x66",
.klen = 35,
.iv = "\x42\xc8\x92\x0f\x36\x58\xe0\x6b",
.alen = 0,
.input = "\x48\x01\x5e\x02\x24\x04\x66\x47"
"\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
"\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
"\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
"\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
"\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
.ilen = 48,
.result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
"\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
"\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
"\x89\xd4\x75\x7a\x63\xb1\xda\x93",
.rlen = 32,
.novrfy = 1,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
"\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
"\x29\xa0\xba\x9e\x48\x78\xd1\xba"
"\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b"
"\xcf\x76\x3f",
.klen = 35,
.iv = "\xd9\x95\x75\x8f\x44\x89\x40\x7b",
.assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
"\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
"\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
"\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
.alen = 32,
.input = "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
"\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
"\x78\x5c\x4c\x67\x71\x89\x94\xbf"
"\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
"\x7f\x44\x0a\x0c\x01\x18\x07\x92"
"\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
.ilen = 48,
.result = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
"\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
"\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
"\x04\x49\x3b\x19\x93\x57\x25\x5d",
.rlen = 32,
},
};
/*
* ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
* test vectors, taken from Appendix B.2.9 and B.2.10:
* http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
* Only AES-128 is supported at this time.
*/
#define ANSI_CPRNG_AES_TEST_VECTORS 6
static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
{
.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
.klen = 16,
.dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
"\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9",
.dtlen = 16,
.v = "\x80\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.vlen = 16,
.result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55"
"\x84\x79\x66\x85\xc1\x2f\x76\x41",
.rlen = 16,
.loops = 1,
}, {
.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
.klen = 16,
.dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
"\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa",
.dtlen = 16,
.v = "\xc0\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.vlen = 16,
.result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c"
"\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d",
.rlen = 16,
.loops = 1,
}, {
.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
.klen = 16,
.dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
"\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb",
.dtlen = 16,
.v = "\xe0\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.vlen = 16,
.result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5"
"\x29\x14\x28\x81\xa9\x4d\x4e\xc7",
.rlen = 16,
.loops = 1,
}, {
.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
.klen = 16,
.dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
"\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc",
.dtlen = 16,
.v = "\xf0\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.vlen = 16,
.result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5"
"\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a",
.rlen = 16,
.loops = 1,
}, {
.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
.klen = 16,
.dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
"\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd",
.dtlen = 16,
.v = "\xf8\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.vlen = 16,
.result = "\x05\x25\x92\x46\x61\x79\xd2\xcb"
"\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8",
.rlen = 16,
.loops = 1,
}, { /* Monte Carlo Test */
.key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5"
"\xd8\x2b\xe8\xc3\x72\x55\xc8\x48",
.klen = 16,
.dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b"
"\x67\xc9\x25\xfa\x70\x1f\x11\xac",
.dtlen = 16,
.v = "\x57\x2c\x8e\x76\x87\x26\x47\x97"
"\x7e\x74\xfb\xdd\xc4\x95\x01\xd1",
.vlen = 16,
.result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb"
"\xe4\x57\x90\xd5\xc3\xfc\x9b\x73",
.rlen = 16,
.loops = 10000,
},
};
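The generator these vectors exercise is the ANSI X9.31 AES-128 construction: with I = E_K(DT), the output block is R = E_K(I ^ V) and the seed value is updated to V = E_K(I ^ R). A sketch of one iteration using an already-keyed single-block cipher handle; this is illustrative and not the ansi_cprng driver's actual code:

#include <linux/crypto.h>
#include <linux/string.h>
#include <crypto/algapi.h>	/* crypto_xor() */

/* Illustrative only: one ANSI X9.31 (AES-128) generation step. */
static void sketch_x931_step(struct crypto_cipher *aes, const u8 dt[16],
			     u8 v[16], u8 out[16])
{
	u8 i_blk[16], t[16];

	crypto_cipher_encrypt_one(aes, i_blk, dt);	/* I = E_K(DT)      */

	crypto_xor(v, i_blk, 16);			/* V ^= I           */
	crypto_cipher_encrypt_one(aes, out, v);		/* R = E_K(I ^ V)   */

	memcpy(t, i_blk, 16);				/* t = I ^ R        */
	crypto_xor(t, out, 16);
	crypto_cipher_encrypt_one(aes, v, t);		/* V = E_K(I ^ R)   */
}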
/* Cast5 test vectors from RFC 2144 */ /* Cast5 test vectors from RFC 2144 */
#define CAST5_ENC_TEST_VECTORS 3 #define CAST5_ENC_TEST_VECTORS 3
#define CAST5_DEC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3
......
...@@ -165,15 +165,15 @@ static int zlib_compress_update(struct crypto_pcomp *tfm, ...@@ -165,15 +165,15 @@ static int zlib_compress_update(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
ret = req->avail_out - stream->avail_out;
pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
stream->avail_in, stream->avail_out, stream->avail_in, stream->avail_out,
req->avail_in - stream->avail_in, req->avail_in - stream->avail_in, ret);
req->avail_out - stream->avail_out);
req->next_in = stream->next_in; req->next_in = stream->next_in;
req->avail_in = stream->avail_in; req->avail_in = stream->avail_in;
req->next_out = stream->next_out; req->next_out = stream->next_out;
req->avail_out = stream->avail_out; req->avail_out = stream->avail_out;
return 0; return ret;
} }
static int zlib_compress_final(struct crypto_pcomp *tfm, static int zlib_compress_final(struct crypto_pcomp *tfm,
...@@ -195,15 +195,15 @@ static int zlib_compress_final(struct crypto_pcomp *tfm, ...@@ -195,15 +195,15 @@ static int zlib_compress_final(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
ret = req->avail_out - stream->avail_out;
pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
stream->avail_in, stream->avail_out, stream->avail_in, stream->avail_out,
req->avail_in - stream->avail_in, req->avail_in - stream->avail_in, ret);
req->avail_out - stream->avail_out);
req->next_in = stream->next_in; req->next_in = stream->next_in;
req->avail_in = stream->avail_in; req->avail_in = stream->avail_in;
req->next_out = stream->next_out; req->next_out = stream->next_out;
req->avail_out = stream->avail_out; req->avail_out = stream->avail_out;
return 0; return ret;
} }
...@@ -280,15 +280,15 @@ static int zlib_decompress_update(struct crypto_pcomp *tfm, ...@@ -280,15 +280,15 @@ static int zlib_decompress_update(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
ret = req->avail_out - stream->avail_out;
pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
stream->avail_in, stream->avail_out, stream->avail_in, stream->avail_out,
req->avail_in - stream->avail_in, req->avail_in - stream->avail_in, ret);
req->avail_out - stream->avail_out);
req->next_in = stream->next_in; req->next_in = stream->next_in;
req->avail_in = stream->avail_in; req->avail_in = stream->avail_in;
req->next_out = stream->next_out; req->next_out = stream->next_out;
req->avail_out = stream->avail_out; req->avail_out = stream->avail_out;
return 0; return ret;
} }
static int zlib_decompress_final(struct crypto_pcomp *tfm, static int zlib_decompress_final(struct crypto_pcomp *tfm,
...@@ -328,15 +328,15 @@ static int zlib_decompress_final(struct crypto_pcomp *tfm, ...@@ -328,15 +328,15 @@ static int zlib_decompress_final(struct crypto_pcomp *tfm,
return -EINVAL; return -EINVAL;
} }
ret = req->avail_out - stream->avail_out;
pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
stream->avail_in, stream->avail_out, stream->avail_in, stream->avail_out,
req->avail_in - stream->avail_in, req->avail_in - stream->avail_in, ret);
req->avail_out - stream->avail_out);
req->next_in = stream->next_in; req->next_in = stream->next_in;
req->avail_in = stream->avail_in; req->avail_in = stream->avail_in;
req->next_out = stream->next_out; req->next_out = stream->next_out;
req->avail_out = stream->avail_out; req->avail_out = stream->avail_out;
return 0; return ret;
} }
......
...@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG ...@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
config HW_RANDOM_VIA config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support" tristate "VIA HW Random Number Generator support"
depends on HW_RANDOM && X86_32 depends on HW_RANDOM && X86
default HW_RANDOM default HW_RANDOM
---help--- ---help---
This driver provides kernel-side support for the Random Number This driver provides kernel-side support for the Random Number
......
...@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = { ...@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
.data_read = omap_rng_data_read, .data_read = omap_rng_data_read,
}; };
static int __init omap_rng_probe(struct platform_device *pdev) static int __devinit omap_rng_probe(struct platform_device *pdev)
{ {
struct resource *res, *mem; struct resource *res, *mem;
int ret; int ret;
......
...@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = { ...@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
.priv = 0, .priv = 0,
}; };
static int __init timeriomem_rng_probe(struct platform_device *pdev) static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
{ {
struct resource *res, *mem; struct resource *res;
int ret; int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
...@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) ...@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
if (!res) if (!res)
return -ENOENT; return -ENOENT;
mem = request_mem_region(res->start, res->end - res->start + 1,
pdev->name);
if (mem == NULL)
return -EBUSY;
dev_set_drvdata(&pdev->dev, mem);
timeriomem_rng_data = pdev->dev.platform_data; timeriomem_rng_data = pdev->dev.platform_data;
timeriomem_rng_data->address = ioremap(res->start, timeriomem_rng_data->address = ioremap(res->start,
res->end - res->start + 1); res->end - res->start + 1);
if (!timeriomem_rng_data->address) { if (!timeriomem_rng_data->address)
ret = -ENOMEM; return -EIO;
goto err_ioremap;
}
if (timeriomem_rng_data->period != 0 if (timeriomem_rng_data->period != 0
&& usecs_to_jiffies(timeriomem_rng_data->period) > 0) { && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
...@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) ...@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&timeriomem_rng_ops); ret = hwrng_register(&timeriomem_rng_ops);
if (ret) if (ret)
goto err_register; goto failed;
dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
timeriomem_rng_data->address, timeriomem_rng_data->address,
...@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev) ...@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
return 0; return 0;
err_register: failed:
dev_err(&pdev->dev, "problem registering\n"); dev_err(&pdev->dev, "problem registering\n");
iounmap(timeriomem_rng_data->address); iounmap(timeriomem_rng_data->address);
err_ioremap:
release_resource(mem);
return ret; return ret;
} }
static int __devexit timeriomem_rng_remove(struct platform_device *pdev) static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
{ {
struct resource *mem = dev_get_drvdata(&pdev->dev);
del_timer_sync(&timeriomem_rng_timer); del_timer_sync(&timeriomem_rng_timer);
hwrng_unregister(&timeriomem_rng_ops); hwrng_unregister(&timeriomem_rng_ops);
iounmap(timeriomem_rng_data->address); iounmap(timeriomem_rng_data->address);
release_resource(mem);
return 0; return 0;
} }
......
...@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng) ...@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
struct cpuinfo_x86 *c = &cpu_data(0); struct cpuinfo_x86 *c = &cpu_data(0);
u32 lo, hi, old_lo; u32 lo, hi, old_lo;
/* VIA Nano CPUs no longer have the MSR_VIA_RNG. The RNG is
 * always enabled if CPUID rng_en is set. There is no longer
 * any RNG configuration in this register as there was on
 * earlier CPUs. */
if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
if (!cpu_has_xstore_enabled) {
printk(KERN_ERR PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
return -ENODEV;
}
return 0;
}
/* Control the RNG via MSR. Tread lightly and pay very close /* Control the RNG via MSR. Tread lightly and pay very close
* close attention to values written, as the reserved fields * close attention to values written, as the reserved fields
* are documented to be "undefined and unpredictable"; but it * are documented to be "undefined and unpredictable"; but it
...@@ -205,5 +218,5 @@ static void __exit mod_exit(void) ...@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
module_init(mod_init); module_init(mod_init);
module_exit(mod_exit); module_exit(mod_exit);
MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets"); MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -12,7 +12,7 @@ if CRYPTO_HW ...@@ -12,7 +12,7 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE" tristate "Support for VIA PadLock ACE"
depends on X86_32 && !UML depends on X86 && !UML
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
help help
Some VIA processors come with an integrated crypto engine Some VIA processors come with an integrated crypto engine
......
...@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data) ...@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
hifn_process_queue(dev); hifn_process_queue(dev);
} }
static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{ {
int err, i; int err, i;
struct hifn_device *dev; struct hifn_device *dev;
...@@ -2696,7 +2696,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2696,7 +2696,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err; return err;
} }
static void hifn_remove(struct pci_dev *pdev) static void __devexit hifn_remove(struct pci_dev *pdev)
{ {
int i; int i;
struct hifn_device *dev; struct hifn_device *dev;
...@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = { ...@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
.remove = __devexit_p(hifn_remove), .remove = __devexit_p(hifn_remove),
}; };
static int __devinit hifn_init(void) static int __init hifn_init(void)
{ {
unsigned int freq; unsigned int freq;
int err; int err;
...@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void) ...@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
return 0; return 0;
} }
static void __devexit hifn_fini(void) static void __exit hifn_fini(void)
{ {
pci_unregister_driver(&hifn_pci_driver); pci_unregister_driver(&hifn_pci_driver);
......
...@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword) ...@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
int cpu = raw_smp_processor_id(); int cpu = raw_smp_processor_id();
if (cword != per_cpu(last_cword, cpu)) if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl"); asm volatile ("pushfl; popfl");
#else
asm volatile ("pushfq; popfq");
#endif
} }
static inline void padlock_store_cword(struct cword *cword) static inline void padlock_store_cword(struct cword *cword)
...@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, ...@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile ("test $1, %%cl;" asm volatile ("test $1, %%cl;"
"je 1f;" "je 1f;"
#ifndef CONFIG_X86_64
"lea -1(%%ecx), %%eax;" "lea -1(%%ecx), %%eax;"
"mov $1, %%ecx;" "mov $1, %%ecx;"
#else
"lea -1(%%rcx), %%rax;"
"mov $1, %%rcx;"
#endif
".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
#ifndef CONFIG_X86_64
"mov %%eax, %%ecx;" "mov %%eax, %%ecx;"
#else
"mov %%rax, %%rcx;"
#endif
"1:" "1:"
".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output) : "+S"(input), "+D"(output)
......
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
#include <crypto/sha.h> #include <crypto/sha.h>
#include <crypto/aead.h> #include <crypto/aead.h>
#include <crypto/authenc.h> #include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/scatterwalk.h>
#include "talitos.h" #include "talitos.h"
...@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) ...@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
status = error; status = error;
dma_unmap_single(dev, request->dma_desc, dma_unmap_single(dev, request->dma_desc,
sizeof(struct talitos_desc), DMA_BIDIRECTIONAL); sizeof(struct talitos_desc),
DMA_BIDIRECTIONAL);
/* copy entries so we can call callback outside lock */ /* copy entries so we can call callback outside lock */
saved_req.desc = request->desc; saved_req.desc = request->desc;
...@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch) ...@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
/* /*
* user diagnostics; report root cause of error based on execution unit status * user diagnostics; report root cause of error based on execution unit status
*/ */
static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc) static void report_eu_error(struct device *dev, int ch,
struct talitos_desc *desc)
{ {
struct talitos_private *priv = dev_get_drvdata(dev); struct talitos_private *priv = dev_get_drvdata(dev);
int i; int i;
...@@ -684,8 +688,8 @@ struct talitos_ctx { ...@@ -684,8 +688,8 @@ struct talitos_ctx {
unsigned int authsize; unsigned int authsize;
}; };
static int aead_authenc_setauthsize(struct crypto_aead *authenc, static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize) unsigned int authsize)
{ {
struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
...@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc, ...@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
return 0; return 0;
} }
static int aead_authenc_setkey(struct crypto_aead *authenc, static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct rtattr *rta = (void *)key; struct rtattr *rta = (void *)key;
...@@ -740,7 +744,7 @@ static int aead_authenc_setkey(struct crypto_aead *authenc, ...@@ -740,7 +744,7 @@ static int aead_authenc_setkey(struct crypto_aead *authenc,
} }
/* /*
* ipsec_esp_edesc - s/w-extended ipsec_esp descriptor * talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist * @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist * @dst_nents: number of segments in output scatterlist
* @dma_len: length of dma mapped link_tbl space * @dma_len: length of dma mapped link_tbl space
...@@ -752,17 +756,67 @@ static int aead_authenc_setkey(struct crypto_aead *authenc, ...@@ -752,17 +756,67 @@ static int aead_authenc_setkey(struct crypto_aead *authenc,
* is greater than 1, an integrity check value is concatenated to the end * is greater than 1, an integrity check value is concatenated to the end
* of link_tbl data * of link_tbl data
*/ */
struct ipsec_esp_edesc { struct talitos_edesc {
int src_nents; int src_nents;
int dst_nents; int dst_nents;
int src_is_chained;
int dst_is_chained;
int dma_len; int dma_len;
dma_addr_t dma_link_tbl; dma_addr_t dma_link_tbl;
struct talitos_desc desc; struct talitos_desc desc;
struct talitos_ptr link_tbl[0]; struct talitos_ptr link_tbl[0];
}; };
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
int chained)
{
if (unlikely(chained))
while (sg) {
dma_map_sg(dev, sg, 1, dir);
sg = scatterwalk_sg_next(sg);
}
else
dma_map_sg(dev, sg, nents, dir);
return nents;
}
static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
enum dma_data_direction dir)
{
while (sg) {
dma_unmap_sg(dev, sg, 1, dir);
sg = scatterwalk_sg_next(sg);
}
}
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
struct scatterlist *dst)
{
unsigned int src_nents = edesc->src_nents ? : 1;
unsigned int dst_nents = edesc->dst_nents ? : 1;
if (src != dst) {
if (edesc->src_is_chained)
talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
if (edesc->dst_is_chained)
talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
else
dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else
if (edesc->src_is_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
else
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev, static void ipsec_esp_unmap(struct device *dev,
struct ipsec_esp_edesc *edesc, struct talitos_edesc *edesc,
struct aead_request *areq) struct aead_request *areq)
{ {
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
...@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev, ...@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
if (areq->src != areq->dst) { talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_TO_DEVICE);
dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_BIDIRECTIONAL);
}
if (edesc->dma_len) if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
...@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev, ...@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
int err) int err)
{ {
struct aead_request *areq = context; struct aead_request *areq = context;
struct ipsec_esp_edesc *edesc =
container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg; struct scatterlist *sg;
void *icvdata; void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
ipsec_esp_unmap(dev, edesc, areq); ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */ /* copy the generated ICV to dst */
...@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev, ...@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
} }
static void ipsec_esp_decrypt_swauth_done(struct device *dev, static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct talitos_desc *desc, void *context, struct talitos_desc *desc,
int err) void *context, int err)
{ {
struct aead_request *req = context; struct aead_request *req = context;
struct ipsec_esp_edesc *edesc =
container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg; struct scatterlist *sg;
void *icvdata; void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
ipsec_esp_unmap(dev, edesc, req); ipsec_esp_unmap(dev, edesc, req);
if (!err) { if (!err) {
...@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, ...@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
} }
static void ipsec_esp_decrypt_hwauth_done(struct device *dev, static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
struct talitos_desc *desc, void *context, struct talitos_desc *desc,
int err) void *context, int err)
{ {
struct aead_request *req = context; struct aead_request *req = context;
struct ipsec_esp_edesc *edesc = struct talitos_edesc *edesc;
container_of(desc, struct ipsec_esp_edesc, desc);
edesc = container_of(desc, struct talitos_edesc, desc);
ipsec_esp_unmap(dev, edesc, req); ipsec_esp_unmap(dev, edesc, req);
/* check ICV auth status */ /* check ICV auth status */
if (!err) if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != DESC_HDR_LO_ICCR1_PASS))
DESC_HDR_LO_ICCR1_PASS) err = -EBADMSG;
err = -EBADMSG;
kfree(edesc); kfree(edesc);
...@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, ...@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
link_tbl_ptr->j_extent = 0; link_tbl_ptr->j_extent = 0;
link_tbl_ptr++; link_tbl_ptr++;
cryptlen -= sg_dma_len(sg); cryptlen -= sg_dma_len(sg);
sg = sg_next(sg); sg = scatterwalk_sg_next(sg);
} }
/* adjust (decrease) last one (or two) entry's len to cryptlen */ /* adjust (decrease) last one (or two) entry's len to cryptlen */
...@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, ...@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
/*
 * fill in and submit ipsec_esp descriptor
 */
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
u8 *giv, u64 seq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
-if (areq->src == areq->dst)
-sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-DMA_BIDIRECTIONAL);
-else
-sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-DMA_TO_DEVICE);
+sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+: DMA_TO_DEVICE,
+edesc->src_is_chained);
if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
-if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
-(edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len = cryptlen + authsize;
-}
sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
-dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
-edesc->dma_len, DMA_BIDIRECTIONAL);
+dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+edesc->dma_len,
+DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
-desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
+src));
}
}
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
desc->ptr[5].len = cpu_to_be16(cryptlen);
desc->ptr[5].j_extent = authsize;
-if (areq->src != areq->dst) {
-sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-DMA_FROM_DEVICE);
-}
+if (areq->src != areq->dst)
+sg_count = talitos_map_sg(dev, areq->dst,
+edesc->dst_nents ? : 1,
+DMA_FROM_DEVICE,
+edesc->dst_is_chained);
if (sg_count == 1) {
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
return ret;
}
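The talitos_map_sg() helper used above is not part of the hunks shown here. Based only on how it is called (map each element separately when the list is chained, otherwise hand the whole list to dma_map_sg()), a minimal sketch might look like the following; the exact signature and body are assumptions, not the code from this patch.

/*
 * Hypothetical sketch of the mapping helper referenced above: chained
 * scatterlists are mapped one element at a time, plain lists in one call.
 */
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  int chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);

	return nents;
}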
/*
 * derive number of elements in scatterlist
 */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
-while (nbytes) {
+*chained = 0;
+while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
-sg = sg_next(sg);
+if (!sg_is_last(sg) && (sg + 1)->length == 0)
+*chained = 1;
+sg = scatterwalk_sg_next(sg);
}
return sg_nents;
}
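The new "(sg + 1)->length == 0" test above spots a chain-link entry, i.e. a scatterlist joined with sg_chain(), whose link slot carries zero length. A purely illustrative sketch of how such a list comes about follows; buffer names are made up, and it assumes the architecture supports scatterlist chaining.

/* Illustrative only: build a two-table chained scatterlist. */
static void example_chained_sg(struct scatterlist *tbl1, /* 2 slots */
			       struct scatterlist *tbl2, /* 1 slot  */
			       void *buf_a, void *buf_b, unsigned int len)
{
	sg_init_table(tbl1, 2);
	sg_init_table(tbl2, 1);

	sg_set_buf(&tbl1[0], buf_a, len);
	sg_set_buf(&tbl2[0], buf_b, len);

	/* tbl1[1] becomes a zero-length link entry pointing at tbl2 */
	sg_chain(tbl1, 2, tbl2);
}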
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the extended descriptor
 */
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
-int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+struct scatterlist *src,
+struct scatterlist *dst,
+unsigned int cryptlen,
+unsigned int authsize,
+int icv_stashing,
+u32 cryptoflags)
{
-struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-struct ipsec_esp_edesc *edesc;
+struct talitos_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
-gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+int src_chained, dst_chained = 0;
+gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
-if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
-dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
-src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
-if (areq->dst == areq->src) {
+if (dst == src) {
dst_nents = src_nents;
} else {
-dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
 * allowing for two separate entries for ICV and generated ICV (+ 2),
 * and the ICV data itself
 */
-alloc_len = sizeof(struct ipsec_esp_edesc);
+alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 2) *
-sizeof(struct talitos_ptr) + ctx->authsize;
+sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
-alloc_len += icv_stashing ? ctx->authsize : 0;
+alloc_len += icv_stashing ? authsize : 0;
}
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
-dev_err(ctx->dev, "could not allocate edescriptor\n");
+dev_err(dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+edesc->src_is_chained = src_chained;
+edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
-edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
edesc->dma_len, DMA_BIDIRECTIONAL);
return edesc;
}
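For readability, the layout of the single allocation made above can be summarized as follows; this is derived from how the buffer is indexed elsewhere in this patch, not stated in it.

/*
 * Layout of the buffer returned by talitos_edesc_alloc() when a link
 * table is needed (sizes per the arithmetic above):
 *
 *   struct talitos_edesc              header, incl. the h/w descriptor
 *   link_tbl[0 .. src_nents]          source entries (+1 spare for ICV)
 *   link_tbl[.. src+dst_nents + 1]    destination entries (+1 spare)
 *   authsize bytes                    stashed/generated ICV data
 *
 * With no link table, only authsize bytes are appended, and only when
 * icv_stashing is set.
 */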
-static int aead_authenc_encrypt(struct aead_request *req)
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+int icv_stashing)
+{
+struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+areq->cryptlen, ctx->authsize, icv_stashing,
+areq->base.flags);
+}
+static int aead_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-struct ipsec_esp_edesc *edesc;
+struct talitos_edesc *edesc;
/* allocate extended descriptor */
-edesc = ipsec_esp_edesc_alloc(req, 0);
+edesc = aead_edesc_alloc(req, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int authsize = ctx->authsize;
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
-struct ipsec_esp_edesc *edesc;
+struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
req->cryptlen -= authsize;
/* allocate extended descriptor */
-edesc = ipsec_esp_edesc_alloc(req, 1);
+edesc = aead_edesc_alloc(req, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
-(((!edesc->src_nents && !edesc->dst_nents) ||
-priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
+((!edesc->src_nents && !edesc->dst_nents) ||
+priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
/* decrypt and check the ICV */
-edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+edesc->desc.hdr = ctx->desc_hdr_template |
+DESC_HDR_DIR_INBOUND |
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
-return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
+return ipsec_esp(edesc, req, NULL, 0,
+ipsec_esp_decrypt_hwauth_done);
-} else {
-/* Have to check the ICV with software */
-edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+}
+/* Have to check the ICV with software */
+edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
else
icvdata = &edesc->link_tbl[0];
sg = sg_last(req->src, edesc->src_nents ? : 1);
memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
ctx->authsize);
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
-}
}
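The completion callback ipsec_esp_decrypt_swauth_done() is outside this excerpt; presumably the ICV stashed above is compared there against the ICV the hardware appended to the output, roughly along these lines (a sketch under that assumption, not the actual callback):

/*
 * Sketch of the software ICV verification done at completion time:
 * compare the stashed ICV with the ICV generated into the output sg.
 */
static int example_check_icv(struct aead_request *req,
			     struct talitos_edesc *edesc,
			     unsigned int authsize)
{
	struct scatterlist *sg = sg_last(req->dst, edesc->dst_nents ? : 1);
	void *icvdata = edesc->dma_len ?
			(void *)&edesc->link_tbl[edesc->src_nents +
						 edesc->dst_nents + 2] :
			(void *)&edesc->link_tbl[0];

	return memcmp(icvdata, (char *)sg_virt(sg) + sg->length - authsize,
		      authsize) ? -EBADMSG : 0;
}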
-static int aead_authenc_givencrypt(
-struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *req)
{
struct aead_request *areq = &req->areq;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-struct ipsec_esp_edesc *edesc;
+struct talitos_edesc *edesc;
/* allocate extended descriptor */
-edesc = ipsec_esp_edesc_alloc(areq, 0);
+edesc = aead_edesc_alloc(areq, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
if (keylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
if (keylen < alg->min_keysize || keylen > alg->max_keysize)
goto badkey;
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
return 0;
badkey:
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
struct ablkcipher_request *areq = context;
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
kfree(edesc);
areq->base.complete(&areq->base, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
struct ablkcipher_request *areq,
u8 *giv,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->nbytes;
unsigned int ivsize;
int sg_count, ret;
/* first DWORD empty */
desc->ptr[0].len = 0;
desc->ptr[0].ptr = 0;
desc->ptr[0].j_extent = 0;
/* cipher iv */
ivsize = crypto_ablkcipher_ivsize(cipher);
map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
(char *)&ctx->key, 0, DMA_TO_DEVICE);
/*
* cipher in
*/
desc->ptr[3].len = cpu_to_be16(cryptlen);
desc->ptr[3].j_extent = 0;
sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE,
edesc->src_is_chained);
if (sg_count == 1) {
desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
} else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
src));
}
}
/* cipher out */
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = 0;
if (areq->src != areq->dst)
sg_count = talitos_map_sg(dev, areq->dst,
edesc->dst_nents ? : 1,
DMA_FROM_DEVICE,
edesc->dst_is_chained);
if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents + 1);
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
}
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
/* last DWORD empty */
desc->ptr[6].len = 0;
desc->ptr[6].ptr = 0;
desc->ptr[6].j_extent = 0;
ret = talitos_submit(dev, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_unmap(dev, edesc, areq);
kfree(edesc);
}
return ret;
}
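Summarizing the descriptor layout that common_nonsnoop() fills in above, as a descriptive note only:

/*
 * Non-snooping (ablkcipher) descriptor pointer usage, per the code above:
 *   ptr[0] - unused
 *   ptr[1] - IV in (giv if provided, else areq->info)
 *   ptr[2] - cipher key
 *   ptr[3] - cipher input  (direct pointer or link table jump)
 *   ptr[4] - cipher output (direct pointer or link table jump)
 *   ptr[5] - IV out (written back to ctx->iv)
 *   ptr[6] - unused
 */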
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
areq)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
0, 0, areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
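For context, a kernel-side user would reach ablkcipher_encrypt()/ablkcipher_decrypt() through the generic ablkcipher API of this era once "cbc(aes)" is registered below. A hypothetical, simplified sketch follows; the function and buffer names are made up, and a real caller would set a completion callback and handle -EINPROGRESS/-EBUSY from the asynchronous driver.

/* Hypothetical caller-side sketch using the ablkcipher API. */
static int example_cbc_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int nbytes,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	err = crypto_ablkcipher_encrypt(req);
	/* async handling (callback / wait for completion) omitted */

	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return err;
}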
struct talitos_alg_template {
-char name[CRYPTO_MAX_ALG_NAME];
-char driver_name[CRYPTO_MAX_ALG_NAME];
-unsigned int blocksize;
-struct aead_alg aead;
-struct device *dev;
+struct crypto_alg alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
-/* single-pass ipsec_esp descriptor */
+/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{
-.name = "authenc(hmac(sha1),cbc(aes))",
-.driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
-.blocksize = AES_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = AES_BLOCK_SIZE,
-.maxauthsize = SHA1_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(sha1),cbc(aes))",
+.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
+.cra_blocksize = AES_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = AES_BLOCK_SIZE,
+.maxauthsize = SHA1_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
-.name = "authenc(hmac(sha1),cbc(des3_ede))",
-.driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
-.blocksize = DES3_EDE_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = DES3_EDE_BLOCK_SIZE,
-.maxauthsize = SHA1_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
+.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = DES3_EDE_BLOCK_SIZE,
+.maxauthsize = SHA1_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
-.name = "authenc(hmac(sha256),cbc(aes))",
-.driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
-.blocksize = AES_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = AES_BLOCK_SIZE,
-.maxauthsize = SHA256_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(sha256),cbc(aes))",
+.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
+.cra_blocksize = AES_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = AES_BLOCK_SIZE,
+.maxauthsize = SHA256_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
-.name = "authenc(hmac(sha256),cbc(des3_ede))",
-.driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
-.blocksize = DES3_EDE_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = DES3_EDE_BLOCK_SIZE,
-.maxauthsize = SHA256_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
+.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = DES3_EDE_BLOCK_SIZE,
+.maxauthsize = SHA256_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
-.name = "authenc(hmac(md5),cbc(aes))",
-.driver_name = "authenc-hmac-md5-cbc-aes-talitos",
-.blocksize = AES_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = AES_BLOCK_SIZE,
-.maxauthsize = MD5_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(md5),cbc(aes))",
+.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
+.cra_blocksize = AES_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = AES_BLOCK_SIZE,
+.maxauthsize = MD5_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{
-.name = "authenc(hmac(md5),cbc(des3_ede))",
-.driver_name = "authenc-hmac-md5-cbc-3des-talitos",
-.blocksize = DES3_EDE_BLOCK_SIZE,
-.aead = {
-.setkey = aead_authenc_setkey,
-.setauthsize = aead_authenc_setauthsize,
-.encrypt = aead_authenc_encrypt,
-.decrypt = aead_authenc_decrypt,
-.givencrypt = aead_authenc_givencrypt,
-.geniv = "<built-in>",
-.ivsize = DES3_EDE_BLOCK_SIZE,
-.maxauthsize = MD5_DIGEST_SIZE,
-},
+.alg = {
+.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
+.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+.cra_type = &crypto_aead_type,
+.cra_aead = {
+.setkey = aead_setkey,
+.setauthsize = aead_setauthsize,
+.encrypt = aead_encrypt,
+.decrypt = aead_decrypt,
+.givencrypt = aead_givencrypt,
+.geniv = "<built-in>",
+.ivsize = DES3_EDE_BLOCK_SIZE,
+.maxauthsize = MD5_DIGEST_SIZE,
+}
+},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
/* ABLKCIPHER algorithms. */
{
.alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
.geniv = "eseqiv",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
{
.alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_type = &crypto_ablkcipher_type,
.cra_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
.geniv = "eseqiv",
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES,
} }
};
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg {
static int talitos_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
-struct talitos_crypto_alg *talitos_alg =
-container_of(alg, struct talitos_crypto_alg, crypto_alg);
+struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
alg = &t_alg->crypto_alg;
+*alg = template->alg;
-snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-template->driver_name);
alg->cra_module = THIS_MODULE;
alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
-alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
-alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
-alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
-alg->cra_u.aead = template->aead;
t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
...
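The probe-time loop that turns driver_algs[] into registered algorithms falls outside this excerpt; presumably it simply walks the table, allocates a talitos_crypto_alg per entry with talitos_alg_alloc(), and registers it, roughly as sketched below (the function name and error policy are assumptions).

/* Sketch of the registration loop that consumes driver_algs[]. */
static int example_register_algs(struct device *dev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct talitos_crypto_alg *t_alg;

		t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
		if (IS_ERR(t_alg))
			return PTR_ERR(t_alg);

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			kfree(t_alg);	/* or unwind previously registered algs */
			return err;
		}
	}
	return 0;
}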