Commit 32f44d62 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (35 commits)
  hwrng: timeriomem - Fix potential oops (request_mem_region/__devinit)
  crypto: api - Use formatting of module name
  crypto: testmgr - Allow hash test vectors longer than a page
  crypto: testmgr - Check all test vector lengths
  crypto: hifn_795x - fix __dev{init,exit} markings
  crypto: tcrypt - Do not exit on success in fips mode
  crypto: compress - Return produced bytes in crypto_{,de}compress_{update,final}
  hwrng: via_rng - Support VIA Nano hardware RNG on X86_64 builds
  hwrng: via_rng - Support VIA Nano hardware RNG
  hwrng: via_rng - The VIA Hardware RNG driver is for the CPU, not Chipset
  crypto: testmgr - Skip algs not flagged fips_allowed in fips mode
  crypto: testmgr - Mark algs allowed in fips mode
  crypto: testmgr - Add ctr(aes) test vectors
  crypto: testmgr - Dynamically allocate xbuf and axbuf
  crypto: testmgr - Print self-test pass notices in fips mode
  crypto: testmgr - Catch base cipher self-test failures in fips mode
  crypto: testmgr - Add ansi_cprng test vectors
  crypto: testmgr - Add infrastructure for ansi_cprng self-tests
  crypto: testmgr - Add self-tests for rfc4309(ccm(aes))
  crypto: testmgr - Handle AEAD test vectors expected to fail verification
  ...
parents f3ad1165 08ced854
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_FPU) += fpu.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
...
@@ -21,6 +21,22 @@
#include <asm/i387.h>
#include <asm/aes.h>
#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif
#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif
#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif
#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
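/*
 * The CONFIG_FOO || CONFIG_FOO_MODULE test asks "is this feature
 * available at all, built-in or modular?"; later kernels spell the
 * same check IS_ENABLED(CONFIG_FOO).
 */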
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};
@@ -137,6 +153,41 @@ static struct crypto_alg aesni_alg = {
	}
};
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_enc(ctx, dst, src);
}
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
aesni_dec(ctx, dst, src);
}
static struct crypto_alg __aesni_alg = {
.cra_name = "__aes-aesni",
.cra_driver_name = "__driver-aes-aesni",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = __aes_encrypt,
.cia_decrypt = __aes_decrypt
}
}
};
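/*
 * The "__" name prefix and cra_priority of 0 keep this synchronous
 * cipher from ever being chosen for a plain "aes" request; it is
 * registered only so that the fpu/ctr/lrw/pcbc/xts templates below can
 * be instantiated on top of the raw AES-NI block operations.
 */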
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
@@ -277,8 +328,16 @@ static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
+	int err;

-	return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
+	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
+				    & CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(child, key, key_len);
+	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
+				    & CRYPTO_TFM_RES_MASK);
+	return err;
}
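/*
 * This is the standard wrapper setkey idiom in the crypto API:
 * CRYPTO_TFM_REQ_* flags are copied down to the child before the
 * setkey call, and CRYPTO_TFM_RES_* flags (e.g. a weak-key verdict)
 * are copied back up afterwards, so the caller sees exactly what the
 * child reported.
 */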
static int ablk_encrypt(struct ablkcipher_request *req)
@@ -411,6 +470,163 @@ static struct crypto_alg ablk_cbc_alg = {
	},
};
#ifdef HAS_CTR
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_ctr_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
.cra_init = ablk_ctr_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
.geniv = "chainiv",
},
},
};
#endif
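/*
 * cra_blocksize is 1 above because CTR turns AES into a stream cipher
 * that accepts arbitrary input lengths; the explicit .geniv picks
 * chainiv as the IV generator when a givcipher is requested.
 */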
#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_lrw_alg = {
.cra_name = "lrw(aes)",
.cra_driver_name = "lrw-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
.cra_init = ablk_lrw_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_pcbc_alg = {
.cra_name = "pcbc(aes)",
.cra_driver_name = "pcbc-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
.cra_init = ablk_pcbc_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
return 0;
}
static struct crypto_alg ablk_xts_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_aes_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
.cra_init = ablk_xts_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
};
#endif
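/*
 * The key-size bounds mirror the generic templates: LRW carries one
 * extra AES block of tweak-key material on top of the AES key, and
 * XTS splits its key into two full AES keys, hence the doubled
 * AES_{MIN,MAX}_KEY_SIZE limits.
 */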
static int __init aesni_init(void)
{
	int err;

@@ -421,6 +637,8 @@ static int __init aesni_init(void)
	}

	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
if ((err = crypto_register_alg(&__aesni_alg)))
goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
@@ -429,9 +647,41 @@ static int __init aesni_init(void)
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_ctr_alg)))
goto ablk_ctr_err;
#endif
#ifdef HAS_LRW
if ((err = crypto_register_alg(&ablk_lrw_alg)))
goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
if ((err = crypto_register_alg(&ablk_pcbc_alg)))
goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
if ((err = crypto_register_alg(&ablk_xts_alg)))
goto ablk_xts_err;
#endif
	return err;
#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
#endif
crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
@@ -439,6 +689,8 @@ static int __init aesni_init(void)
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
@@ -446,10 +698,23 @@ static int __init aesni_init(void)

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef HAS_CTR
crypto_unregister_alg(&ablk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
}
...
/*
* FPU: Wrapper for blkcipher touching fpu
*
* Copyright (c) Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/i387.h>
struct crypto_fpu_ctx {
struct crypto_blkcipher *child;
};
static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_blkcipher *child = ctx->child;
int err;
crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_blkcipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
int err;
struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
struct crypto_blkcipher *child = ctx->child;
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
.flags = desc_in->flags,
};
kernel_fpu_begin();
err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
kernel_fpu_end();
return err;
}
static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
int err;
struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
struct crypto_blkcipher *child = ctx->child;
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
.flags = desc_in->flags,
};
kernel_fpu_begin();
err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
kernel_fpu_end();
return err;
}
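/*
 * kernel_fpu_begin()/kernel_fpu_end() bracket the child call because
 * kernel code must save FPU/SSE state before touching it; doing that
 * once around the whole blkcipher walk, rather than per block, is the
 * point of this template. The child descriptor is built on the stack,
 * forwarding the IV (info) and flags unchanged.
 */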
static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_blkcipher *cipher;
cipher = crypto_spawn_blkcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_blkcipher(ctx->child);
}
static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = crypto_alloc_instance("fpu", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = alg->cra_flags;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = alg->cra_type;
inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
inst->alg.cra_init = crypto_fpu_init_tfm;
inst->alg.cra_exit = crypto_fpu_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_fpu_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_fpu_tmpl = {
.name = "fpu",
.alloc = crypto_fpu_alloc,
.free = crypto_fpu_free,
.module = THIS_MODULE,
};
static int __init crypto_fpu_module_init(void)
{
return crypto_register_template(&crypto_fpu_tmpl);
}
static void __exit crypto_fpu_module_exit(void)
{
crypto_unregister_template(&crypto_fpu_tmpl);
}
module_init(crypto_fpu_module_init);
module_exit(crypto_fpu_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FPU block cipher wrapper");
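/*
 * A sketch of how this template is consumed (this mirrors the call in
 * aesni-intel_glue.c above; allocation goes through the normal crypto
 * API, with cryptomgr instantiating the template on demand):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 * crypto_fpu_alloc() builds the "fpu" instance around the inner
 * blkcipher, and the instance inherits that algorithm's blocksize,
 * key sizes and ivsize as set up above.
 */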
@@ -241,6 +241,11 @@ config CRYPTO_XTS
	  key size 256, 384 or 512 bits. This implementation currently
	  can't handle a sectorsize which is not a multiple of 16 bytes.
config CRYPTO_FPU
tristate
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
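# Note: CRYPTO_FPU is a bare tristate with no prompt string, so it never
# appears in menuconfig; it can only be switched on via "select", as
# CRYPTO_AES_NI_INTEL does below.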
comment "Hash modes" comment "Hash modes"
config CRYPTO_HMAC config CRYPTO_HMAC
...@@ -486,6 +491,7 @@ config CRYPTO_AES_NI_INTEL ...@@ -486,6 +491,7 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_X86_64 select CRYPTO_AES_X86_64
select CRYPTO_CRYPTD select CRYPTO_CRYPTD
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
select CRYPTO_FPU
help help
Use Intel AES-NI instructions for AES algorithm. Use Intel AES-NI instructions for AES algorithm.
...@@ -505,6 +511,10 @@ config CRYPTO_AES_NI_INTEL ...@@ -505,6 +511,10 @@ config CRYPTO_AES_NI_INTEL
See <http://csrc.nist.gov/encryption/aes/> for more information. See <http://csrc.nist.gov/encryption/aes/> for more information.
	  In addition to AES cipher algorithm support, the acceleration
	  for some popular block cipher modes is supported too, including
	  ECB, CBC, CTR, LRW, PCBC and XTS.
config CRYPTO_ANUBIS
	tristate "Anubis cipher algorithm"
	select CRYPTO_ALGAPI
...
@@ -280,29 +280,13 @@ static struct notifier_block cryptomgr_notifier = {
static int __init cryptomgr_init(void)
{
-	int err;
-
-	err = testmgr_init();
-	if (err)
-		return err;
-
-	err = crypto_register_notifier(&cryptomgr_notifier);
-	if (err)
-		goto free_testmgr;
-
-	return 0;
-
-free_testmgr:
-	testmgr_exit();
-	return err;
+	return crypto_register_notifier(&cryptomgr_notifier);
}

static void __exit cryptomgr_exit(void)
{
	int err = crypto_unregister_notifier(&cryptomgr_notifier);

	BUG_ON(err);
-	testmgr_exit();
}

subsys_initcall(cryptomgr_init);
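/*
 * testmgr_init()/testmgr_exit() disappear because testmgr now
 * allocates its scratch buffers per test run (see "testmgr -
 * Dynamically allocate xbuf and axbuf" in the merge list), leaving
 * notifier registration as cryptomgr's only setup step.
 */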
...
@@ -217,14 +217,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
	alg = crypto_alg_lookup(name, type, mask);
	if (!alg) {
-		char tmp[CRYPTO_MAX_ALG_NAME];
-
-		request_module(name);
-
-		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
-		      CRYPTO_ALG_NEED_FALLBACK) &&
-		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
-			request_module(tmp);
+		request_module("%s", name);
+
+		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
+		      CRYPTO_ALG_NEED_FALLBACK))
+			request_module("%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}
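/*
 * request_module("%s", name) passes the (potentially user-influenced)
 * algorithm name through a fixed format string instead of using it as
 * the format itself, and the "%s-all" form drops the snprintf dance:
 * request_module() formats into a bounded buffer internally.
 */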
@@ -580,20 +577,17 @@ EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;
-	int size;

	if (unlikely(!mem))
		return;

	alg = tfm->__crt_alg;
-	size = ksize(mem);

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
-	memset(mem, 0, size);
-	kfree(mem);
+	kzfree(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
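/*
 * kzfree() zeroes the whole ksize() of the allocation before freeing
 * it, which is exactly what the open-coded ksize()/memset()/kfree()
 * sequence did: key material never lingers in freed memory either way.
 */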
...
@@ -586,20 +586,24 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
-	struct crypto_ablkcipher *tfm;
+	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
-	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
+	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	mask &= ~CRYPTO_ALG_TYPE_MASK;
+	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
-	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
-		crypto_free_ablkcipher(tfm);
+	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

-	return __cryptd_ablkcipher_cast(tfm);
+	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
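/*
 * Going through crypto_alloc_base() with a hand-built type/mask is
 * what lets this helper return the bare cryptd blkcipher instance:
 * setting CRYPTO_ALG_GENIV in the mask while leaving it clear in the
 * type filters out IV-generator-wrapped variants that
 * crypto_alloc_ablkcipher() could otherwise have handed back.
 */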
...
@@ -121,9 +121,6 @@ int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);

-int __init testmgr_init(void);
-void testmgr_exit(void);

static inline void crypto_alg_put(struct crypto_alg *alg)
{
	if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
...
@@ -26,6 +26,7 @@
#include <linux/string.h>

#include <crypto/compress.h>
#include <crypto/internal/compress.h>

#include "internal.h"
...
@@ -27,6 +27,7 @@
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
#include "internal.h"

/*
 * Need slab memory for testing (size in number of pages).
@@ -396,16 +397,16 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	struct scatterlist sg[TVMEMSIZE];
	struct crypto_hash *tfm;
	struct hash_desc desc;
-	char output[1024];
+	static char output[1024];
	int i;
	int ret;

-	printk("\ntesting speed of %s\n", algo);
+	printk(KERN_INFO "\ntesting speed of %s\n", algo);

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm)) {
-		printk("failed to load transform for %s: %ld\n", algo,
+		printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}
@@ -414,7 +415,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	desc.flags = 0;

	if (crypto_hash_digestsize(tfm) > sizeof(output)) {
-		printk("digestsize(%u) > outputbuffer(%zu)\n",
+		printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
		       crypto_hash_digestsize(tfm), sizeof(output));
		goto out;
	}
@@ -427,12 +428,14 @@ static void test_hash_speed(const char *algo, unsigned int sec,
	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
-			printk("template (%u) too big for tvmem (%lu)\n",
+			printk(KERN_ERR
+			       "template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			goto out;
		}

-		printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ",
+		printk(KERN_INFO "test%3u "
+		       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
		       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);

		if (sec)
@@ -443,7 +446,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
					  speed[i].plen, output);

		if (ret) {
-			printk("hashing failed ret=%d\n", ret);
+			printk(KERN_ERR "hashing failed ret=%d\n", ret);
			break;
		}
	}
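/*
 * Making output[] static moves a 1 KB buffer off the kernel stack, and
 * the printk calls all gain explicit KERN_* levels; neither change
 * affects what the speed test measures.
 */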
@@ -466,239 +469,255 @@ static void test_available(void)
static inline int tcrypt_test(const char *alg)
{
-	return alg_test(alg, alg, 0, 0);
+	int ret;
+
+	ret = alg_test(alg, alg, 0, 0);
+	/* non-fips algs return -EINVAL in fips mode */
+	if (fips_enabled && ret == -EINVAL)
+		ret = 0;
+	return ret;
}
-static void do_test(int m)
+static int do_test(int m)
{
	int i;
	int ret = 0;

	switch (m) {
	case 0:
		for (i = 1; i < 200; i++)
			ret += do_test(i);
		break;

	case 1:
		ret += tcrypt_test("md5");
		break;

	case 2:
		ret += tcrypt_test("sha1");
		break;

	case 3:
		ret += tcrypt_test("ecb(des)");
		ret += tcrypt_test("cbc(des)");
		break;

	case 4:
		ret += tcrypt_test("ecb(des3_ede)");
		ret += tcrypt_test("cbc(des3_ede)");
		break;

	case 5:
		ret += tcrypt_test("md4");
		break;

	case 6:
		ret += tcrypt_test("sha256");
		break;

	case 7:
		ret += tcrypt_test("ecb(blowfish)");
		ret += tcrypt_test("cbc(blowfish)");
		break;

	case 8:
		ret += tcrypt_test("ecb(twofish)");
		ret += tcrypt_test("cbc(twofish)");
		break;

	case 9:
		ret += tcrypt_test("ecb(serpent)");
		break;

	case 10:
		ret += tcrypt_test("ecb(aes)");
		ret += tcrypt_test("cbc(aes)");
		ret += tcrypt_test("lrw(aes)");
		ret += tcrypt_test("xts(aes)");
		ret += tcrypt_test("ctr(aes)");
		ret += tcrypt_test("rfc3686(ctr(aes))");
		break;

	case 11:
		ret += tcrypt_test("sha384");
		break;

	case 12:
		ret += tcrypt_test("sha512");
		break;

	case 13:
		ret += tcrypt_test("deflate");
		break;

	case 14:
		ret += tcrypt_test("ecb(cast5)");
		break;

	case 15:
		ret += tcrypt_test("ecb(cast6)");
		break;

	case 16:
		ret += tcrypt_test("ecb(arc4)");
		break;

	case 17:
		ret += tcrypt_test("michael_mic");
		break;

	case 18:
		ret += tcrypt_test("crc32c");
		break;

	case 19:
		ret += tcrypt_test("ecb(tea)");
		break;

	case 20:
		ret += tcrypt_test("ecb(xtea)");
		break;

	case 21:
		ret += tcrypt_test("ecb(khazad)");
		break;

	case 22:
		ret += tcrypt_test("wp512");
		break;

	case 23:
		ret += tcrypt_test("wp384");
		break;

	case 24:
		ret += tcrypt_test("wp256");
		break;

	case 25:
		ret += tcrypt_test("ecb(tnepres)");
		break;

	case 26:
		ret += tcrypt_test("ecb(anubis)");
		ret += tcrypt_test("cbc(anubis)");
		break;

	case 27:
		ret += tcrypt_test("tgr192");
		break;

	case 28:
		ret += tcrypt_test("tgr160");
		break;

	case 29:
		ret += tcrypt_test("tgr128");
		break;

	case 30:
		ret += tcrypt_test("ecb(xeta)");
		break;

	case 31:
		ret += tcrypt_test("pcbc(fcrypt)");
		break;

	case 32:
		ret += tcrypt_test("ecb(camellia)");
		ret += tcrypt_test("cbc(camellia)");
		break;

	case 33:
		ret += tcrypt_test("sha224");
		break;

	case 34:
		ret += tcrypt_test("salsa20");
		break;

	case 35:
		ret += tcrypt_test("gcm(aes)");
		break;

	case 36:
		ret += tcrypt_test("lzo");
		break;

	case 37:
		ret += tcrypt_test("ccm(aes)");
		break;

	case 38:
		ret += tcrypt_test("cts(cbc(aes))");
		break;

	case 39:
		ret += tcrypt_test("rmd128");
		break;

	case 40:
		ret += tcrypt_test("rmd160");
		break;

	case 41:
		ret += tcrypt_test("rmd256");
		break;

	case 42:
		ret += tcrypt_test("rmd320");
		break;

	case 43:
		ret += tcrypt_test("ecb(seed)");
		break;

	case 44:
		ret += tcrypt_test("zlib");
		break;

	case 45:
		ret += tcrypt_test("rfc4309(ccm(aes))");
		break;

	case 100:
		ret += tcrypt_test("hmac(md5)");
		break;

	case 101:
		ret += tcrypt_test("hmac(sha1)");
		break;

	case 102:
		ret += tcrypt_test("hmac(sha256)");
		break;

	case 103:
		ret += tcrypt_test("hmac(sha384)");
		break;

	case 104:
		ret += tcrypt_test("hmac(sha512)");
		break;

	case 105:
		ret += tcrypt_test("hmac(sha224)");
		break;

	case 106:
		ret += tcrypt_test("xcbc(aes)");
		break;

	case 107:
		ret += tcrypt_test("hmac(rmd128)");
		break;

	case 108:
		ret += tcrypt_test("hmac(rmd160)");
		break;

	case 150:
		ret += tcrypt_test("ansi_cprng");
		break;

	case 200:
@@ -862,6 +881,8 @@ static void do_test(int m)
		test_available();
		break;
	}

	return ret;
}
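/*
 * Accumulating the tcrypt_test() results in ret means any single
 * failing self-test makes do_test() return nonzero, which
 * tcrypt_mod_init() below turns into a hard module-load failure.
 */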
static int __init tcrypt_mod_init(void)
@@ -875,14 +896,20 @@ static int __init tcrypt_mod_init(void)
		goto err_free_tv;
	}

-	do_test(mode);
+	err = do_test(mode);
+
+	if (err) {
+		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
+		goto err_free_tv;
+	}

	/* We intentionally return -EAGAIN to prevent keeping the module,
	 * unless we're running in fips mode. It does all its work from
	 * init() and doesn't offer any runtime functionality, but in
	 * the fips case, checking for a successful load is helpful.
	 * => we don't need it in the memory, do we?
	 * -- mludvig
	 */
	if (!fips_enabled)
		err = -EAGAIN;

err_free_tv:
...
@@ -165,15 +165,15 @@ static int zlib_compress_update(struct crypto_pcomp *tfm,
		return -EINVAL;
	}

+	ret = req->avail_out - stream->avail_out;
	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
-		 req->avail_in - stream->avail_in,
-		 req->avail_out - stream->avail_out);
+		 req->avail_in - stream->avail_in, ret);

	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
-	return 0;
+	return ret;
}

static int zlib_compress_final(struct crypto_pcomp *tfm,
@@ -195,15 +195,15 @@ static int zlib_compress_final(struct crypto_pcomp *tfm,
		return -EINVAL;
	}

+	ret = req->avail_out - stream->avail_out;
	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
-		 req->avail_in - stream->avail_in,
-		 req->avail_out - stream->avail_out);
+		 req->avail_in - stream->avail_in, ret);

	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
-	return 0;
+	return ret;
}

@@ -280,15 +280,15 @@ static int zlib_decompress_update(struct crypto_pcomp *tfm,
		return -EINVAL;
	}

+	ret = req->avail_out - stream->avail_out;
	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
-		 req->avail_in - stream->avail_in,
-		 req->avail_out - stream->avail_out);
+		 req->avail_in - stream->avail_in, ret);

	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
-	return 0;
+	return ret;
}

static int zlib_decompress_final(struct crypto_pcomp *tfm,
@@ -328,15 +328,15 @@ static int zlib_decompress_final(struct crypto_pcomp *tfm,
		return -EINVAL;
	}

+	ret = req->avail_out - stream->avail_out;
	pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
		 stream->avail_in, stream->avail_out,
-		 req->avail_in - stream->avail_in,
-		 req->avail_out - stream->avail_out);
+		 req->avail_in - stream->avail_in, ret);

	req->next_in = stream->next_in;
	req->avail_in = stream->avail_in;
	req->next_out = stream->next_out;
	req->avail_out = stream->avail_out;
-	return 0;
+	return ret;
}
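/*
 * In each of the four paths, "produced" is the shrink in the
 * caller-visible output budget: req->avail_out on entry minus zlib's
 * remaining stream->avail_out on exit. Returning that count instead of
 * 0 is what the pcomp interface change in this merge ("Return produced
 * bytes in crypto_{,de}compress_{update,final}") requires of backends.
 */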
...
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
config HW_RANDOM_VIA
	tristate "VIA HW Random Number Generator support"
-	depends on HW_RANDOM && X86_32
+	depends on HW_RANDOM && X86
	default HW_RANDOM
	---help---
	  This driver provides kernel-side support for the Random Number
...
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
	.data_read	= omap_rng_data_read,
};

-static int __init omap_rng_probe(struct platform_device *pdev)
+static int __devinit omap_rng_probe(struct platform_device *pdev)
{
	struct resource *res, *mem;
	int ret;
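/*
 * Probe routines can run after boot (on late device registration), by
 * which time __init code has already been freed, so they must be
 * __devinit rather than __init; this is the class of oops the
 * timeriomem fix in the merge list addresses.
 */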
...
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
	.priv		= 0,
};

-static int __init timeriomem_rng_probe(struct platform_device *pdev)
+static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
{
-	struct resource *res, *mem;
+	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -98,21 +98,12 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
	if (!res)
		return -ENOENT;

-	mem = request_mem_region(res->start, res->end - res->start + 1,
-				 pdev->name);
-	if (mem == NULL)
-		return -EBUSY;
-
-	dev_set_drvdata(&pdev->dev, mem);
-
	timeriomem_rng_data = pdev->dev.platform_data;

	timeriomem_rng_data->address = ioremap(res->start,
					       res->end - res->start + 1);
-	if (!timeriomem_rng_data->address) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
+	if (!timeriomem_rng_data->address)
+		return -EIO;

	if (timeriomem_rng_data->period != 0
	    && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
@@ -125,7 +116,7 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
	ret = hwrng_register(&timeriomem_rng_ops);
	if (ret)
-		goto err_register;
+		goto failed;

	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
		 timeriomem_rng_data->address,
@@ -133,24 +124,19 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
	return 0;

-err_register:
+failed:
	dev_err(&pdev->dev, "problem registering\n");
	iounmap(timeriomem_rng_data->address);
-err_ioremap:
-	release_resource(mem);

	return ret;
}

static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
{
-	struct resource *mem = dev_get_drvdata(&pdev->dev);
-
	del_timer_sync(&timeriomem_rng_timer);
	hwrng_unregister(&timeriomem_rng_ops);

	iounmap(timeriomem_rng_data->address);
-	release_resource(mem);

	return 0;
}
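/*
 * Two fixes fold together here, per the merge list's
 * "request_mem_region/__devinit" note: probe/remove are marked
 * __devinit/__devexit so they survive past boot, and the driver no
 * longer claims the MEM resource itself, which also removes the
 * mem-handle bookkeeping from the error and remove paths.
 */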
...
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
	struct cpuinfo_x86 *c = &cpu_data(0);
	u32 lo, hi, old_lo;
	/* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
	 * is always enabled if CPUID rng_en is set. There is no
	 * RNG configuration in this register as there used to be
	 * on earlier CPUs. */
if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
if (!cpu_has_xstore_enabled) {
printk(KERN_ERR PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
return -ENODEV;
}
return 0;
}
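/*
 * Family 6, model >= 0x0f identifies the VIA Nano here. On those CPUs
 * the only meaningful check is whether CPUID reports the RNG present
 * and enabled (cpu_has_xstore_enabled); the MSR pokes below would
 * touch a register the Nano no longer implements.
 */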
	/* Control the RNG via MSR. Tread lightly and pay very close
	 * attention to values written, as the reserved fields
	 * are documented to be "undefined and unpredictable"; but it
@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
module_init(mod_init);
module_exit(mod_exit);
-MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");
@@ -12,7 +12,7 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
	tristate "Support for VIA PadLock ACE"
-	depends on X86_32 && !UML
+	depends on X86 && !UML
	select CRYPTO_ALGAPI
	help
	  Some VIA processors come with an integrated crypto engine
...
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
	hifn_process_queue(dev);
}

-static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err, i;
	struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	return err;
}

-static void hifn_remove(struct pci_dev *pdev)
+static void __devexit hifn_remove(struct pci_dev *pdev)
{
	int i;
	struct hifn_device *dev;
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
	.remove   = __devexit_p(hifn_remove),
};

-static int __devinit hifn_init(void)
+static int __init hifn_init(void)
{
	unsigned int freq;
	int err;
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
	return 0;
}

-static void __devexit hifn_fini(void)
+static void __exit hifn_fini(void)
{
	pci_unregister_driver(&hifn_pci_driver);
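/*
 * The markings were inverted: PCI probe/remove belong in
 * __devinit/__devexit (with __devexit_p() in the pci_driver table),
 * while module init/exit are plain __init/__exit. This is the
 * "fix __dev{init,exit} markings" commit from the merge list.
 */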
...
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
	asm volatile ("test $1, %%cl;"
		      "je 1f;"
#ifndef CONFIG_X86_64
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
#else
		      "lea -1(%%rcx), %%rax;"
		      "mov $1, %%rcx;"
#endif
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
#ifndef CONFIG_X86_64
		      "mov %%eax, %%ecx;"
#else
		      "mov %%rax, %%rcx;"
#endif
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
...