Commit 3b23e665 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (50 commits)
  crypto: ixp4xx - Select CRYPTO_AUTHENC
  crypto: s390 - Respect STFL bit
  crypto: talitos - Add support for sha256 and md5 variants
  crypto: hash - Move ahash functions into crypto/hash.h
  crypto: crc32c - Add ahash implementation
  crypto: hash - Added scatter list walking helper
  crypto: prng - Deterministic CPRNG
  crypto: hash - Removed vestigial ahash fields
  crypto: hash - Fixed digest size check
  crypto: rmd - sparse annotations
  crypto: rmd128 - sparse annotations
  crypto: camellia - Use kernel-provided bitops, unaligned access helpers
  crypto: talitos - Use proper form for algorithm driver names
  crypto: talitos - Add support for 3des
  crypto: padlock - Make module loading quieter when hardware isn't available
  crypto: tcrypt - Remove unnecessary kmap/kunmap calls
  crypto: ixp4xx - Hardware crypto support for IXP4xx CPUs
  crypto: talitos - Freescale integrated security engine (SEC) driver
  [CRYPTO] tcrypt: Add self test for des3_ede cipher operating in cbc mode
  [CRYPTO] rmd: Use pointer form of endian swapping operations
  ...
parents 6c118e43 090657e4
@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
 	unsigned char status[16];
 	int ret;

+	/* check if CPACF facility (bit 17) is available */
+	if (!(stfl() & (1UL << (31 - 17))))
+		return 0;
+
 	switch (func & CRYPT_S390_OP_MASK) {
 	case CRYPT_S390_KM:
 		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
...
@@ -65,6 +65,7 @@ config CRYPTO_NULL
 config CRYPTO_CRYPTD
 	tristate "Software async crypto daemon"
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
 	select CRYPTO_MANAGER
 	help
 	  This is a generic software asynchronous crypto daemon that
@@ -212,7 +213,7 @@ comment "Digest"
 config CRYPTO_CRC32C
 	tristate "CRC32c CRC algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	select LIBCRC32C
 	help
 	  Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
 	  should not be used for other purposes because of the weakness
 	  of the algorithm.

+config CRYPTO_RMD128
+	tristate "RIPEMD-128 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-128 (ISO/IEC 10118-3:2004).
+
+	  RIPEMD-128 is a 128-bit cryptographic hash function. It should only
+	  be used as a secure replacement for RIPEMD. For other use cases,
+	  RIPEMD-160 should be used.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD160
+	tristate "RIPEMD-160 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-160 (ISO/IEC 10118-3:2004).
+
+	  RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
+	  to be used as a secure replacement for the 128-bit hash functions
+	  MD4, MD5 and its predecessor RIPEMD (not to be confused with
+	  RIPEMD-128).
+
+	  Its speed is comparable to SHA1 and there are no known attacks
+	  against RIPEMD-160.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD256
+	tristate "RIPEMD-256 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit
+	  hash. It is intended for applications that require a longer hash
+	  result without needing a larger security level than RIPEMD-128.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD320
+	tristate "RIPEMD-320 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit
+	  hash. It is intended for applications that require a longer hash
+	  result without needing a larger security level than RIPEMD-160.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
 	select CRYPTO_ALGAPI
@@ -614,6 +666,15 @@ config CRYPTO_LZO
 	help
 	  This is the LZO algorithm.

comment "Random Number Generation"
config CRYPTO_PRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
help
This option enables the generic pseudo random number generator
for cryptographic modules. Uses the Algorithm specified in
ANSI X9.31 A.2.4
source "drivers/crypto/Kconfig" source "drivers/crypto/Kconfig"
endif # if CRYPTO endif # if CRYPTO
...@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o ...@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
...@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o ...@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
...@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o ...@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
# #
......
/*
* Asynchronous Cryptographic Hash operations.
*
* This is the asynchronous version of hash.c with notification of
* completion via a callback.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
if (offset & alignmask)
nbytes = alignmask + 1 - (offset & alignmask);
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->pg = sg_page(sg);
walk->offset = sg->offset;
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset += alignmask - 1;
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
return nbytes;
}
crypto_kunmap(walk->data, 0);
crypto_yield(walk->flags);
if (err)
return err;
walk->offset = 0;
if (nbytes)
return hash_walk_next(walk);
if (!walk->total)
return 0;
walk->sg = scatterwalk_sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total)
return 0;
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
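
The two exported helpers above are designed to be used as a pair: an ahash implementation calls crypto_hash_walk_first() once and then crypto_hash_walk_done() from a loop, consuming one mapped, alignment-trimmed chunk per iteration (crc32c_update() further down in this commit is the in-tree user of the pattern). A minimal consumer sketch, where example_update() and consume() are hypothetical names and not part of this patch:

static int example_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	/* each iteration exposes one contiguous chunk as walk.data/nbytes */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		consume(walk.data, nbytes);	/* hypothetical per-chunk work */

	return nbytes;	/* 0 once the request is fully consumed */
}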
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = ahash->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
return ahash->setkey(tfm, key, keylen);
}
static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
struct ahash_tfm *crt = &tfm->crt_ahash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = ahash_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
}
const struct crypto_type crypto_ahash_type = {
.ctxsize = crypto_ahash_ctxsize,
.init = crypto_init_ahash_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 		return crypto_init_cipher_ops(tfm);

 	case CRYPTO_ALG_TYPE_DIGEST:
-		return crypto_init_digest_ops(tfm);
+		if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
+		    CRYPTO_ALG_TYPE_HASH_MASK)
+			return crypto_init_digest_ops_async(tfm);
+		else
+			return crypto_init_digest_ops(tfm);

 	case CRYPTO_ALG_TYPE_COMPRESS:
 		return crypto_init_compress_ops(tfm);
...
@@ -35,6 +35,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>

 static const u32 camellia_sp1110[256] = {
 	0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
 /*
  * macros
  */
-#define GETU32(v, pt) \
-	do { \
-		/* latest breed of gcc is clever enough to use move */ \
-		memcpy(&(v), (pt), 4); \
-		(v) = be32_to_cpu(v); \
-	} while(0)
-
-/* rotation right shift 1byte */
-#define ROR8(x) (((x) >> 8) + ((x) << 24))
-/* rotation left shift 1bit */
-#define ROL1(x) (((x) << 1) + ((x) >> 31))
-/* rotation left shift 1byte */
-#define ROL8(x) (((x) << 8) + ((x) >> 24))
-
 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
 	do { \
 		w0 = ll; \
@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
 		^ camellia_sp3033[(u8)(il >> 8)] \
 		^ camellia_sp4404[(u8)(il      )]; \
 	yl ^= yr; \
-	yr = ROR8(yr); \
+	yr = ror32(yr, 8); \
 	yr ^= yl; \
 } while(0)
@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[7] ^= subL[1]; subR[7] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[9];
 	dw = subL[1] & subL[9],
-	subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
+	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
 	/* round 8 */
 	subL[11] ^= subL[1]; subR[11] ^= subR[1];
 	/* round 10 */
@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[15] ^= subL[1]; subR[15] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[17];
 	dw = subL[1] & subL[17],
-	subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
+	subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
 	/* round 14 */
 	subL[19] ^= subL[1]; subR[19] ^= subR[1];
 	/* round 16 */
@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		subL[1] ^= subR[1] & ~subR[25];
 		dw = subL[1] & subL[25],
-		subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
+		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
 		/* round 20 */
 		subL[27] ^= subL[1]; subR[27] ^= subR[1];
 		/* round 22 */
@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[26] ^= kw4l; subR[26] ^= kw4r;
 	kw4l ^= kw4r & ~subR[24];
 	dw = kw4l & subL[24],
-	kw4r ^= ROL1(dw); /* modified for FL(kl5) */
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
 	}
 	/* round 17 */
 	subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[18] ^= kw4l; subR[18] ^= kw4r;
 	kw4l ^= kw4r & ~subR[16];
 	dw = kw4l & subL[16],
-	kw4r ^= ROL1(dw); /* modified for FL(kl3) */
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
 	/* round 11 */
 	subL[14] ^= kw4l; subR[14] ^= kw4r;
 	/* round 9 */
@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[10] ^= kw4l; subR[10] ^= kw4r;
 	kw4l ^= kw4r & ~subR[8];
 	dw = kw4l & subL[8],
-	kw4r ^= ROL1(dw); /* modified for FL(kl1) */
+	kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
 	/* round 5 */
 	subL[6] ^= kw4l; subR[6] ^= kw4r;
 	/* round 3 */
@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(6) = subR[5] ^ subR[7];
 	tl = subL[10] ^ (subR[10] & ~subR[8]);
 	dw = tl & subL[8],  /* FL(kl1) */
-	tr = subR[10] ^ ROL1(dw);
+	tr = subR[10] ^ rol32(dw, 1);
 	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
 	SUBKEY_R(7) = subR[6] ^ tr;
 	SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(9) = subR[9];
 	tl = subL[7] ^ (subR[7] & ~subR[9]);
 	dw = tl & subL[9],  /* FLinv(kl2) */
-	tr = subR[7] ^ ROL1(dw);
+	tr = subR[7] ^ rol32(dw, 1);
 	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
 	SUBKEY_R(10) = tr ^ subR[11];
 	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(14) = subR[13] ^ subR[15];
 	tl = subL[18] ^ (subR[18] & ~subR[16]);
 	dw = tl & subL[16], /* FL(kl3) */
-	tr = subR[18] ^ ROL1(dw);
+	tr = subR[18] ^ rol32(dw, 1);
 	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
 	SUBKEY_R(15) = subR[14] ^ tr;
 	SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(17) = subR[17];
 	tl = subL[15] ^ (subR[15] & ~subR[17]);
 	dw = tl & subL[17], /* FLinv(kl4) */
-	tr = subR[15] ^ ROL1(dw);
+	tr = subR[15] ^ rol32(dw, 1);
 	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
 	SUBKEY_R(18) = tr ^ subR[19];
 	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		tl = subL[26] ^ (subR[26] & ~subR[24]);
 		dw = tl & subL[24], /* FL(kl5) */
-		tr = subR[26] ^ ROL1(dw);
+		tr = subR[26] ^ rol32(dw, 1);
 		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
 		SUBKEY_R(23) = subR[22] ^ tr;
 		SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		SUBKEY_R(25) = subR[25];
 		tl = subL[23] ^ (subR[23] & ~subR[25]);
 		dw = tl & subL[25], /* FLinv(kl6) */
-		tr = subR[23] ^ ROL1(dw);
+		tr = subR[23] ^ rol32(dw, 1);
 		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
 		SUBKEY_R(26) = tr ^ subR[27];
 		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* apply the inverse of the last half of P-function */
 	i = 2;
 	do {
-		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
+		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
 		SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
-		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
+		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
 		SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
-		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
+		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
 		SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
-		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
+		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
 		SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
-		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
+		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
 		SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
-		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
+		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
 		SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
 		i += 8;
 	} while (i < max);
@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
 	/**
 	 *  k == kll || klr || krl || krr (|| is concatenation)
 	 */
-	GETU32(kll, key     );
-	GETU32(klr, key +  4);
-	GETU32(krl, key +  8);
-	GETU32(krr, key + 12);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);

 	/* generate KL dependent subkeys */
 	/* kw1 */
@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
 	 *  key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
 	 *  (|| is concatenation)
 	 */
-	GETU32(kll,  key     );
-	GETU32(klr,  key +  4);
-	GETU32(krl,  key +  8);
-	GETU32(krr,  key + 12);
-	GETU32(krll, key + 16);
-	GETU32(krlr, key + 20);
-	GETU32(krrl, key + 24);
-	GETU32(krrr, key + 28);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);
+	krll = get_unaligned_be32(key + 16);
+	krlr = get_unaligned_be32(key + 20);
+	krrl = get_unaligned_be32(key + 24);
+	krrr = get_unaligned_be32(key + 28);

 	/* generate KL dependent subkeys */
 	/* kw1 */
@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
 	t0 &= ll; \
 	t2 |= rr; \
 	rl ^= t2; \
-	lr ^= ROL1(t0); \
+	lr ^= rol32(t0, 1); \
 	t3 = krl; \
 	t1 = klr; \
 	t3 &= rl; \
 	t1 |= lr; \
 	ll ^= t1; \
-	rr ^= ROL1(t3); \
+	rr ^= rol32(t3, 1); \
 } while(0)

 #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
 	il ^= kl; \
 	ir ^= il ^ kr; \
 	yl ^= ir; \
-	yr ^= ROR8(il) ^ ir; \
+	yr ^= ror32(il, 8) ^ ir; \
 } while(0)

 /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
...
@@ -5,20 +5,23 @@
  *
  * This module file is a wrapper to invoke the lib/crc32c routines.
  *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/crc32c.h>
 #include <linux/kernel.h>

-#define CHKSUM_BLOCK_SIZE	32
+#define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4

 struct chksum_ctx {
@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
 	*(__le32 *)out = ~cpu_to_le32(mctx->crc);
 }

-static int crc32c_cra_init(struct crypto_tfm *tfm)
+static int crc32c_cra_init_old(struct crypto_tfm *tfm)
 {
 	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }

-static struct crypto_alg alg = {
+static struct crypto_alg old_alg = {
 	.cra_name	= "crc32c",
 	.cra_flags	= CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	= CHKSUM_BLOCK_SIZE,
 	.cra_ctxsize	= sizeof(struct chksum_ctx),
 	.cra_module	= THIS_MODULE,
-	.cra_list	= LIST_HEAD_INIT(alg.cra_list),
-	.cra_init	= crc32c_cra_init,
+	.cra_list	= LIST_HEAD_INIT(old_alg.cra_list),
+	.cra_init	= crc32c_cra_init_old,
 	.cra_u	= {
 		.digest = {
 			.dia_digestsize= CHKSUM_DIGEST_SIZE,
@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
 		}
 	}
 };
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
*/
static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_ahash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
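
Because crc32c_final() inverts the accumulator on output, a caller who wants to resume a computation from a previously returned digest must undo that inversion before seeding, exactly as the comment above describes. A sketch under those assumptions (prev_digest is a hypothetical __le32 holding an earlier result, tfm an already-allocated crc32c ahash transform, and crypto_ahash_setkey() stands for the setkey entry point wired up here):

	__le32 seed = ~prev_digest;	/* undo the final XOR of the old digest */

	/* keylen must be sizeof(u32); anything else fails with -EINVAL */
	err = crypto_ahash_setkey(tfm, (const u8 *)&seed, sizeof(seed));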
static int crc32c_init(struct ahash_request *req)
{
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 *crcp = ahash_request_ctx(req);
*crcp = *mctx;
return 0;
}
static int crc32c_update(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *crcp = ahash_request_ctx(req);
u32 crc = *crcp;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*crcp = crc;
return 0;
}
static int crc32c_final(struct ahash_request *req)
{
u32 *crcp = ahash_request_ctx(req);
*(__le32 *)req->result = ~cpu_to_le32p(crcp);
return 0;
}
static int crc32c_digest(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 crc = *mctx;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*(__le32 *)req->result = ~cpu_to_le32(crc);
return 0;
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = ~0;
tfm->crt_ahash.reqsize = sizeof(u32);
return 0;
}
static struct crypto_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = crc32c_cra_init,
.cra_type = &crypto_ahash_type,
.cra_u = {
.ahash = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = crc32c_setkey,
.init = crc32c_init,
.update = crc32c_update,
.final = crc32c_final,
.digest = crc32c_digest,
}
}
};
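
For reference, driving the new ahash entry points might look roughly like the sketch below (error handling omitted; buf and len are a hypothetical linear buffer, and with no callback set the request completes synchronously):

	struct crypto_ahash *tfm = crypto_alloc_ahash("crc32c", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	struct scatterlist sg;
	u8 digest[CHKSUM_DIGEST_SIZE];

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, digest, len);
	crypto_ahash_digest(req);	/* digest[] receives the inverted le32 crc */

	ahash_request_free(req);
	crypto_free_ahash(tfm);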
 static int __init crc32c_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int err;
+
+	err = crypto_register_alg(&old_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_alg(&alg);
+	if (err)
+		crypto_unregister_alg(&old_alg);
+
+	return err;
 }

 static void __exit crc32c_mod_fini(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_alg(&old_alg);
 }

 module_init(crc32c_mod_init);
...
@@ -11,6 +11,7 @@
  */

 #include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };

+struct cryptd_hash_ctx {
+	struct crypto_hash *child;
+};
+
+struct cryptd_hash_request_ctx {
+	crypto_completion_t complete;
+};
+
 static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
 {
@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,

 	rctx = ablkcipher_request_ctx(req);

-	if (unlikely(err == -EINPROGRESS)) {
-		rctx->complete(&req->base, err);
-		return;
-	}
+	if (unlikely(err == -EINPROGRESS))
+		goto out;

 	desc.tfm = child;
 	desc.info = req->info;
@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,

 	req->base.complete = rctx->complete;

+out:
 	local_bh_disable();
-	req->base.complete(&req->base, err);
+	rctx->complete(&req->base, err);
 	local_bh_enable();
 }
@@ -261,6 +268,240 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	return inst;
 }
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_hash *cipher;
cipher = crypto_spawn_hash(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
tfm->crt_ahash.reqsize =
sizeof(struct cryptd_hash_request_ctx);
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_state *state = cryptd_get_state(tfm);
int active;
mutex_lock(&state->mutex);
active = ahash_tfm_in_queue(&state->queue,
__crypto_ahash_cast(tfm));
mutex_unlock(&state->mutex);
BUG_ON(active);
crypto_free_hash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_hash *child = ctx->child;
int err;
crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_hash_setkey(child, key, keylen);
crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int cryptd_hash_enqueue(struct ahash_request *req,
crypto_completion_t complete)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_state *state =
cryptd_get_state(crypto_ahash_tfm(tfm));
int err;
rctx->complete = req->base.complete;
req->base.complete = complete;
spin_lock_bh(&state->lock);
err = ahash_enqueue_request(&state->queue, req);
spin_unlock_bh(&state->lock);
wake_up_process(state->task);
return err;
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->init(&desc);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->update(&desc,
req->src,
req->nbytes);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->final(&desc, req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->digest(&desc,
req->src,
req->nbytes,
req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
static struct crypto_instance *cryptd_alloc_hash(
struct rtattr **tb, struct cryptd_state *state)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ahash_type;
inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.cra_init = cryptd_hash_init_tfm;
inst->alg.cra_exit = cryptd_hash_exit_tfm;
inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
 static struct cryptd_state state;

 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
@@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 		return cryptd_alloc_blkcipher(tb, &state);
+	case CRYPTO_ALG_TYPE_DIGEST:
+		return cryptd_alloc_hash(tb, &state);
 	}

 	return ERR_PTR(-EINVAL);
...
@@ -12,6 +12,7 @@
  *
  */

+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
@@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
 	struct hash_tfm *ops = &tfm->crt_hash;
 	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;

-	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+	if (dalg->dia_digestsize > PAGE_SIZE / 8)
 		return -EINVAL;

 	ops->init = init;
@@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
 void crypto_exit_digest_ops(struct crypto_tfm *tfm)
 {
 }
static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return -ENOSYS;
}
static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return dalg->dia_setkey(tfm, key, keylen);
}
static int digest_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
dalg->dia_init(tfm);
return 0;
}
static int digest_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
update(&desc, req->src, req->nbytes);
return 0;
}
static int digest_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
final(&desc, req->result);
return 0;
}
static int digest_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return digest(&desc, req->src, req->nbytes, req->result);
}
int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
crt->init = digest_async_init;
crt->update = digest_async_update;
crt->final = digest_async_final;
crt->digest = digest_async_digest;
crt->setkey = dalg->dia_setkey ? digest_async_setkey :
digest_async_nosetkey;
crt->digestsize = dalg->dia_digestsize;
return 0;
}
@@ -9,6 +9,7 @@
  * any later version.
  */

+#include <crypto/internal/hash.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key,
 	return alg->setkey(crt, key, keylen);
 }
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
			unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
return alg->setkey(tfm_hash, key, keylen);
}
static int hash_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->init(&desc);
}
static int hash_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->update(&desc, req->src, req->nbytes);
}
static int hash_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->final(&desc, req->result);
}
static int hash_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->digest(&desc, req->src, req->nbytes, req->result);
}
static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
crt->init = hash_async_init;
crt->update = hash_async_update;
crt->final = hash_async_final;
crt->digest = hash_async_digest;
crt->setkey = hash_async_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
{
	struct hash_tfm *crt = &tfm->crt_hash;
	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;

-	if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
-		return -EINVAL;
-
	crt->init = alg->init;
	crt->update = alg->update;
	crt->final = alg->final;
	crt->digest = alg->digest;
	crt->setkey = hash_setkey;
	crt->digestsize = alg->digestsize;
	return 0;
}
static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
return crypto_init_hash_ops_async(tfm);
else
return crypto_init_hash_ops_sync(tfm);
}
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
...
@@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
 	int err;
+	int ds;

 	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
 	if (err)
@@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);

+	inst = ERR_PTR(-EINVAL);
+	ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
+				    alg->cra_digest.dia_digestsize;
+	if (ds > alg->cra_blocksize)
+		goto out_put_alg;
+
 	inst = crypto_alloc_instance("hmac", alg);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	inst->alg.cra_alignmask = alg->cra_alignmask;
 	inst->alg.cra_type = &crypto_hash_type;

-	inst->alg.cra_hash.digestsize =
-		(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-		CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
-		alg->cra_digest.dia_digestsize;
+	inst->alg.cra_hash.digestsize = ds;

 	inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
-				ALIGN(inst->alg.cra_blocksize * 2 +
-				      inst->alg.cra_hash.digestsize,
+				ALIGN(inst->alg.cra_blocksize * 2 + ds,
 				      sizeof(void *));

 	inst->alg.cra_init = hmac_init_tfm;
...
@@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
 struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);

 int crypto_init_digest_ops(struct crypto_tfm *tfm);
+int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
 int crypto_init_cipher_ops(struct crypto_tfm *tfm);
 int crypto_init_compress_ops(struct crypto_tfm *tfm);
...
/*
* PRNG: Pseudo Random Number Generator
* Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
* AES 128 cipher in RFC3686 ctr mode
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include "prng.h"
#define TEST_PRNG_ON_START 0
#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
#define DEFAULT_PRNG_KSZ 20
#define DEFAULT_PRNG_IV "defaultv"
#define DEFAULT_PRNG_IVSZ 8
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
/*
* Flags for the prng_context flags field
*/
#define PRNG_FIXED_SIZE 0x1
#define PRNG_NEED_RESET 0x2
/*
* Note: DT is our counter value
* I is our intermediate value
* V is our seed vector
* See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
* for implementation details
*/
struct prng_context {
char *prng_key;
char *prng_iv;
spinlock_t prng_lock;
unsigned char rand_data[DEFAULT_BLK_SZ];
unsigned char last_rand_data[DEFAULT_BLK_SZ];
unsigned char DT[DEFAULT_BLK_SZ];
unsigned char I[DEFAULT_BLK_SZ];
unsigned char V[DEFAULT_BLK_SZ];
u32 rand_data_valid;
struct crypto_blkcipher *tfm;
u32 flags;
};
static int dbg;
static void hexdump(char *note, unsigned char *buf, unsigned int len)
{
if (dbg) {
printk(KERN_CRIT "%s", note);
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
16, 1,
buf, len, false);
}
}
#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
static void xor_vectors(unsigned char *in1, unsigned char *in2,
unsigned char *out, unsigned int size)
{
int i;
for (i=0;i<size;i++)
out[i] = in1[i] ^ in2[i];
}
/*
* Returns DEFAULT_BLK_SZ bytes of random data per call
* returns 0 if generation succeeded, <0 if something went wrong
*/
static int _get_more_prng_bytes(struct prng_context *ctx)
{
int i;
struct blkcipher_desc desc;
struct scatterlist sg_in, sg_out;
int ret;
unsigned char tmp[DEFAULT_BLK_SZ];
desc.tfm = ctx->tfm;
desc.flags = 0;
dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
/*
* This algorithm is a 3 stage state machine
*/
for (i=0;i<3;i++) {
desc.tfm = ctx->tfm;
desc.flags = 0;
switch (i) {
case 0:
/*
* Start by encrypting the counter value
* This gives us an intermediate value I
*/
memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
break;
case 1:
/*
* Next xor I with our secret vector V
* encrypt that result to obtain our
* pseudo random data which we output
*/
xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
break;
case 2:
/*
* First check that we didn't produce the same random data
* that we did last time around through this
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
printk(KERN_ERR "ctx %p Failed repetition check!\n",
ctx);
ctx->flags |= PRNG_NEED_RESET;
return -1;
}
memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
/*
* Lastly xor the random data with I
* and encrypt that to obtain a new secret vector V
*/
xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
break;
}
/* Initialize our input buffer */
sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
/* do the encryption */
ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
/* And check the result */
if (ret) {
dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
ctx->rand_data_valid = DEFAULT_BLK_SZ;
return -1;
}
}
/*
* Now update our DT value
*/
for (i=DEFAULT_BLK_SZ-1;i>0;i--) {
ctx->DT[i] = ctx->DT[i-1];
}
ctx->DT[0] += 1;
dbgprint("Returning new block for context %p\n",ctx);
ctx->rand_data_valid = 0;
hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
return 0;
}
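
Stated compactly, the three stages above compute the X9.31 A.2.4 recurrence I = E(K, DT), R = E(K, I ^ V), V = E(K, R ^ I), followed by the byte-shift-and-increment counter update at the end of the function. A standalone model of one step, where aes_encrypt_block() is a hypothetical single-block AES primitive standing in for the blkcipher calls above:

static void x931_step(const unsigned char *key, unsigned char *DT,
		      unsigned char *V, unsigned char *R)
{
	unsigned char I[DEFAULT_BLK_SZ], t[DEFAULT_BLK_SZ];
	int i;

	aes_encrypt_block(key, DT, I);		/* stage 0: I = E(K, DT) */

	for (i = 0; i < DEFAULT_BLK_SZ; i++)	/* stage 1: R = E(K, I ^ V) */
		t[i] = I[i] ^ V[i];
	aes_encrypt_block(key, t, R);

	for (i = 0; i < DEFAULT_BLK_SZ; i++)	/* stage 2: V = E(K, R ^ I) */
		t[i] = R[i] ^ I[i];
	aes_encrypt_block(key, t, V);
}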
/* Our exported functions */
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
{
unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
if (nbytes < 0)
return -EINVAL;
spin_lock_irqsave(&ctx->prng_lock, flags);
err = -EFAULT;
if (ctx->flags & PRNG_NEED_RESET)
goto done;
/*
* If the FIXED_SIZE flag is on, only return whole blocks of
* pseudo random data
*/
err = -EINVAL;
if (ctx->flags & PRNG_FIXED_SIZE) {
if (nbytes < DEFAULT_BLK_SZ)
goto done;
byte_count = DEFAULT_BLK_SZ;
}
err = byte_count;
dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -EFAULT;
goto done;
}
}
/*
* Copy up to the next whole block size
*/
if (byte_count < DEFAULT_BLK_SZ) {
for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
if (byte_count == 0)
goto done;
}
}
/*
* Now copy whole blocks
*/
for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -1;
goto done;
}
memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
ctx->rand_data_valid += DEFAULT_BLK_SZ;
ptr += DEFAULT_BLK_SZ;
}
/*
* Now copy any extra partial data
*/
if (byte_count)
goto remainder;
done:
spin_unlock_irqrestore(&ctx->prng_lock, flags);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
return err;
}
EXPORT_SYMBOL_GPL(get_prng_bytes);
struct prng_context *alloc_prng_context(void)
{
	struct prng_context *ctx = kzalloc(sizeof(struct prng_context), GFP_KERNEL);

	if (!ctx)
		return NULL;

	spin_lock_init(&ctx->prng_lock);
	if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
kfree(ctx);
ctx = NULL;
}
dbgprint(KERN_CRIT "returning context %p\n",ctx);
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_prng_context);
void free_prng_context(struct prng_context *ctx)
{
crypto_free_blkcipher(ctx->tfm);
kfree(ctx);
}
EXPORT_SYMBOL_GPL(free_prng_context);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V, unsigned char *DT)
{
int ret;
int iv_len;
int rc = -EFAULT;
spin_lock(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
	/* keep a reference to the caller-supplied key/iv, or fall back
	 * to the built-in defaults */
	if (key)
		ctx->prng_key = (char *)key;
	else
		ctx->prng_key = DEFAULT_PRNG_KEY;

	if (iv)
		ctx->prng_iv = (char *)iv;
	else
		ctx->prng_iv = DEFAULT_PRNG_IV;
if (V)
memcpy(ctx->V,V,DEFAULT_BLK_SZ);
else
memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
if (DT)
memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
else
memset(ctx->DT, 0, DEFAULT_BLK_SZ);
memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
if (ctx->tfm)
crypto_free_blkcipher(ctx->tfm);
	ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n", ctx);
		ctx->tfm = NULL;
		goto out;
	}
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(ctx->tfm));
crypto_free_blkcipher(ctx->tfm);
goto out;
}
iv_len = crypto_blkcipher_ivsize(ctx->tfm);
if (iv_len) {
crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
}
rc = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
spin_unlock(&ctx->prng_lock);
return rc;
}
EXPORT_SYMBOL_GPL(reset_prng_context);
/* Module initialization */
static int __init prng_mod_init(void)
{

#ifdef TEST_PRNG_ON_START
	int i;
	unsigned char tmpbuf[DEFAULT_BLK_SZ];

	struct prng_context *ctx = alloc_prng_context();

	if (ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < 16; i++) {
		if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
			free_prng_context(ctx);
			return -EFAULT;
		}
	}

	free_prng_context(ctx);
#endif

	return 0;
}
static void __exit prng_mod_fini(void)
{
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
/*
* PRNG: Pseudo Random Number Generator
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
*/
#ifndef _PRNG_H_
#define _PRNG_H_
struct prng_context;
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
struct prng_context *alloc_prng_context(void);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V,
unsigned char *DT);
void free_prng_context(struct prng_context *ctx);
#endif
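For context, the exported interface above is consumed in the usual alloc/get/free pattern; the TEST_PRNG_ON_START harness in prng_mod_init() is the only in-tree user at this point. A minimal sketch of a hypothetical caller follows; my_fill_nonce and MY_NONCE_BYTES are illustrative names, not part of this series.

/*
 * Sketch of a hypothetical consumer of the PRNG interface; mirrors
 * the TEST_PRNG_ON_START loop in prng_mod_init().
 */
#include <linux/errno.h>
#include "prng.h"

#define MY_NONCE_BYTES 16	/* matches DEFAULT_BLK_SZ in crypto/prng.c */

static int my_fill_nonce(char *nonce)
{
	struct prng_context *ctx;
	int ret;

	ctx = alloc_prng_context();	/* seeded with the built-in defaults */
	if (!ctx)
		return -ENOMEM;

	/* NULL key/iv/V/DT keep the defaults; pass buffers to re-seed */
	ret = reset_prng_context(ctx, NULL, NULL, NULL, NULL);
	if (ret)
		goto out;

	ret = get_prng_bytes(nonce, MY_NONCE_BYTES, ctx);
	if (ret > 0)
		ret = 0;	/* success: get_prng_bytes() returns the count */
out:
	free_prng_context(ctx);
	return ret;
}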
/*
* Common values for RIPEMD algorithms
*/
#ifndef _CRYPTO_RMD_H
#define _CRYPTO_RMD_H
#define RMD128_DIGEST_SIZE 16
#define RMD128_BLOCK_SIZE 64
#define RMD160_DIGEST_SIZE 20
#define RMD160_BLOCK_SIZE 64
#define RMD256_DIGEST_SIZE 32
#define RMD256_BLOCK_SIZE 64
#define RMD320_DIGEST_SIZE 40
#define RMD320_BLOCK_SIZE 64
/* initial values */
#define RMD_H0 0x67452301UL
#define RMD_H1 0xefcdab89UL
#define RMD_H2 0x98badcfeUL
#define RMD_H3 0x10325476UL
#define RMD_H4 0xc3d2e1f0UL
#define RMD_H5 0x76543210UL
#define RMD_H6 0xfedcba98UL
#define RMD_H7 0x89abcdefUL
#define RMD_H8 0x01234567UL
#define RMD_H9 0x3c2d1e0fUL
/* constants */
#define RMD_K1 0x00000000UL
#define RMD_K2 0x5a827999UL
#define RMD_K3 0x6ed9eba1UL
#define RMD_K4 0x8f1bbcdcUL
#define RMD_K5 0xa953fd4eUL
#define RMD_K6 0x50a28be6UL
#define RMD_K7 0x5c4dd124UL
#define RMD_K8 0x6d703ef3UL
#define RMD_K9 0x7a6d76e9UL
#endif
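To illustrate how these constants are consumed: a RIPEMD-128 init routine loads RMD_H0..RMD_H3 as the initial chaining state (RIPEMD-160 additionally takes RMD_H4, and the 256/320-bit extensions seed their second line from RMD_H5..RMD_H9, while RMD_K1..RMD_K9 are the per-round addends of the left and right lines). A minimal sketch along the lines of the rmd128 driver added by this series; the context field names are assumptions following the conventions of the in-tree digest drivers.

/*
 * Sketch of an init routine consuming RMD_H0..RMD_H3; context layout
 * (byte_count, state, buffer) is assumed, not quoted from the driver.
 */
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/types.h>

struct rmd128_ctx {
	u64 byte_count;
	u32 state[4];
	__le32 buffer[16];
};

static void rmd128_init(struct crypto_tfm *tfm)
{
	struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);

	rctx->byte_count = 0;

	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;

	memset(rctx->buffer, 0, sizeof(rctx->buffer));
}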
...@@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
	  Select this option if you want to enable the random number generator
	  on the HIFN 795x crypto adapters.
config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select HW_RANDOM
depends on FSL_SOC
help
Say 'Y' here to use the Freescale Security Engine (SEC)
to offload cryptographic algorithm computation.
The Freescale SEC is present on PowerQUICC 'E' processors, such
as the MPC8349E and MPC8548E.
To compile this driver as a module, choose M here: the module
will be called talitos.
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
endif # CRYPTO_HW
...@@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
...@@ -385,12 +385,12 @@ static int __init padlock_init(void)
	int ret;

	if (!cpu_has_xcrypt) {
-		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}
......
...@@ -254,12 +254,12 @@ static int __init padlock_init(void)
	int rc = -ENODEV;

	if (!cpu_has_phe) {
-		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
+		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
-		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}
......
/*
* Hash: Hash algorithms under the crypto API
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
#include <linux/crypto.h>
struct crypto_ahash {
struct crypto_tfm base;
};
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_ahash *)tfm;
}
static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
mask &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_AHASH;
mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
}
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_free_tfm(crypto_ahash_tfm(tfm));
}
static inline unsigned int crypto_ahash_alignmask(
struct crypto_ahash *tfm)
{
return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
}
static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm)
{
return &crypto_ahash_tfm(tfm)->crt_ahash;
}
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->digestsize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}
static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}
static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->reqsize;
}
static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
struct ahash_tfm *crt = crypto_ahash_crt(tfm);
return crt->setkey(tfm, key, keylen);
}
static inline int crypto_ahash_digest(struct ahash_request *req)
{
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
return crt->digest(req);
}
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
req->base.tfm = crypto_ahash_tfm(tfm);
}
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
req = kmalloc(sizeof(struct ahash_request) +
crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
static inline void ahash_request_free(struct ahash_request *req)
{
kfree(req);
}
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct ahash_request, base);
}
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t complete,
void *data)
{
req->base.complete = complete;
req->base.data = data;
req->base.flags = flags;
}
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
{
req->src = src;
req->nbytes = nbytes;
req->result = result;
}
#endif /* _CRYPTO_HASH_H */
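A minimal sketch of a synchronous caller of this new interface, hashing one linear buffer through the crc32c ahash added elsewhere in this series; my_digest_buf is an illustrative name, and the sketch passes a NULL completion callback, so it ignores the -EINPROGRESS/-EBUSY asynchronous cases for brevity.

/*
 * Sketch of an ahash user; assumes a context where sleeping is
 * allowed (GFP_KERNEL) and a driver-provided "crc32c" ahash.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int my_digest_buf(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}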