Commit dc47d381 authored by Linus Torvalds

Merge git://github.com/herbertx/crypto

* git://github.com/herbertx/crypto: (48 commits)
  crypto: user - Depend on NET instead of selecting it
  crypto: user - Add dependency on NET
  crypto: talitos - handle descriptor not found in error path
  crypto: user - Initialise match in crypto_alg_match
  crypto: testmgr - add twofish tests
  crypto: testmgr - add blowfish test-vectors
  crypto: Make hifn_795x build depend on !ARCH_DMA_ADDR_T_64BIT
  crypto: twofish-x86_64-3way - fix ctr blocksize to 1
  crypto: blowfish-x86_64 - fix ctr blocksize to 1
  crypto: whirlpool - count rounds from 0
  crypto: Add userspace report for compress type algorithms
  crypto: Add userspace report for cipher type algorithms
  crypto: Add userspace report for rng type algorithms
  crypto: Add userspace report for pcompress type algorithms
  crypto: Add userspace report for nivaead type algorithms
  crypto: Add userspace report for aead type algorithms
  crypto: Add userspace report for givcipher type algorithms
  crypto: Add userspace report for ablkcipher type algorithms
  crypto: Add userspace report for blkcipher type algorithms
  crypto: Add userspace report for ahash type algorithms
  ...
parents f6d90b4f 5db017aa
Picochip picoXcell SPAcc (Security Protocol Accelerator) bindings
Picochip picoXcell devices contain crypto offload engines that may be used for
IPSEC and femtocell layer 2 ciphering.
Required properties:
- compatible : "picochip,spacc-ipsec" for the IPSEC offload engine
"picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
- reg : Offset and length of the register set for this device
- interrupt-parent : The interrupt controller that controls the SPAcc
interrupt.
- interrupts : The interrupt line from the SPAcc.
- ref-clock : The input clock that drives the SPAcc.
Example SPAcc node:
spacc@10000 {
	compatible = "picochip,spacc-ipsec";
	reg = <0x100000 0x10000>;
	interrupt-parent = <&vic0>;
	interrupts = <24>;
	ref-clock = <&ipsec_clk>, "ref";
};
@@ -7,21 +7,33 @@ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
aes-i586-y := aes-i586-asm_32.o aes_glue.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
# enable AVX support only when $(AS) can actually assemble the instructions
ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes)
AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT
CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT
endif
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
@@ -4,6 +4,7 @@
 */
#include <crypto/aes.h>
#include <asm/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
......
/*
* Blowfish Cipher Algorithm (x86_64)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "blowfish-x86_64-asm.S"
.text
/* structure of crypto context */
#define p 0
#define s0 ((16 + 2) * 4)
#define s1 ((16 + 2 + (1 * 256)) * 4)
#define s2 ((16 + 2 + (2 * 256)) * 4)
#define s3 ((16 + 2 + (3 * 256)) * 4)
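For orientation, these byte offsets match the C-side context layout used by the shared Blowfish code later in this series (18 32-bit round keys followed by four 256-entry S-boxes). A minimal sketch, not part of the assembler file:

struct bf_ctx {
	u32 p[16 + 2];	/* P-array: byte offset 0, i.e. the "p" define above */
	u32 s[4 * 256];	/* s0 at (16 + 2) * 4; s1/s2/s3 each 256 * 4 bytes later */
};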
/* register macros */
#define CTX %rdi
#define RIO %rsi
#define RX0 %rax
#define RX1 %rbx
#define RX2 %rcx
#define RX3 %rdx
#define RX0d %eax
#define RX1d %ebx
#define RX2d %ecx
#define RX3d %edx
#define RX0bl %al
#define RX1bl %bl
#define RX2bl %cl
#define RX3bl %dl
#define RX0bh %ah
#define RX1bh %bh
#define RX2bh %ch
#define RX3bh %dh
#define RT0 %rbp
#define RT1 %rsi
#define RT2 %r8
#define RT3 %r9
#define RT0d %ebp
#define RT1d %esi
#define RT2d %r8d
#define RT3d %r9d
#define RKEY %r10
/***********************************************************************
* 1-way blowfish
***********************************************************************/
#define F() \
rorq $16, RX0; \
movzbl RX0bh, RT0d; \
movzbl RX0bl, RT1d; \
rolq $16, RX0; \
movl s0(CTX,RT0,4), RT0d; \
addl s1(CTX,RT1,4), RT0d; \
movzbl RX0bh, RT1d; \
movzbl RX0bl, RT2d; \
rolq $32, RX0; \
xorl s2(CTX,RT1,4), RT0d; \
addl s3(CTX,RT2,4), RT0d; \
xorq RT0, RX0;
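The macro above unrolls the standard Blowfish F function; the generic C implementation in this same series expresses it over the combined S-box array. A small C sketch of that function, for illustration only (not a drop-in for the assembler):

static inline u32 bf_f(const u32 *S, u32 x)
{
	u8 a = x >> 24, b = x >> 16, c = x >> 8, d = x;

	/* add, xor, add across the four key-dependent 256-entry S-boxes */
	return ((S[a] + S[256 + b]) ^ S[512 + c]) + S[768 + d];
}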
#define add_roundkey_enc(n) \
xorq p+4*(n)(CTX), RX0;
#define round_enc(n) \
add_roundkey_enc(n); \
\
F(); \
F();
#define add_roundkey_dec(n) \
movq p+4*(n-1)(CTX), RT0; \
rorq $32, RT0; \
xorq RT0, RX0;
#define round_dec(n) \
add_roundkey_dec(n); \
\
F(); \
F();
#define read_block() \
movq (RIO), RX0; \
rorq $32, RX0; \
bswapq RX0;
#define write_block() \
bswapq RX0; \
movq RX0, (RIO);
#define xor_block() \
bswapq RX0; \
xorq RX0, (RIO);
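On little-endian x86-64 the instruction sequences in read_block()/write_block() amount to byte-swapping the two big-endian 32-bit halves of the block into one 64-bit register (left half in the low 32 bits). A rough userspace C equivalent, shown only to mirror the instruction sequence and labeled as an assumption about intent:

#include <stdint.h>
#include <string.h>

static uint64_t bf_read_block(const uint8_t *src)
{
	uint64_t v;

	memcpy(&v, src, 8);		/* movq (RIO), RX0 */
	v = (v >> 32) | (v << 32);	/* rorq $32, RX0   */
	return __builtin_bswap64(v);	/* bswapq RX0      */
}

static void bf_write_block(uint8_t *dst, uint64_t v)
{
	v = __builtin_bswap64(v);	/* bswapq RX0      */
	memcpy(dst, &v, 8);		/* movq RX0, (RIO) */
}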
.align 8
.global __blowfish_enc_blk
.type __blowfish_enc_blk,@function;
__blowfish_enc_blk:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
movq %rbp, %r11;
movq %rsi, %r10;
movq %rdx, RIO;
read_block();
round_enc(0);
round_enc(2);
round_enc(4);
round_enc(6);
round_enc(8);
round_enc(10);
round_enc(12);
round_enc(14);
add_roundkey_enc(16);
movq %r11, %rbp;
movq %r10, RIO;
test %cl, %cl;
jnz __enc_xor;
write_block();
ret;
__enc_xor:
xor_block();
ret;
.align 8
.global blowfish_dec_blk
.type blowfish_dec_blk,@function;
blowfish_dec_blk:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
movq %rbp, %r11;
movq %rsi, %r10;
movq %rdx, RIO;
read_block();
round_dec(17);
round_dec(15);
round_dec(13);
round_dec(11);
round_dec(9);
round_dec(7);
round_dec(5);
round_dec(3);
add_roundkey_dec(1);
movq %r10, RIO;
write_block();
movq %r11, %rbp;
ret;
/**********************************************************************
4-way blowfish, four blocks parallel
**********************************************************************/
/* F() for 4-way. Slower when used alone/1-way, but faster when used
* parallel/4-way (tested on AMD Phenom II & Intel Xeon E7330).
*/
#define F4(x) \
movzbl x ## bh, RT1d; \
movzbl x ## bl, RT3d; \
rorq $16, x; \
movzbl x ## bh, RT0d; \
movzbl x ## bl, RT2d; \
rorq $16, x; \
movl s0(CTX,RT0,4), RT0d; \
addl s1(CTX,RT2,4), RT0d; \
xorl s2(CTX,RT1,4), RT0d; \
addl s3(CTX,RT3,4), RT0d; \
xorq RT0, x;
#define add_preloaded_roundkey4() \
xorq RKEY, RX0; \
xorq RKEY, RX1; \
xorq RKEY, RX2; \
xorq RKEY, RX3;
#define preload_roundkey_enc(n) \
movq p+4*(n)(CTX), RKEY;
#define add_roundkey_enc4(n) \
add_preloaded_roundkey4(); \
preload_roundkey_enc(n + 2);
#define round_enc4(n) \
add_roundkey_enc4(n); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3);
#define preload_roundkey_dec(n) \
movq p+4*((n)-1)(CTX), RKEY; \
rorq $32, RKEY;
#define add_roundkey_dec4(n) \
add_preloaded_roundkey4(); \
preload_roundkey_dec(n - 2);
#define round_dec4(n) \
add_roundkey_dec4(n); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3);
#define read_block4() \
movq (RIO), RX0; \
rorq $32, RX0; \
bswapq RX0; \
\
movq 8(RIO), RX1; \
rorq $32, RX1; \
bswapq RX1; \
\
movq 16(RIO), RX2; \
rorq $32, RX2; \
bswapq RX2; \
\
movq 24(RIO), RX3; \
rorq $32, RX3; \
bswapq RX3;
#define write_block4() \
bswapq RX0; \
movq RX0, (RIO); \
\
bswapq RX1; \
movq RX1, 8(RIO); \
\
bswapq RX2; \
movq RX2, 16(RIO); \
\
bswapq RX3; \
movq RX3, 24(RIO);
#define xor_block4() \
bswapq RX0; \
xorq RX0, (RIO); \
\
bswapq RX1; \
xorq RX1, 8(RIO); \
\
bswapq RX2; \
xorq RX2, 16(RIO); \
\
bswapq RX3; \
xorq RX3, 24(RIO);
.align 8
.global __blowfish_enc_blk_4way
.type __blowfish_enc_blk_4way,@function;
__blowfish_enc_blk_4way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pushq %rbp;
pushq %rbx;
pushq %rcx;
preload_roundkey_enc(0);
movq %rsi, %r11;
movq %rdx, RIO;
read_block4();
round_enc4(0);
round_enc4(2);
round_enc4(4);
round_enc4(6);
round_enc4(8);
round_enc4(10);
round_enc4(12);
round_enc4(14);
add_preloaded_roundkey4();
popq %rbp;
movq %r11, RIO;
test %bpl, %bpl;
jnz __enc_xor4;
write_block4();
popq %rbx;
popq %rbp;
ret;
__enc_xor4:
xor_block4();
popq %rbx;
popq %rbp;
ret;
.align 8
.global blowfish_dec_blk_4way
.type blowfish_dec_blk_4way,@function;
blowfish_dec_blk_4way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbp;
pushq %rbx;
preload_roundkey_dec(17);
movq %rsi, %r11;
movq %rdx, RIO;
read_block4();
round_dec4(17);
round_dec4(15);
round_dec4(13);
round_dec4(11);
round_dec4(9);
round_dec4(7);
round_dec4(5);
round_dec4(3);
add_preloaded_roundkey4();
movq %r11, RIO;
write_block4();
popq %rbx;
popq %rbp;
ret;
This diff is collapsed.
This diff is collapsed.
/*
* Cryptographic API.
*
* Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
* Supplemental SSE3 instructions.
*
* This file is based on sha1_generic.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
unsigned int rounds);
#ifdef SHA1_ENABLE_AVX_SUPPORT
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
unsigned int rounds);
#endif
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
static int sha1_ssse3_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len, unsigned int partial)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int done = 0;
sctx->count += len;
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->buffer + partial, data, done);
sha1_transform_asm(sctx->state, sctx->buffer, 1);
}
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
sha1_transform_asm(sctx->state, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}
memcpy(sctx->buffer, data + done, len - done);
return 0;
}
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
int res;
/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->buffer + partial, data, len);
return 0;
}
if (!irq_fpu_usable()) {
res = crypto_sha1_update(desc, data, len);
} else {
kernel_fpu_begin();
res = __sha1_ssse3_update(desc, data, len, partial);
kernel_fpu_end();
}
return res;
}
/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 and append length */
index = sctx->count % SHA1_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
if (!irq_fpu_usable()) {
crypto_sha1_update(desc, padding, padlen);
crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
} else {
kernel_fpu_begin();
/* We need to fill a whole block for __sha1_ssse3_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->buffer + index, padding, padlen);
} else {
__sha1_ssse3_update(desc, padding, padlen, index);
}
__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
kernel_fpu_end();
}
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
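The padlen arithmetic above is the usual SHA-1 rule: append 0x80, zero-pad until the byte count is 56 mod 64, then append the 64-bit bit count. A standalone sketch of just that calculation (hypothetical helper, not part of the driver):

#include <stdio.h>

/* Number of padding bytes (0x80 followed by zeros) inserted before the
 * 8-byte length field, for a message of 'count' bytes.
 */
static unsigned int sha1_padlen(unsigned long long count)
{
	unsigned int index = count % 64;

	return (index < 56) ? (56 - index) : (64 + 56 - index);
}

int main(void)
{
	/* 50 buffered bytes -> 6 pad bytes; 60 buffered bytes -> 60 pad
	 * bytes (the padding spills into a second block), matching the
	 * padlen expression in sha1_ssse3_final() above.
	 */
	printf("%u %u\n", sha1_padlen(50), sha1_padlen(60));
	return 0;
}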
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_ssse3_init,
.update = sha1_ssse3_update,
.final = sha1_ssse3_final,
.export = sha1_ssse3_export,
.import = sha1_ssse3_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
#ifdef SHA1_ENABLE_AVX_SUPPORT
static bool __init avx_usable(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave)
return false;
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return false;
}
return true;
}
#endif
static int __init sha1_ssse3_mod_init(void)
{
/* test for SSSE3 first */
if (cpu_has_ssse3)
sha1_transform_asm = sha1_transform_ssse3;
#ifdef SHA1_ENABLE_AVX_SUPPORT
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable())
sha1_transform_asm = sha1_transform_avx;
#endif
if (sha1_transform_asm) {
pr_info("Using %s optimized SHA-1 implementation\n",
sha1_transform_asm == sha1_transform_ssse3 ? "SSSE3"
: "AVX");
return crypto_register_shash(&alg);
}
pr_info("Neither AVX nor SSSE3 is available/usable.\n");
return -ENODEV;
}
static void __exit sha1_ssse3_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS("sha1");
@@ -26,7 +26,7 @@
#define in_blk 12 /* input byte array address parameter*/
#define out_blk 8 /* output byte array address parameter*/
#define tfm 4 /* Twofish context structure */
#define ctx 4 /* Twofish context structure */
#define a_offset 0
#define b_offset 4
@@ -229,8 +229,8 @@ twofish_enc_blk:
push %esi
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base
* pointer to the ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
@@ -285,8 +285,8 @@ twofish_dec_blk:
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base
* pointer to the ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
......
/*
* Twofish Cipher 3-way parallel algorithm (x86_64)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "twofish-x86_64-asm-3way.S"
.text
/* structure of crypto context */
#define s0 0
#define s1 1024
#define s2 2048
#define s3 3072
#define w 4096
#define k 4128
/**********************************************************************
3-way twofish
**********************************************************************/
#define CTX %rdi
#define RIO %rdx
#define RAB0 %rax
#define RAB1 %rbx
#define RAB2 %rcx
#define RAB0d %eax
#define RAB1d %ebx
#define RAB2d %ecx
#define RAB0bh %ah
#define RAB1bh %bh
#define RAB2bh %ch
#define RAB0bl %al
#define RAB1bl %bl
#define RAB2bl %cl
#define RCD0 %r8
#define RCD1 %r9
#define RCD2 %r10
#define RCD0d %r8d
#define RCD1d %r9d
#define RCD2d %r10d
#define RX0 %rbp
#define RX1 %r11
#define RX2 %r12
#define RX0d %ebp
#define RX1d %r11d
#define RX2d %r12d
#define RY0 %r13
#define RY1 %r14
#define RY2 %r15
#define RY0d %r13d
#define RY1d %r14d
#define RY2d %r15d
#define RT0 %rdx
#define RT1 %rsi
#define RT0d %edx
#define RT1d %esi
#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
movzbl ab ## bl, tmp2 ## d; \
movzbl ab ## bh, tmp1 ## d; \
rorq $(rot), ab; \
op1##l T0(CTX, tmp2, 4), dst ## d; \
op2##l T1(CTX, tmp1, 4), dst ## d;
/*
* Combined G1 & G2 function. Reordered with help of rotates to have moves
* at begining.
*/
#define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
/* G1,1 && G2,1 */ \
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 0, ab ## 0, x ## 0); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 0, ab ## 0, y ## 0); \
\
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 1, ab ## 1, x ## 1); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 1, ab ## 1, y ## 1); \
\
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 2, ab ## 2, x ## 2); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 2, ab ## 2, y ## 2); \
\
/* G1,2 && G2,2 */ \
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
xchgq cd ## 0, ab ## 0; \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
xchgq cd ## 1, ab ## 1; \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
xchgq cd ## 2, ab ## 2;
#define enc_round_end(ab, x, y, n) \
addl y ## d, x ## d; \
addl x ## d, y ## d; \
addl k+4*(2*(n))(CTX), x ## d; \
xorl ab ## d, x ## d; \
addl k+4*(2*(n)+1)(CTX), y ## d; \
shrq $32, ab; \
roll $1, ab ## d; \
xorl y ## d, ab ## d; \
shlq $32, ab; \
rorl $1, x ## d; \
orq x, ab;
#define dec_round_end(ba, x, y, n) \
addl y ## d, x ## d; \
addl x ## d, y ## d; \
addl k+4*(2*(n))(CTX), x ## d; \
addl k+4*(2*(n)+1)(CTX), y ## d; \
xorl ba ## d, y ## d; \
shrq $32, ba; \
roll $1, ba ## d; \
xorl x ## d, ba ## d; \
shlq $32, ba; \
rorl $1, y ## d; \
orq y, ba;
#define encrypt_round3(ab, cd, n) \
g1g2_3(ab, cd, s0, s1, s2, s3, s0, s1, s2, s3, RX, RY); \
\
enc_round_end(ab ## 0, RX0, RY0, n); \
enc_round_end(ab ## 1, RX1, RY1, n); \
enc_round_end(ab ## 2, RX2, RY2, n);
#define decrypt_round3(ba, dc, n) \
g1g2_3(ba, dc, s1, s2, s3, s0, s3, s0, s1, s2, RY, RX); \
\
dec_round_end(ba ## 0, RX0, RY0, n); \
dec_round_end(ba ## 1, RX1, RY1, n); \
dec_round_end(ba ## 2, RX2, RY2, n);
#define encrypt_cycle3(ab, cd, n) \
encrypt_round3(ab, cd, n*2); \
encrypt_round3(ab, cd, (n*2)+1);
#define decrypt_cycle3(ba, dc, n) \
decrypt_round3(ba, dc, (n*2)+1); \
decrypt_round3(ba, dc, (n*2));
#define inpack3(in, n, xy, m) \
movq 4*(n)(in), xy ## 0; \
xorq w+4*m(CTX), xy ## 0; \
\
movq 4*(4+(n))(in), xy ## 1; \
xorq w+4*m(CTX), xy ## 1; \
\
movq 4*(8+(n))(in), xy ## 2; \
xorq w+4*m(CTX), xy ## 2;
#define outunpack3(op, out, n, xy, m) \
xorq w+4*m(CTX), xy ## 0; \
op ## q xy ## 0, 4*(n)(out); \
\
xorq w+4*m(CTX), xy ## 1; \
op ## q xy ## 1, 4*(4+(n))(out); \
\
xorq w+4*m(CTX), xy ## 2; \
op ## q xy ## 2, 4*(8+(n))(out);
#define inpack_enc3() \
inpack3(RIO, 0, RAB, 0); \
inpack3(RIO, 2, RCD, 2);
#define outunpack_enc3(op) \
outunpack3(op, RIO, 2, RAB, 6); \
outunpack3(op, RIO, 0, RCD, 4);
#define inpack_dec3() \
inpack3(RIO, 0, RAB, 4); \
rorq $32, RAB0; \
rorq $32, RAB1; \
rorq $32, RAB2; \
inpack3(RIO, 2, RCD, 6); \
rorq $32, RCD0; \
rorq $32, RCD1; \
rorq $32, RCD2;
#define outunpack_dec3() \
rorq $32, RCD0; \
rorq $32, RCD1; \
rorq $32, RCD2; \
outunpack3(mov, RIO, 0, RCD, 0); \
rorq $32, RAB0; \
rorq $32, RAB1; \
rorq $32, RAB2; \
outunpack3(mov, RIO, 2, RAB, 2);
.align 8
.global __twofish_enc_blk_3way
.type __twofish_enc_blk_3way,@function;
__twofish_enc_blk_3way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src, RIO
* %rcx: bool, if true: xor output
*/
pushq %r15;
pushq %r14;
pushq %r13;
pushq %r12;
pushq %rbp;
pushq %rbx;
pushq %rcx; /* bool xor */
pushq %rsi; /* dst */
inpack_enc3();
encrypt_cycle3(RAB, RCD, 0);
encrypt_cycle3(RAB, RCD, 1);
encrypt_cycle3(RAB, RCD, 2);
encrypt_cycle3(RAB, RCD, 3);
encrypt_cycle3(RAB, RCD, 4);
encrypt_cycle3(RAB, RCD, 5);
encrypt_cycle3(RAB, RCD, 6);
encrypt_cycle3(RAB, RCD, 7);
popq RIO; /* dst */
popq %rbp; /* bool xor */
testb %bpl, %bpl;
jnz __enc_xor3;
outunpack_enc3(mov);
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
__enc_xor3:
outunpack_enc3(xor);
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
.global twofish_dec_blk_3way
.type twofish_dec_blk_3way,@function;
twofish_dec_blk_3way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src, RIO
*/
pushq %r15;
pushq %r14;
pushq %r13;
pushq %r12;
pushq %rbp;
pushq %rbx;
pushq %rsi; /* dst */
inpack_dec3();
decrypt_cycle3(RAB, RCD, 7);
decrypt_cycle3(RAB, RCD, 6);
decrypt_cycle3(RAB, RCD, 5);
decrypt_cycle3(RAB, RCD, 4);
decrypt_cycle3(RAB, RCD, 3);
decrypt_cycle3(RAB, RCD, 2);
decrypt_cycle3(RAB, RCD, 1);
decrypt_cycle3(RAB, RCD, 0);
popq RIO; /* dst */
outunpack_dec3();
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
@@ -221,10 +221,9 @@
twofish_enc_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rdi contains the ctx address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
@@ -274,10 +273,9 @@ twofish_enc_blk:
twofish_dec_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rdi contains the ctx address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
......
@@ -44,17 +44,21 @@
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_enc_blk);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_dec_blk);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(tfm, dst, src);
twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(tfm, dst, src);
twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg alg = {
......
This diff is collapsed.
@@ -259,7 +259,9 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
@@ -287,6 +289,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
......
@@ -100,6 +100,14 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
depends on NET
select CRYPTO_MANAGER
help
Userspace configuration for cryptographic instantiations such as
cbc(aes).
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
@@ -407,6 +415,16 @@ config CRYPTO_SHA1
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA1_SSSE3
tristate "SHA1 digest algorithm (SSSE3/AVX)"
depends on X86 && 64BIT
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
Extensions (AVX), when available.
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@@ -590,6 +608,7 @@ config CRYPTO_ARC4
config CRYPTO_BLOWFISH
tristate "Blowfish cipher algorithm"
select CRYPTO_ALGAPI
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm, by Bruce Schneier.
@@ -600,6 +619,30 @@ config CRYPTO_BLOWFISH
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_COMMON
tristate
help
Common parts of the Blowfish cipher algorithm shared by the
generic c and the assembler implementations.
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
This is a variable key length cipher which can use keys from 32
bits to 448 bits in length. It's fast, simple and specifically
designed for use on "large microprocessors".
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
depends on CRYPTO
@@ -793,6 +836,26 @@ config CRYPTO_TWOFISH_X86_64
See also:
<http://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
help
Twofish cipher algorithm (x86_64, 3-way parallel).
Twofish was submitted as an AES (Advanced Encryption Standard)
candidate cipher by researchers at CounterPane Systems. It is a
16 round block cipher supporting key sizes of 128, 192, and 256
bits.
This module provides a Twofish cipher implementation that processes three
blocks in parallel, making better use of the resources of out-of-order CPUs.
See also:
<http://www.schneier.com/twofish.html>
comment "Compression" comment "Compression"
config CRYPTO_DEFLATE config CRYPTO_DEFLATE
......
@@ -31,6 +31,7 @@ obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
@@ -60,7 +61,8 @@ obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
......
@@ -23,6 +23,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/scatterwalk.h>
@@ -381,6 +383,28 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
return 0;
}
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_ablkcipher.geniv ?: "<default>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
@@ -403,6 +427,7 @@ const struct crypto_type crypto_ablkcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ablkcipher_show,
#endif
.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
@@ -432,6 +457,28 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
return 0;
}
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_ablkcipher.geniv ?: "<built-in>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
@@ -454,6 +501,7 @@ const struct crypto_type crypto_givcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_givcipher_show,
#endif
.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
......
@@ -21,6 +21,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
@@ -109,6 +111,28 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
aead->geniv ?: "<built-in>");
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
sizeof(struct crypto_report_aead), &raead);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
@@ -130,6 +154,7 @@ const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
.report = crypto_aead_report,
};
EXPORT_SYMBOL_GPL(crypto_aead_type);
@@ -165,6 +190,28 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
sizeof(struct crypto_report_aead), &raead);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
@@ -186,6 +233,7 @@ const struct crypto_type crypto_nivaead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_nivaead_show,
#endif
.report = crypto_nivaead_report,
};
EXPORT_SYMBOL_GPL(crypto_nivaead_type);
......
@@ -21,6 +21,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
@@ -397,6 +399,24 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
return sizeof(struct crypto_shash *);
}
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -415,6 +435,7 @@ const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
.report = crypto_ahash_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
......
@@ -22,8 +22,6 @@
#include "internal.h"
static void crypto_remove_final(struct list_head *list);
static LIST_HEAD(crypto_template_list);
void crypto_larval_error(const char *name, u32 type, u32 mask)
@@ -129,9 +127,8 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
BUG_ON(!list_empty(&inst->alg.cra_users));
}
static void crypto_remove_spawns(struct crypto_alg *alg,
struct list_head *list,
struct crypto_alg *nalg)
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg)
{
u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
@@ -177,6 +174,7 @@ static void crypto_remove_spawns(struct crypto_alg *alg,
crypto_remove_spawn(spawn, list);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
{
@@ -321,7 +319,7 @@ void crypto_alg_tested(const char *name, int err)
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
static void crypto_remove_final(struct list_head *list)
void crypto_remove_final(struct list_head *list)
{
struct crypto_alg *alg;
struct crypto_alg *n;
@@ -331,6 +329,7 @@ static void crypto_remove_final(struct list_head *list)
crypto_alg_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
static void crypto_wait_for_test(struct crypto_larval *larval)
{
@@ -493,6 +492,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
goto err;
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
down_write(&crypto_alg_sem);
......
@@ -24,6 +24,8 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
@@ -492,6 +494,28 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return crypto_init_blkcipher_ops_async(tfm);
}
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_blkcipher.geniv ?: "<default>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
@@ -511,6 +535,7 @@ const struct crypto_type crypto_blkcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_blkcipher_show,
#endif
.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
......
/*
* Cryptographic API.
*
* Common Blowfish algorithm parts shared between the c and assembler
* implementations.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
@@ -22,15 +25,7 @@
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
#define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4
#define BF_MAX_KEY_SIZE 56
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
@@ -309,9 +304,9 @@ static const u32 bf_sbox[256 * 4] = {
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) b ^= P[n]; a ^= bf_F (b)
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
/*
* The blowfish encipher, processes 64-bit blocks.
@@ -348,57 +343,10 @@ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
dst[1] = yl;
}
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
u32 in32[2], out32[2];
in32[0] = be32_to_cpu(in_blk[0]);
in32[1] = be32_to_cpu(in_blk[1]);
encrypt_block(crypto_tfm_ctx(tfm), out32, in32);
out_blk[0] = cpu_to_be32(out32[0]);
out_blk[1] = cpu_to_be32(out32[1]);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
ROUND(yr, yl, 15);
ROUND(yl, yr, 14);
ROUND(yr, yl, 13);
ROUND(yl, yr, 12);
ROUND(yr, yl, 11);
ROUND(yl, yr, 10);
ROUND(yr, yl, 9);
ROUND(yl, yr, 8);
ROUND(yr, yl, 7);
ROUND(yl, yr, 6);
ROUND(yr, yl, 5);
ROUND(yl, yr, 4);
ROUND(yr, yl, 3);
ROUND(yl, yr, 2);
yl ^= P[1];
yr ^= P[0];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *P = ctx->p;
@@ -448,35 +396,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
/* Bruce says not to bother with the weak key check. */
return 0;
}
EXPORT_SYMBOL_GPL(blowfish_setkey);
static struct crypto_alg alg = {
.cra_name = "blowfish",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = bf_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
};
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); MODULE_DESCRIPTION("Blowfish Cipher common functions");
/*
* Cryptographic API.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
* Adapted from Kerneli implementation.
*
* Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org>
* Copyright (c) Kyle McMartin <kyle@debian.org>
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
/*
* Round loop unrolling macros, S is a pointer to a S-Box array
* organized in 4 unsigned longs at a row.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 0);
ROUND(yl, yr, 1);
ROUND(yr, yl, 2);
ROUND(yl, yr, 3);
ROUND(yr, yl, 4);
ROUND(yl, yr, 5);
ROUND(yr, yl, 6);
ROUND(yl, yr, 7);
ROUND(yr, yl, 8);
ROUND(yl, yr, 9);
ROUND(yr, yl, 10);
ROUND(yl, yr, 11);
ROUND(yr, yl, 12);
ROUND(yl, yr, 13);
ROUND(yr, yl, 14);
ROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
ROUND(yr, yl, 15);
ROUND(yl, yr, 14);
ROUND(yr, yl, 13);
ROUND(yl, yr, 12);
ROUND(yr, yl, 11);
ROUND(yl, yr, 10);
ROUND(yr, yl, 9);
ROUND(yl, yr, 8);
ROUND(yr, yl, 7);
ROUND(yl, yr, 6);
ROUND(yr, yl, 5);
ROUND(yl, yr, 4);
ROUND(yr, yl, 3);
ROUND(yl, yr, 2);
yl ^= P[1];
yr ^= P[0];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
static struct crypto_alg alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = blowfish_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
};
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_ALIAS("blowfish");
@@ -945,7 +945,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
module_init(cryptd_init);
subsys_initcall(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
......
This diff is collapsed.
@@ -86,6 +86,9 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
......
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/compress.h>
#include <crypto/internal/compress.h>
@@ -46,6 +48,21 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
return 0;
}
static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused)); __attribute__ ((unused));
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
...@@ -60,6 +77,7 @@ static const struct crypto_type crypto_pcomp_type = { ...@@ -60,6 +77,7 @@ static const struct crypto_type crypto_pcomp_type = {
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
.show = crypto_pcomp_show, .show = crypto_pcomp_show,
#endif #endif
.report = crypto_pcomp_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_PCOMPRESS, .type = CRYPTO_ALG_TYPE_PCOMPRESS,
......
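For context on the error-handling shape of the new report callbacks: NLA_PUT from <net/netlink.h> jumps to the nla_put_failure label when the attribute does not fit, which the callbacks then translate into -EMSGSIZE. A rough paraphrase of that helper (shown only to explain the label, not part of this diff):

/* Rough paraphrase of the NLA_PUT helper used by the report callbacks. */
#define NLA_PUT(skb, attrtype, attrlen, data)				\
	do {								\
		if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
			goto nla_put_failure;				\
	} while (0)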
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
static DEFINE_MUTEX(crypto_default_rng_lock); static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng; struct crypto_rng *crypto_default_rng;
...@@ -58,6 +60,23 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask) ...@@ -58,6 +60,23 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0; return 0;
} }
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
rrng.seedsize = alg->cra_rng.seedsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG,
sizeof(struct crypto_report_rng), &rrng);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused)); __attribute__ ((unused));
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
...@@ -78,6 +97,7 @@ const struct crypto_type crypto_rng_type = { ...@@ -78,6 +97,7 @@ const struct crypto_type crypto_rng_type = {
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
.show = crypto_rng_show, .show = crypto_rng_show,
#endif #endif
.report = crypto_rng_report,
}; };
EXPORT_SYMBOL_GPL(crypto_rng_type); EXPORT_SYMBOL_GPL(crypto_rng_type);
......
...@@ -36,7 +36,7 @@ static int sha1_init(struct shash_desc *desc) ...@@ -36,7 +36,7 @@ static int sha1_init(struct shash_desc *desc)
return 0; return 0;
} }
static int sha1_update(struct shash_desc *desc, const u8 *data, int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
struct sha1_state *sctx = shash_desc_ctx(desc); struct sha1_state *sctx = shash_desc_ctx(desc);
...@@ -71,6 +71,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, ...@@ -71,6 +71,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
return 0; return 0;
} }
EXPORT_SYMBOL(crypto_sha1_update);
/* Add padding and return the message digest. */ /* Add padding and return the message digest. */
...@@ -87,10 +88,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out) ...@@ -87,10 +88,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64 */ /* Pad out to 56 mod 64 */
index = sctx->count & 0x3f; index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index); padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(desc, padding, padlen); crypto_sha1_update(desc, padding, padlen);
/* Append length */ /* Append length */
sha1_update(desc, (const u8 *)&bits, sizeof(bits)); crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */ /* Store state in digest */
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
...@@ -121,7 +122,7 @@ static int sha1_import(struct shash_desc *desc, const void *in) ...@@ -121,7 +122,7 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = { static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE, .digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init, .init = sha1_init,
.update = sha1_update, .update = crypto_sha1_update,
.final = sha1_final, .final = sha1_final,
.export = sha1_export, .export = sha1_export,
.import = sha1_import, .import = sha1_import,
......
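The rename and export of crypto_sha1_update() above lets arch-optimized SHA-1 drivers (such as the SSSE3 implementation added elsewhere in this series) fall back to the generic C path when SIMD state is unavailable. A simplified, hedged sketch of that fallback pattern; sha1_accel_update() and the omitted transform call are placeholders, not code from this diff.

/* Hedged sketch of the fallback enabled by exporting crypto_sha1_update(). */
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <asm/i387.h>

static int sha1_accel_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	/* FPU/SIMD state may not be usable here (e.g. in IRQ context),
	 * so defer to the exported generic implementation. */
	if (!irq_fpu_usable())
		return crypto_sha1_update(desc, data, len);

	kernel_fpu_begin();
	/* ... feed full blocks to the assembler transform here ... */
	kernel_fpu_end();

	return 0;
}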
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h" #include "internal.h"
...@@ -522,6 +524,24 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg) ...@@ -522,6 +524,24 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
return alg->cra_ctxsize; return alg->cra_ctxsize;
} }
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused)); __attribute__ ((unused));
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
...@@ -541,6 +561,7 @@ static const struct crypto_type crypto_shash_type = { ...@@ -541,6 +561,7 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
.show = crypto_shash_show, .show = crypto_shash_show,
#endif #endif
.report = crypto_shash_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH, .type = CRYPTO_ALG_TYPE_SHASH,
......
...@@ -782,11 +782,13 @@ static int do_test(int m) ...@@ -782,11 +782,13 @@ static int do_test(int m)
case 7: case 7:
ret += tcrypt_test("ecb(blowfish)"); ret += tcrypt_test("ecb(blowfish)");
ret += tcrypt_test("cbc(blowfish)"); ret += tcrypt_test("cbc(blowfish)");
ret += tcrypt_test("ctr(blowfish)");
break; break;
case 8: case 8:
ret += tcrypt_test("ecb(twofish)"); ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)"); ret += tcrypt_test("cbc(twofish)");
ret += tcrypt_test("ctr(twofish)");
break; break;
case 9: case 9:
...@@ -1039,6 +1041,10 @@ static int do_test(int m) ...@@ -1039,6 +1041,10 @@ static int do_test(int m)
speed_template_16_24_32); speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32); speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break; break;
case 203: case 203:
...@@ -1050,6 +1056,10 @@ static int do_test(int m) ...@@ -1050,6 +1056,10 @@ static int do_test(int m)
speed_template_8_32); speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32); speed_template_8_32);
test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break; break;
case 204: case 204:
......
...@@ -1755,6 +1755,36 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1755,6 +1755,36 @@ static const struct alg_test_desc alg_test_descs[] = {
} }
} }
} }
}, {
.alg = "ctr(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = bf_ctr_enc_tv_template,
.count = BF_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = bf_ctr_dec_tv_template,
.count = BF_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_ctr_enc_tv_template,
.count = TF_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_ctr_dec_tv_template,
.count = TF_CTR_DEC_TEST_VECTORS
}
}
}
}, { }, {
.alg = "cts(cbc(aes))", .alg = "cts(cbc(aes))",
.test = alg_test_skcipher, .test = alg_test_skcipher,
......
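The new tcrypt and testmgr entries above exercise the ctr(blowfish) and ctr(twofish) templates. A hedged sketch of the construction they allocate, using the synchronous blkcipher interface of this era; key, IV and buffer contents are placeholders and error handling is trimmed.

/* Hedged sketch: allocating and driving ctr(blowfish), as the new tests do. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <crypto/blowfish.h>

static int ctr_blowfish_demo(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[16] = { 0 }, iv[BF_BLOCK_SIZE] = { 0 };
	u8 buf[64] = { 0 };

	tfm = crypto_alloc_blkcipher("ctr(blowfish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey(tfm, key, sizeof(key));
	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	sg_init_one(&sg, buf, sizeof(buf));
	desc.tfm = tfm;
	desc.flags = 0;
	crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));

	crypto_free_blkcipher(tfm);
	return 0;
}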
This diff is collapsed.
...@@ -762,11 +762,17 @@ static const u64 C7[256] = { ...@@ -762,11 +762,17 @@ static const u64 C7[256] = {
0x86228644a411c286ULL, 0x86228644a411c286ULL,
}; };
static const u64 rc[WHIRLPOOL_ROUNDS + 1] = { static const u64 rc[WHIRLPOOL_ROUNDS] = {
0x0000000000000000ULL, 0x1823c6e887b8014fULL, 0x36a6d2f5796f9152ULL, 0x1823c6e887b8014fULL,
0x60bc9b8ea30c7b35ULL, 0x1de0d7c22e4bfe57ULL, 0x157737e59ff04adaULL, 0x36a6d2f5796f9152ULL,
0x58c9290ab1a06b85ULL, 0xbd5d10f4cb3e0567ULL, 0xe427418ba77d95d8ULL, 0x60bc9b8ea30c7b35ULL,
0xfbee7c66dd17479eULL, 0xca2dbf07ad5a8333ULL, 0x1de0d7c22e4bfe57ULL,
0x157737e59ff04adaULL,
0x58c9290ab1a06b85ULL,
0xbd5d10f4cb3e0567ULL,
0xe427418ba77d95d8ULL,
0xfbee7c66dd17479eULL,
0xca2dbf07ad5a8333ULL,
}; };
/** /**
...@@ -793,7 +799,7 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) { ...@@ -793,7 +799,7 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
state[6] = block[6] ^ (K[6] = wctx->hash[6]); state[6] = block[6] ^ (K[6] = wctx->hash[6]);
state[7] = block[7] ^ (K[7] = wctx->hash[7]); state[7] = block[7] ^ (K[7] = wctx->hash[7]);
for (r = 1; r <= WHIRLPOOL_ROUNDS; r++) { for (r = 0; r < WHIRLPOOL_ROUNDS; r++) {
L[0] = C0[(int)(K[0] >> 56) ] ^ L[0] = C0[(int)(K[0] >> 56) ] ^
C1[(int)(K[7] >> 48) & 0xff] ^ C1[(int)(K[7] >> 48) & 0xff] ^
......
...@@ -200,6 +200,7 @@ config CRYPTO_DEV_HIFN_795X ...@@ -200,6 +200,7 @@ config CRYPTO_DEV_HIFN_795X
select CRYPTO_BLKCIPHER select CRYPTO_BLKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI depends on PCI
depends on !ARCH_DMA_ADDR_T_64BIT
help help
This option allows you to have support for HIFN 795x crypto adapters. This option allows you to have support for HIFN 795x crypto adapters.
...@@ -266,7 +267,7 @@ config CRYPTO_DEV_OMAP_AES ...@@ -266,7 +267,7 @@ config CRYPTO_DEV_OMAP_AES
config CRYPTO_DEV_PICOXCELL config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines" tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on ARCH_PICOXCELL depends on ARCH_PICOXCELL && HAVE_CLK
select CRYPTO_AES select CRYPTO_AES
select CRYPTO_AUTHENC select CRYPTO_AUTHENC
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
......
...@@ -2744,10 +2744,8 @@ static int __init hifn_init(void) ...@@ -2744,10 +2744,8 @@ static int __init hifn_init(void)
unsigned int freq; unsigned int freq;
int err; int err;
if (sizeof(dma_addr_t) > 4) { /* HIFN supports only 32-bit addresses */
printk(KERN_INFO "HIFN supports only 32-bit addresses.\n"); BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
return -EINVAL;
}
if (strncmp(hifn_pll_ref, "ext", 3) && if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) { strncmp(hifn_pll_ref, "pci", 3)) {
......
...@@ -1006,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) ...@@ -1006,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags); spin_unlock_irqrestore(&qp->lock, flags);
out:
put_cpu(); put_cpu();
out:
n2_chunk_complete(req, NULL); n2_chunk_complete(req, NULL);
return err; return err;
} }
...@@ -1096,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) ...@@ -1096,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags); spin_unlock_irqrestore(&qp->lock, flags);
out:
put_cpu(); put_cpu();
out:
n2_chunk_complete(req, err ? NULL : final_iv_addr); n2_chunk_complete(req, err ? NULL : final_iv_addr);
return err; return err;
} }
......
...@@ -508,10 +508,8 @@ static int __init padlock_init(void) ...@@ -508,10 +508,8 @@ static int __init padlock_init(void)
int ret; int ret;
struct cpuinfo_x86 *c = &cpu_data(0); struct cpuinfo_x86 *c = &cpu_data(0);
if (!cpu_has_xcrypt) { if (!cpu_has_xcrypt)
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
return -ENODEV; return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) { if (!cpu_has_xcrypt_enabled) {
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
...@@ -1241,8 +1242,8 @@ static void spacc_spacc_complete(unsigned long data) ...@@ -1241,8 +1242,8 @@ static void spacc_spacc_complete(unsigned long data)
spin_unlock_irqrestore(&engine->hw_lock, flags); spin_unlock_irqrestore(&engine->hw_lock, flags);
list_for_each_entry_safe(req, tmp, &completed, list) { list_for_each_entry_safe(req, tmp, &completed, list) {
req->complete(req);
list_del(&req->list); list_del(&req->list);
req->complete(req);
} }
} }
...@@ -1657,10 +1658,33 @@ static struct spacc_alg l2_engine_algs[] = { ...@@ -1657,10 +1658,33 @@ static struct spacc_alg l2_engine_algs[] = {
}, },
}; };
static int __devinit spacc_probe(struct platform_device *pdev, #ifdef CONFIG_OF
unsigned max_ctxs, size_t cipher_pg_sz, static const struct of_device_id spacc_of_id_table[] = {
size_t hash_pg_sz, size_t fifo_sz, { .compatible = "picochip,spacc-ipsec" },
struct spacc_alg *algs, size_t num_algs) { .compatible = "picochip,spacc-l2" },
{}
};
#else /* CONFIG_OF */
#define spacc_of_id_table NULL
#endif /* CONFIG_OF */
static bool spacc_is_compatible(struct platform_device *pdev,
const char *spacc_type)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
if (platid && !strcmp(platid->name, spacc_type))
return true;
#ifdef CONFIG_OF
if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
return true;
#endif /* CONFIG_OF */
return false;
}
static int __devinit spacc_probe(struct platform_device *pdev)
{ {
int i, err, ret = -EINVAL; int i, err, ret = -EINVAL;
struct resource *mem, *irq; struct resource *mem, *irq;
...@@ -1669,13 +1693,25 @@ static int __devinit spacc_probe(struct platform_device *pdev, ...@@ -1669,13 +1693,25 @@ static int __devinit spacc_probe(struct platform_device *pdev,
if (!engine) if (!engine)
return -ENOMEM; return -ENOMEM;
engine->max_ctxs = max_ctxs; if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
engine->cipher_pg_sz = cipher_pg_sz; engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
engine->hash_pg_sz = hash_pg_sz; engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
engine->fifo_sz = fifo_sz; engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
engine->algs = algs; engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
engine->num_algs = num_algs; engine->algs = ipsec_engine_algs;
engine->name = dev_name(&pdev->dev); engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
engine->algs = l2_engine_algs;
engine->num_algs = ARRAY_SIZE(l2_engine_algs);
} else {
return -EINVAL;
}
engine->name = dev_name(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
...@@ -1711,7 +1747,7 @@ static int __devinit spacc_probe(struct platform_device *pdev, ...@@ -1711,7 +1747,7 @@ static int __devinit spacc_probe(struct platform_device *pdev,
spin_lock_init(&engine->hw_lock); spin_lock_init(&engine->hw_lock);
engine->clk = clk_get(&pdev->dev, NULL); engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) { if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n"); dev_info(&pdev->dev, "clk unavailable\n");
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
...@@ -1800,72 +1836,33 @@ static int __devexit spacc_remove(struct platform_device *pdev) ...@@ -1800,72 +1836,33 @@ static int __devexit spacc_remove(struct platform_device *pdev)
return 0; return 0;
} }
static int __devinit ipsec_probe(struct platform_device *pdev) static const struct platform_device_id spacc_id_table[] = {
{ { "picochip,spacc-ipsec", },
return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS, { "picochip,spacc-l2", },
SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
ARRAY_SIZE(ipsec_engine_algs));
}
static struct platform_driver ipsec_driver = {
.probe = ipsec_probe,
.remove = __devexit_p(spacc_remove),
.driver = {
.name = "picoxcell-ipsec",
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
},
}; };
static int __devinit l2_probe(struct platform_device *pdev) static struct platform_driver spacc_driver = {
{ .probe = spacc_probe,
return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
SPACC_CRYPTO_L2_CIPHER_PG_SZ,
SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
}
static struct platform_driver l2_driver = {
.probe = l2_probe,
.remove = __devexit_p(spacc_remove), .remove = __devexit_p(spacc_remove),
.driver = { .driver = {
.name = "picoxcell-l2", .name = "picochip,spacc",
#ifdef CONFIG_PM #ifdef CONFIG_PM
.pm = &spacc_pm_ops, .pm = &spacc_pm_ops,
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
.of_match_table = spacc_of_id_table,
}, },
.id_table = spacc_id_table,
}; };
static int __init spacc_init(void) static int __init spacc_init(void)
{ {
int ret = platform_driver_register(&ipsec_driver); return platform_driver_register(&spacc_driver);
if (ret) {
pr_err("failed to register ipsec spacc driver");
goto out;
}
ret = platform_driver_register(&l2_driver);
if (ret) {
pr_err("failed to register l2 spacc driver");
goto l2_failed;
}
return 0;
l2_failed:
platform_driver_unregister(&ipsec_driver);
out:
return ret;
} }
module_init(spacc_init); module_init(spacc_init);
static void __exit spacc_exit(void) static void __exit spacc_exit(void)
{ {
platform_driver_unregister(&ipsec_driver); platform_driver_unregister(&spacc_driver);
platform_driver_unregister(&l2_driver);
} }
module_exit(spacc_exit); module_exit(spacc_exit);
......
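With the separate ipsec/l2 probe wrappers folded into one driver keyed off spacc_id_table (and the OF match table for device tree), a non-DT platform would now instantiate the engine by device name. A hypothetical board-file sketch; the register window and IRQ number are placeholders, not taken from this diff.

/* Hypothetical board-file sketch: registering the IPSEC SPAcc by name. */
#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource spacc_ipsec_resources[] = {
	{ .start = 0x80040000, .end = 0x8004ffff, .flags = IORESOURCE_MEM },
	{ .start = 24, .end = 24, .flags = IORESOURCE_IRQ },
};

static struct platform_device spacc_ipsec_device = {
	.name		= "picochip,spacc-ipsec",
	.id		= -1,
	.resource	= spacc_ipsec_resources,
	.num_resources	= ARRAY_SIZE(spacc_ipsec_resources),
};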
...@@ -416,7 +416,7 @@ static void talitos_done(unsigned long data) ...@@ -416,7 +416,7 @@ static void talitos_done(unsigned long data)
/* /*
* locate current (offending) descriptor * locate current (offending) descriptor
*/ */
static struct talitos_desc *current_desc(struct device *dev, int ch) static u32 current_desc_hdr(struct device *dev, int ch)
{ {
struct talitos_private *priv = dev_get_drvdata(dev); struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->chan[ch].tail; int tail = priv->chan[ch].tail;
...@@ -428,23 +428,25 @@ static struct talitos_desc *current_desc(struct device *dev, int ch) ...@@ -428,23 +428,25 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
tail = (tail + 1) & (priv->fifo_len - 1); tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->chan[ch].tail) { if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n"); dev_err(dev, "couldn't locate current descriptor\n");
return NULL; return 0;
} }
} }
return priv->chan[ch].fifo[tail].desc; return priv->chan[ch].fifo[tail].desc->hdr;
} }
/* /*
* user diagnostics; report root cause of error based on execution unit status * user diagnostics; report root cause of error based on execution unit status
*/ */
static void report_eu_error(struct device *dev, int ch, static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
struct talitos_desc *desc)
{ {
struct talitos_private *priv = dev_get_drvdata(dev); struct talitos_private *priv = dev_get_drvdata(dev);
int i; int i;
switch (desc->hdr & DESC_HDR_SEL0_MASK) { if (!desc_hdr)
desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU: case DESC_HDR_SEL0_AFEU:
dev_err(dev, "AFEUISR 0x%08x_%08x\n", dev_err(dev, "AFEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AFEUISR), in_be32(priv->reg + TALITOS_AFEUISR),
...@@ -488,7 +490,7 @@ static void report_eu_error(struct device *dev, int ch, ...@@ -488,7 +490,7 @@ static void report_eu_error(struct device *dev, int ch,
break; break;
} }
switch (desc->hdr & DESC_HDR_SEL1_MASK) { switch (desc_hdr & DESC_HDR_SEL1_MASK) {
case DESC_HDR_SEL1_MDEUA: case DESC_HDR_SEL1_MDEUA:
case DESC_HDR_SEL1_MDEUB: case DESC_HDR_SEL1_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n", dev_err(dev, "MDEUISR 0x%08x_%08x\n",
...@@ -550,7 +552,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo) ...@@ -550,7 +552,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_IEU) if (v_lo & TALITOS_CCPSR_LO_IEU)
dev_err(dev, "invalid execution unit error\n"); dev_err(dev, "invalid execution unit error\n");
if (v_lo & TALITOS_CCPSR_LO_EU) if (v_lo & TALITOS_CCPSR_LO_EU)
report_eu_error(dev, ch, current_desc(dev, ch)); report_eu_error(dev, ch, current_desc_hdr(dev, ch));
if (v_lo & TALITOS_CCPSR_LO_GB) if (v_lo & TALITOS_CCPSR_LO_GB)
dev_err(dev, "gather boundary error\n"); dev_err(dev, "gather boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_GRL) if (v_lo & TALITOS_CCPSR_LO_GRL)
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/skbuff.h>
struct module; struct module;
struct rtattr; struct rtattr;
...@@ -26,6 +27,7 @@ struct crypto_type { ...@@ -26,6 +27,7 @@ struct crypto_type {
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm); int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg); void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
unsigned int type; unsigned int type;
......
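The new report hook on struct crypto_type is driven from the netlink dump path in crypto/crypto_user.c, which is collapsed in this view. A heavily hedged sketch of how that dispatch presumably looks; crypto_report_type() is a hypothetical helper name, not code from this series.

/* Heavily hedged sketch of how the dump path presumably drives ->report. */
#include <linux/skbuff.h>
#include <crypto/algapi.h>

static int crypto_report_type(struct sk_buff *skb, struct crypto_alg *alg)
{
	if (alg->cra_type && alg->cra_type->report)
		return alg->cra_type->report(skb, alg);

	return 0;	/* types without a report callback are skipped */
}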
/*
* Common values for blowfish algorithms
*/
#ifndef _CRYPTO_BLOWFISH_H
#define _CRYPTO_BLOWFISH_H
#include <linux/types.h>
#include <linux/crypto.h>
#define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4
#define BF_MAX_KEY_SIZE 56
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len);
#endif
...@@ -82,4 +82,9 @@ struct sha512_state { ...@@ -82,4 +82,9 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE]; u8 buf[SHA512_BLOCK_SIZE];
}; };
struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
#endif #endif
...@@ -71,6 +71,11 @@ ...@@ -71,6 +71,11 @@
#define CRYPTO_ALG_TESTED 0x00000400 #define CRYPTO_ALG_TESTED 0x00000400
/*
 * Set if the algorithm is an instance that is built from templates.
*/
#define CRYPTO_ALG_INSTANCE 0x00000800
/* /*
* Transform masks and values (for crt_flags). * Transform masks and values (for crt_flags).
*/ */
......
/*
* Crypto user configuration API.
*
* Copyright (C) 2011 secunet Security Networks AG
* Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Netlink configuration messages. */
enum {
CRYPTO_MSG_BASE = 0x10,
CRYPTO_MSG_NEWALG = 0x10,
CRYPTO_MSG_DELALG,
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
/* Netlink message attributes. */
enum crypto_attr_type_t {
CRYPTOCFGA_UNSPEC,
CRYPTOCFGA_PRIORITY_VAL, /* __u32 */
CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */
CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */
CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */
CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */
CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
};
struct crypto_user_alg {
char cru_name[CRYPTO_MAX_ALG_NAME];
char cru_driver_name[CRYPTO_MAX_ALG_NAME];
char cru_module_name[CRYPTO_MAX_ALG_NAME];
__u32 cru_type;
__u32 cru_mask;
__u32 cru_refcnt;
__u32 cru_flags;
};
struct crypto_report_larval {
char type[CRYPTO_MAX_NAME];
};
struct crypto_report_hash {
char type[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int digestsize;
};
struct crypto_report_cipher {
char type[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
};
struct crypto_report_blkcipher {
char type[CRYPTO_MAX_NAME];
char geniv[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
};
struct crypto_report_aead {
char type[CRYPTO_MAX_NAME];
char geniv[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int maxauthsize;
unsigned int ivsize;
};
struct crypto_report_comp {
char type[CRYPTO_MAX_NAME];
};
struct crypto_report_rng {
char type[CRYPTO_MAX_NAME];
unsigned int seedsize;
};
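These structures are consumed over the new NETLINK_CRYPTO family (added to netlink.h below). A hedged userspace sketch of querying one algorithm with CRYPTO_MSG_GETALG; it assumes <linux/cryptouser.h> is available to userspace, uses "sha1-generic" only as an example driver name, and omits reply parsing, whose exact layout is defined by the collapsed crypto/crypto_user.c.

/* Hedged userspace sketch: one GETALG request over NETLINK_CRYPTO. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct crypto_user_alg cru;
	} req;
	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
	req.nh.nlmsg_type = CRYPTO_MSG_GETALG;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.cru.cru_driver_name, "sha1-generic",
		sizeof(req.cru.cru_driver_name) - 1);

	if (sendto(fd, &req, req.nh.nlmsg_len, 0,
		   (struct sockaddr *)&nl, sizeof(nl)) < 0)
		perror("sendto");

	/* recv() and attribute parsing omitted. */
	close(fd);
	return 0;
}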
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19 #define NETLINK_ECRYPTFS 19
#define NETLINK_RDMA 20 #define NETLINK_RDMA 20
#define NETLINK_CRYPTO 21 /* Crypto layer */
#define MAX_LINKS 32 #define MAX_LINKS 32
......