Commit 4fe71dba authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: aesni-intel - Fix irq_fpu_usable usage
  crypto: padlock-sha - Fix stack alignment
parents 4223a4a1 13b79b97
...@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, ...@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
return -EINVAL; return -EINVAL;
} }
if (irq_fpu_usable()) if (!irq_fpu_usable())
err = crypto_aes_expand_key(ctx, in_key, key_len); err = crypto_aes_expand_key(ctx, in_key, key_len);
else { else {
kernel_fpu_begin(); kernel_fpu_begin();
...@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ...@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{ {
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (irq_fpu_usable()) if (!irq_fpu_usable())
crypto_aes_encrypt_x86(ctx, dst, src); crypto_aes_encrypt_x86(ctx, dst, src);
else { else {
kernel_fpu_begin(); kernel_fpu_begin();
...@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ...@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{ {
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
if (irq_fpu_usable()) if (!irq_fpu_usable())
crypto_aes_decrypt_x86(ctx, dst, src); crypto_aes_decrypt_x86(ctx, dst, src);
else { else {
kernel_fpu_begin(); kernel_fpu_begin();
...@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req) ...@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (irq_fpu_usable()) { if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req = struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req); ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req)); memcpy(cryptd_req, req, sizeof(*req));
...@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req) ...@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (irq_fpu_usable()) { if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req = struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req); ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req)); memcpy(cryptd_req, req, sizeof(*req));
......
...@@ -24,6 +24,12 @@ ...@@ -24,6 +24,12 @@
#include <asm/i387.h> #include <asm/i387.h>
#include "padlock.h" #include "padlock.h"
#ifdef CONFIG_64BIT
#define STACK_ALIGN 16
#else
#define STACK_ALIGN 4
#endif
struct padlock_sha_desc { struct padlock_sha_desc {
struct shash_desc fallback; struct shash_desc fallback;
}; };
...@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, ...@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */ /* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes! /* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */ * PadLock microcode needs it that big. */
char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
struct sha1_state state; struct sha1_state state;
unsigned int space; unsigned int space;
...@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, ...@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */ /* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes! /* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */ * PadLock microcode needs it that big. */
char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
struct sha256_state state; struct sha256_state state;
unsigned int space; unsigned int space;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment