Commit 5f76eea8 authored by David Hildenbrand's avatar David Hildenbrand Committed by Ingo Molnar

sched/preempt, powerpc: Disable preemption in enable_kernel_altivec() explicitly

enable_kernel_altivec() has to be called with disabled preemption.
Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.
Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-14-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2f09b227
...@@ -27,11 +27,11 @@ int enter_vmx_usercopy(void) ...@@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
if (in_interrupt()) if (in_interrupt())
return 0; return 0;
/* This acts as preempt_disable() as well and will make preempt_disable();
* enable_kernel_altivec(). We need to disable page faults /*
* as they can call schedule and thus make us lose the VMX * We need to disable page faults as they can call schedule and
* context. So on page faults, we just fail which will cause * thus make us lose the VMX context. So on page faults, we just
* a fallback to the normal non-vmx copy. * fail which will cause a fallback to the normal non-vmx copy.
*/ */
pagefault_disable(); pagefault_disable();
...@@ -47,6 +47,7 @@ int enter_vmx_usercopy(void) ...@@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
int exit_vmx_usercopy(void) int exit_vmx_usercopy(void)
{ {
pagefault_enable(); pagefault_enable();
preempt_enable();
return 0; return 0;
} }
......
...@@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, ...@@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret; int ret;
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable(); pagefault_enable();
preempt_enable();
ret += crypto_cipher_setkey(ctx->fallback, key, keylen); ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
return ret; return ret;
} }
...@@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ...@@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
if (in_interrupt()) { if (in_interrupt()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src); crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else { } else {
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
aes_p8_encrypt(src, dst, &ctx->enc_key); aes_p8_encrypt(src, dst, &ctx->enc_key);
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
} }
...@@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ...@@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
if (in_interrupt()) { if (in_interrupt()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src); crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else { } else {
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
aes_p8_decrypt(src, dst, &ctx->dec_key); aes_p8_decrypt(src, dst, &ctx->dec_key);
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
} }
......
...@@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, ...@@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret; int ret;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
pagefault_enable(); pagefault_enable();
preempt_enable();
ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret; return ret;
...@@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, ...@@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
if (in_interrupt()) { if (in_interrupt()) {
ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
} else { } else {
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
...@@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, ...@@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
return ret; return ret;
...@@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, ...@@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
if (in_interrupt()) { if (in_interrupt()) {
ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
} else { } else {
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
...@@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, ...@@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
} }
pagefault_enable(); pagefault_enable();
preempt_enable();
} }
return ret; return ret;
......
...@@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, ...@@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
if (keylen != GHASH_KEY_LEN) if (keylen != GHASH_KEY_LEN)
return -EINVAL; return -EINVAL;
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
enable_kernel_fp(); enable_kernel_fp();
gcm_init_p8(ctx->htable, (const u64 *) key); gcm_init_p8(ctx->htable, (const u64 *) key);
pagefault_enable(); pagefault_enable();
preempt_enable();
return crypto_shash_setkey(ctx->fallback, key, keylen); return crypto_shash_setkey(ctx->fallback, key, keylen);
} }
...@@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc, ...@@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc,
} }
memcpy(dctx->buffer + dctx->bytes, src, memcpy(dctx->buffer + dctx->bytes, src,
GHASH_DIGEST_SIZE - dctx->bytes); GHASH_DIGEST_SIZE - dctx->bytes);
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
enable_kernel_fp(); enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE); GHASH_DIGEST_SIZE);
pagefault_enable(); pagefault_enable();
preempt_enable();
src += GHASH_DIGEST_SIZE - dctx->bytes; src += GHASH_DIGEST_SIZE - dctx->bytes;
srclen -= GHASH_DIGEST_SIZE - dctx->bytes; srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
dctx->bytes = 0; dctx->bytes = 0;
} }
len = srclen & ~(GHASH_DIGEST_SIZE - 1); len = srclen & ~(GHASH_DIGEST_SIZE - 1);
if (len) { if (len) {
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
enable_kernel_fp(); enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, src, len); gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
pagefault_enable(); pagefault_enable();
preempt_enable();
src += len; src += len;
srclen -= len; srclen -= len;
} }
...@@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) ...@@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
if (dctx->bytes) { if (dctx->bytes) {
for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
dctx->buffer[i] = 0; dctx->buffer[i] = 0;
preempt_disable();
pagefault_disable(); pagefault_disable();
enable_kernel_altivec(); enable_kernel_altivec();
enable_kernel_fp(); enable_kernel_fp();
gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
GHASH_DIGEST_SIZE); GHASH_DIGEST_SIZE);
pagefault_enable(); pagefault_enable();
preempt_enable();
dctx->bytes = 0; dctx->bytes = 0;
} }
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment