Commit 1c16dfbe authored by Ard Biesheuvel, committed by Herbert Xu

crypto: memneq - avoid implicit unaligned accesses

The C standard does not support dereferencing pointers that are not
aligned with respect to the pointed-to type, and doing so is technically
undefined behavior, even if the underlying hardware supports it.

This means that conditionally dereferencing such pointers based on
whether CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y is not the right thing
to do, and actually results in alignment faults on ARM, which are fixed
up on a slow path. Instead, we should use the unaligned accessors in
such cases: on architectures that don't care about alignment, they result
in identical codegen, whereas on ARM, for example, the codegen avoids
doubleword loads and stores in favor of ordinary ones, which tolerate
misalignment.
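To make the distinction concrete, here is a minimal, illustrative sketch of a
memcpy-based unaligned load in the spirit of get_unaligned(). The helper name
load_ulong_unaligned() is hypothetical; the kernel's real accessors come from
<asm/unaligned.h> and are not defined this way in memneq.c:

#include <string.h>

/*
 * Illustrative only: a portable unaligned load in the spirit of
 * get_unaligned(). memcpy() carries no alignment requirement, so this
 * is well-defined for any pointer, and compilers targeting machines
 * with fast unaligned loads fold it into a single load instruction.
 */
static inline unsigned long load_ulong_unaligned(const void *p)
{
	unsigned long v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/*
 * In contrast, this is undefined behavior whenever p is misaligned,
 * even on hardware that would handle the access:
 *
 *	unsigned long v = *(const unsigned long *)p;
 */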

Link: https://lore.kernel.org/linux-crypto/CAHk-=wiKkdYLY0bv+nXrcJz3NH9mAqPAafX7PpW5EwVtxsEu7Q@mail.gmail.com/
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 66eae850
--- a/crypto/memneq.c
+++ b/crypto/memneq.c
@@ -60,6 +60,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <asm/unaligned.h>
 
 #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
 
@@ -71,7 +72,8 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	while (size >= sizeof(unsigned long)) {
-		neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+		neq |= get_unaligned((unsigned long *)a) ^
+		       get_unaligned((unsigned long *)b);
 		OPTIMIZER_HIDE_VAR(neq);
 		a += sizeof(unsigned long);
 		b += sizeof(unsigned long);
@@ -95,18 +97,24 @@ static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	if (sizeof(unsigned long) == 8) {
-		neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b);
+		neq |= get_unaligned((unsigned long *)a) ^
+		       get_unaligned((unsigned long *)b);
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+		neq |= get_unaligned((unsigned long *)(a + 8)) ^
+		       get_unaligned((unsigned long *)(b + 8));
 		OPTIMIZER_HIDE_VAR(neq);
 	} else if (sizeof(unsigned int) == 4) {
-		neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b);
+		neq |= get_unaligned((unsigned int *)a) ^
+		       get_unaligned((unsigned int *)b);
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4);
+		neq |= get_unaligned((unsigned int *)(a + 4)) ^
+		       get_unaligned((unsigned int *)(b + 4));
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8);
+		neq |= get_unaligned((unsigned int *)(a + 8)) ^
+		       get_unaligned((unsigned int *)(b + 8));
 		OPTIMIZER_HIDE_VAR(neq);
-		neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+		neq |= get_unaligned((unsigned int *)(a + 12)) ^
+		       get_unaligned((unsigned int *)(b + 12));
 		OPTIMIZER_HIDE_VAR(neq);
 	} else
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
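For reference, here is a standalone sketch of the overall pattern the patch
arrives at, reusing the hypothetical load_ulong_unaligned() helper from the
earlier sketch. This is not the kernel function itself: the real
__crypto_memneq_generic() additionally inserts OPTIMIZER_HIDE_VAR() barriers
after each step so the comparison stays constant-time.

#include <stddef.h>

/*
 * Sketch of the memneq pattern: accumulate XOR differences word by
 * word through an alignment-safe accessor, then finish the tail a
 * byte at a time. Returns zero iff the buffers are equal.
 */
static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long neq = 0;

	while (size >= sizeof(unsigned long)) {
		neq |= load_ulong_unaligned(pa) ^ load_ulong_unaligned(pb);
		pa += sizeof(unsigned long);
		pb += sizeof(unsigned long);
		size -= sizeof(unsigned long);
	}
	while (size > 0) {
		neq |= *pa++ ^ *pb++;
		size--;
	}
	return neq;
}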