Commit da154e82 authored by Borislav Petkov, committed by Ingo Molnar

x86/cpufeature: Replace cpu_has_avx with boot_cpu_has() usage
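
The conversion is mechanical at every call site: the removed convenience macro is
replaced by a direct test of the feature bit on the boot CPU. A minimal sketch of
the pattern (do_avx_work() is a placeholder, not a function touched by this patch):

	/* before: per-feature wrapper macro, removed from cpufeature.h below */
	if (cpu_has_avx)
		do_avx_work();

	/* after: test the AVX feature bit on boot_cpu_data directly */
	if (boot_cpu_has(X86_FEATURE_AVX))
		do_avx_work();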

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-crypto@vger.kernel.org
Link: http://lkml.kernel.org/r/1459801503-15600-4-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1f4dd793
@@ -1477,7 +1477,7 @@ static int __init aesni_init(void)
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
 #ifdef CONFIG_AS_AVX
-	if (cpu_has_avx) {
+	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
 		pr_info("AES CTR mode by8 optimization enabled\n");
...
@@ -562,7 +562,8 @@ static int __init camellia_aesni_init(void)
 {
 	const char *feature_name;
 
-	if (!boot_cpu_has(X86_FEATURE_AVX2) || !cpu_has_avx ||
+	if (!boot_cpu_has(X86_FEATURE_AVX) ||
+	    !boot_cpu_has(X86_FEATURE_AVX2) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX2 or AES-NI instructions are not detected.\n");
...
@@ -554,7 +554,7 @@ static int __init camellia_aesni_init(void)
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx ||
+	if (!boot_cpu_has(X86_FEATURE_AVX) ||
 	    !boot_cpu_has(X86_FEATURE_AES) ||
 	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX or AES-NI instructions are not detected.\n");
...
@@ -129,7 +129,8 @@ static int __init chacha20_simd_mod_init(void)
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
-	chacha20_use_avx2 = cpu_has_avx && boot_cpu_has(X86_FEATURE_AVX2) &&
+	chacha20_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
+			    boot_cpu_has(X86_FEATURE_AVX2) &&
 			    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
 #endif
 	return crypto_register_alg(&alg);
...
@@ -183,7 +183,8 @@ static int __init poly1305_simd_mod_init(void)
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
-	poly1305_use_avx2 = cpu_has_avx && boot_cpu_has(X86_FEATURE_AVX2) &&
+	poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
+			    boot_cpu_has(X86_FEATURE_AVX2) &&
 			    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
 	alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
 	if (poly1305_use_avx2)
...
@@ -166,7 +166,7 @@ static struct shash_alg sha1_avx_alg = {
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -201,7 +201,7 @@ static struct shash_alg sha256_avx_algs[] = { {
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -151,7 +151,7 @@ asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -123,7 +123,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
 /*
...
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
+	if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
 		xor_speed(&xor_block_avx); \
 } while (0)
 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
+	(boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
 #else
...