Commit 21d9bb4a authored by Borislav Petkov, committed by Ingo Molnar

x86/mm: Make the SME mask a u64

The SME encryption mask is for masking 64-bit pagetable entries. Being
an unsigned long, it works fine on X86_64, but on 32-bit builds it
truncates bits, leading to Xen guests crashing very early.

Regardless, the SME mask handling shouldn't have leaked into 32-bit at
all, because SME is an X86_64-only feature. So, first make the mask a
u64. Then, add trivial 32-bit versions of the __sme_* macros so that
they do nothing there.
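
To make the truncation concrete, here is a minimal user-space sketch
(an illustration, not kernel code: uint32_t stands in for a 32-bit
build's unsigned long, and the zero masks mimic sme_me_mask on a
machine without SME, as in the Xen-guest case):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 64-bit pagetable entry with a physical-address bit above bit 31. */
	uint64_t pte = (1ULL << 47) | 0x1000;

	/*
	 * Old behaviour: __sme_clr() cast its argument to unsigned long,
	 * which is 32 bits wide on a 32-bit build.  Bits 32-63 of the
	 * entry are silently dropped, even though the mask is zero.
	 */
	uint32_t mask32 = 0;
	uint32_t bad = (uint32_t)pte & ~mask32;

	/* Fixed behaviour: the mask and the whole computation stay 64-bit. */
	uint64_t mask64 = 0;
	uint64_t good = pte & ~mask64;

	printf("truncated: %#" PRIx32 "\n", bad);	/* 0x1000         */
	printf("intact:    %#" PRIx64 "\n", good);	/* 0x800000001000 */
	return 0;
}
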
Reported-and-tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Tom Lendacky <Thomas.Lendacky@amd.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tom Lendacky <Thomas.Lendacky@amd.com>
Fixes: 21729f81 ("x86/mm: Provide general kernel support for memory encryption")
Link: http://lkml.kernel.org/r/20170907093837.76zojtkgebwtqc74@pd.tnic
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1c9fe440
arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;
 
 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
 			 unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL
 
 static inline void __init sme_early_encrypt(resource_size_t paddr,
 					    unsigned long size) { }
arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
@@ -30,18 +30,23 @@ static inline bool sme_active(void)
 	return !!sme_me_mask;
 }
 
-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
  * the encryption mask from a value (e.g. when dealing with pagetable
  * entries).
  */
-#define __sme_set(x)		((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x)		((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x)		((x) | sme_me_mask)
+#define __sme_clr(x)		((x) & ~sme_me_mask)
+#else
+#define __sme_set(x)	(x)
+#define __sme_clr(x)	(x)
+#endif
 
 #endif /* __ASSEMBLY__ */
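
A note on the new #else branch above: with CONFIG_AMD_MEM_ENCRYPT
disabled, __sme_set()/__sme_clr() now reduce to (x) and compile away.
Here is a small self-contained sketch of how the fixed 64-bit variants
behave (user-space stand-ins; the bit-47 mask position is only an
illustrative C-bit value, in practice it comes from CPUID):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-in for the kernel's sme_me_mask; bit 47 is just an example. */
static u64 sme_me_mask = 1ULL << 47;

/* The fixed macros: no cast, so 64-bit values pass through intact. */
#define __sme_set(x)	((x) | sme_me_mask)
#define __sme_clr(x)	((x) & ~sme_me_mask)

int main(void)
{
	u64 pte = 0x1000;		/* example pagetable entry    */
	u64 enc = __sme_set(pte);	/* mark the mapping encrypted */
	u64 dec = __sme_clr(enc);	/* strip the encryption bit   */

	printf("enc: %#" PRIx64 "\ndec: %#" PRIx64 "\n", enc, dec);
	return 0;
}

Because neither macro casts its argument any more, a 64-bit pagetable
entry passes through unchanged even on a 32-bit build.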