Commit 1c811d40 authored by Ard Biesheuvel, committed by Borislav Petkov (AMD)

x86/sev: Fix position dependent variable references in startup code

The early startup code executes from a 1:1 mapping of memory, which
differs from the mapping that the code was linked and/or relocated to
run at. The latter mapping is not active yet at this point, and so
symbol references that rely on it will fault.

Given that the core kernel is built without -fPIC, symbol references are
typically emitted as absolute, so any such reference occurring in the
early startup code will crash the kernel.
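
As an illustration (not taken from the patch), the same C-level access can be
lowered to either addressing form; the assembly in the comments below is
typical x86-64 compiler output under the stated flags, and read_sev_status()
is a made-up example function:

extern unsigned long sev_status;	/* stand-in for the real kernel global */

unsigned long read_sev_status(void)
{
	/*
	 * Built without -fPIC (e.g. with -mcmodel=kernel), this load is
	 * typically emitted with an absolute address:
	 *
	 *     movq    sev_status, %rax        # link-time virtual address;
	 *                                     # faults when running from the
	 *                                     # early 1:1 mapping
	 *
	 * A RIP-relative form keeps working at any load address, because
	 * code and data stay at the same relative offset:
	 *
	 *     movq    sev_status(%rip), %rax
	 */
	return sev_status;
}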

An attempt was made to work around this for the early SEV/SME startup code
by forcing RIP-relative addressing for certain global SEV/SME variables via
inline assembly (see snp_cpuid_get_table() for example), but RIP-relative
addressing must be enforced pervasively for SEV/SME global variables
whenever they are accessed before the page table fixups have run.

__startup_64() already handles this issue for select non-SEV/SME global
variables using fixup_pointer(), which adjusts the pointer relative to a
`physaddr` argument. To avoid having to pass this `physaddr` argument around
to every function that needs to apply pointer fixups, introduce a macro
RIP_REL_REF() which generates a RIP-relative reference to a given global
variable. It is used where necessary to force RIP-relative accesses to
global variables.
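
For reference, the existing helper reads roughly as follows (paraphrased from
arch/x86/kernel/head64.c, not part of this diff); __head and _text are kernel
internals, and every early caller has to thread the physaddr argument through
to it:

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	/* Rebase a link-time virtual pointer onto the physical address the
	 * startup code is currently executing at. */
	return ptr - (void *)_text + (void *)physaddr;
}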

To ease backporting, this patch makes no attempt to clean up other
occurrences of this pattern, involving either inline asm or fixup_pointer().
Those will be addressed later.
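
The following standalone sketch mirrors the rip_rel_ptr()/RIP_REL_REF()
helper added by this patch so its effect can be inspected outside the kernel.
It is an illustration under assumptions, not the kernel code itself: my_mask
and the build command are made up for the example, and it needs an optimizing
x86-64 GCC or Clang build without PIE (e.g. gcc -O2 -fno-pie -no-pie), since
the "i" constraint requires the symbol address to be a link-time constant:

#include <stdio.h>

/*
 * Materialize the address of a global with a RIP-relative leaq: the "i"
 * constraint passes the symbol address as an immediate, and %c1 prints it
 * without the '$' prefix, so the assembler sees "leaq sym(%rip), reg".
 */
static inline __attribute__((always_inline)) void *rip_rel_ptr(void *p)
{
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
	return p;
}

/* Access a global through that RIP-relative pointer, preserving its type. */
#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

static unsigned long my_mask;	/* stand-in for sme_me_mask / cc_mask */

int main(void)
{
	RIP_REL_REF(my_mask) = 1UL << 47;		/* write via the RIP-relative address */
	printf("mask = %#lx\n", RIP_REL_REF(my_mask));	/* read it back the same way */
	return 0;
}

Disassembling the binary (for instance with objdump -d) should show both
accesses of my_mask going through leaq my_mask(%rip), which is what makes
such references safe to execute from the early 1:1 mapping.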

  [ bp: Call it "rip_rel_ref" everywhere like other code shortens
    "rIP-relative reference" and make the asm wrapper __always_inline. ]
Co-developed-by: Kevin Loughlin <kevinloughlin@google.com>
Signed-off-by: Kevin Loughlin <kevinloughlin@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Cc: <stable@kernel.org>
Link: https://lore.kernel.org/all/20240130220845.1978329-1-kevinloughlin@google.com
parent f9e6f00d
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -14,7 +14,7 @@
 #include <asm/processor.h>
 
 enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
-static u64 cc_mask __ro_after_init;
+u64 cc_mask __ro_after_init;
 
 static bool noinstr intel_cc_platform_has(enum cc_attr attr)
 {
@@ -148,8 +148,3 @@ u64 cc_mkdec(u64 val)
 	}
 }
 EXPORT_SYMBOL_GPL(cc_mkdec);
-
-__init void cc_set_mask(u64 mask)
-{
-	cc_mask = mask;
-}
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -113,6 +113,20 @@
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifndef __pic__
+static __always_inline __pure void *rip_rel_ptr(void *p)
+{
+	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
+
+	return p;
+}
+#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))
+#else
+#define RIP_REL_REF(var)	(var)
+#endif
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_COCO_H
 #define _ASM_X86_COCO_H
 
+#include <asm/asm.h>
 #include <asm/types.h>
 
 enum cc_vendor {
@@ -11,9 +12,14 @@ enum cc_vendor {
 };
 
 extern enum cc_vendor cc_vendor;
+extern u64 cc_mask;
 
 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
-void cc_set_mask(u64 mask);
+static inline void cc_set_mask(u64 mask)
+{
+	RIP_REL_REF(cc_mask) = mask;
+}
+
 u64 cc_mkenc(u64 val);
 u64 cc_mkdec(u64 val);
 #else
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -15,7 +15,8 @@
 #include <linux/init.h>
 #include <linux/cc_platform.h>
 
-#include <asm/bootparam.h>
+#include <asm/asm.h>
+struct boot_params;
 
 #ifdef CONFIG_X86_MEM_ENCRYPT
 void __init mem_encrypt_init(void);
@@ -58,6 +59,11 @@ void __init mem_encrypt_free_decrypted_mem(void);
 
 void __init sev_es_init_vc_handling(void);
 
+static inline u64 sme_get_me_mask(void)
+{
+	return RIP_REL_REF(sme_me_mask);
+}
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline u64 sme_get_me_mask(void) { return 0; }
+
 #define __bss_decrypted
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -560,9 +560,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
 		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
 
 		/* Skip post-processing for out-of-range zero leafs. */
-		if (!(leaf->fn <= cpuid_std_range_max ||
-		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
-		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
+		if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
+		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
+		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
 			return 0;
 	}
 
@@ -1072,11 +1072,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
 
 		if (fn->eax_in == 0x0)
-			cpuid_std_range_max = fn->eax;
+			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
 		else if (fn->eax_in == 0x40000000)
-			cpuid_hyp_range_max = fn->eax;
+			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
 		else if (fn->eax_in == 0x80000000)
-			cpuid_ext_range_max = fn->eax;
+			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
 	}
 }
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -748,7 +748,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	/*
@@ -767,7 +767,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 	 * This eliminates worries about jump tables or checking boot_cpu_data
 	 * in the cc_platform_has() function.
 	 */
-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -304,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
 	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
 	 * function.
 	 */
-	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
+	if (!sme_get_me_mask() ||
+	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
 		return;
 
 	/*
@@ -541,11 +542,11 @@ void __init sme_enable(struct boot_params *bp)
 	me_mask = 1UL << (ebx & 0x3f);
 
 	/* Check the SEV MSR whether SEV or SME is enabled */
-	sev_status = __rdmsr(MSR_AMD64_SEV);
-	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
 	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
-	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
 		snp_abort();
 
 	/* Check if memory encryption is enabled */
@@ -571,7 +572,6 @@ void __init sme_enable(struct boot_params *bp)
 		return;
 	} else {
 		/* SEV state cannot be controlled by a command line option */
-		sme_me_mask = me_mask;
 		goto out;
 	}
 
@@ -590,16 +590,13 @@ void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
-		goto out;
-
-	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
-		sme_me_mask = me_mask;
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
+	    strncmp(buffer, cmdline_on, sizeof(buffer)))
+		return;
 
 out:
-	if (sme_me_mask) {
-		physical_mask &= ~sme_me_mask;
-		cc_vendor = CC_VENDOR_AMD;
-		cc_set_mask(sme_me_mask);
-	}
+	RIP_REL_REF(sme_me_mask) = me_mask;
+	physical_mask &= ~me_mask;
+	cc_vendor = CC_VENDOR_AMD;
+	cc_set_mask(me_mask);
 }