Commit 0ecaefb3 authored by Borislav Petkov (AMD)

x86/CPU/AMD: Track SNP host status with cc_platform_*()

The host's SNP worthiness can be determined only later, after alternatives have
been patched, in snp_rmptable_init(), depending on cmdline options like
iommu=pt which is incompatible with SNP, for example.

Which means that X86_FEATURE_SEV_SNP cannot be used for that decision and a
separate flag is needed to track it.

Use the newly added CC_ATTR_HOST_SEV_SNP attribute in the appropriate places.

Move kdump_sev_callback() to its rightful place, while at it.

Fixes: 216d106c ("x86/sev: Add SEV-SNP host initialization support")
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Srikanth Aithal <sraithal@amd.com>
Link: https://lore.kernel.org/r/20240327154317.29909-6-bp@alien8.de
parent bc6f707f
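
For context, here is a minimal sketch (not part of the commit) of the check pattern the diff converts host-side SNP code to. cc_platform_has()/_set()/_clear() and CC_ATTR_HOST_SEV_SNP come from the preceding patches in this series; snp_host_do_work() is a hypothetical caller used only for illustration.

#include <linux/cc_platform.h>
#include <linux/errno.h>

/* Hypothetical host-side helper showing the new gating pattern. */
static int snp_host_do_work(void)
{
	/*
	 * Unlike a cpu_feature_enabled(X86_FEATURE_SEV_SNP) check, which is
	 * patched in via alternatives early during boot, this attribute can
	 * still be cleared late, e.g. by snp_rmptable_init() or
	 * iommu_snp_enable() when iommu=pt makes SNP unusable.
	 */
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return -ENODEV;

	/* ... actual SNP host work would go here ... */
	return 0;
}

Setting the attribute remains confined to bsp_determine_snp() in the diff below; everything else only tests or clears it.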
@@ -228,7 +228,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
-void kdump_sev_callback(void);
 void sev_show_status(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -258,7 +257,6 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
-static inline void kdump_sev_callback(void) { }
 static inline void sev_show_status(void) { }
 #endif
@@ -270,6 +268,7 @@ int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
+void kdump_sev_callback(void);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -282,6 +281,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
+static inline void kdump_sev_callback(void) { }
 #endif
 #endif
@@ -345,6 +345,28 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
+static void bsp_determine_snp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+	cc_vendor = CC_VENDOR_AMD;
+
+	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
+		/*
+		 * RMP table entry format is not architectural and is defined by the
+		 * per-processor PPR. Restrict SNP support on the known CPU models
+		 * for which the RMP table entry format is currently defined for.
+		 */
+		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
+			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
+		} else {
+			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+		}
+	}
+#endif
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -452,21 +474,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		break;
 	}
 
-	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
-		/*
-		 * RMP table entry format is not architectural and it can vary by processor
-		 * and is defined by the per-processor PPR. Restrict SNP support on the
-		 * known CPU model and family for which the RMP table entry format is
-		 * currently defined for.
-		 */
-		if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN4) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN5))
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-		else if (!snp_probe_rmptable_info())
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-	}
+	bsp_determine_snp(c);
 
 	return;
 
 warn:
@@ -108,7 +108,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	      (boot_cpu_data.x86 >= 0x0f)))
 		return;
 
-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;
 
 	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
@@ -2284,16 +2284,6 @@ static int __init snp_init_platform_device(void)
 }
 device_initcall(snp_init_platform_device);
 
-void kdump_sev_callback(void)
-{
-	/*
-	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
-	 * safely do SNP_SHUTDOWN on the local CPU.
-	 */
-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
-		wbinvd();
-}
-
 void sev_show_status(void)
 {
 	int i;
@@ -3174,7 +3174,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
 	unsigned long pfn;
 	struct page *p;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 
 	/*
@@ -77,7 +77,7 @@ static int __mfd_enable(unsigned int cpu)
 {
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -98,7 +98,7 @@ static int __snp_enable(unsigned int cpu)
 {
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	rdmsrl(MSR_AMD64_SYSCFG, val);
@@ -174,11 +174,11 @@ static int __init snp_rmptable_init(void)
 	u64 rmptable_size;
 	u64 val;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
 
 	if (!amd_iommu_snp_en)
-		return 0;
+		goto nosnp;
 
 	if (!probed_rmp_size)
 		goto nosnp;
@@ -225,7 +225,7 @@ static int __init snp_rmptable_init(void)
 	return 0;
 
 nosnp:
-	setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 	return -ENOSYS;
 }
@@ -246,7 +246,7 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
 {
 	struct rmpentry *large_entry, *entry;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return ERR_PTR(-ENODEV);
 
 	entry = get_rmpentry(pfn);
@@ -363,7 +363,7 @@ int psmash(u64 pfn)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	if (!pfn_valid(pfn))
@@ -472,7 +472,7 @@ static int rmpupdate(u64 pfn, struct rmp_state *state)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret, level;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	level = RMP_TO_PG_LEVEL(state->pagesize);
@@ -558,3 +558,13 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
 	spin_unlock(&snp_leaked_pages_list_lock);
 }
 EXPORT_SYMBOL_GPL(snp_leak_pages);
+
+void kdump_sev_callback(void)
+{
+	/*
+	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
+	 * safely do SNP_SHUTDOWN on the local CPU.
+	 */
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+		wbinvd();
+}
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
 	void *arg = &data;
 	int cmd, rc = 0;
 
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;
 
 	sev = psp->sev_data;
@@ -3228,7 +3228,7 @@ static bool __init detect_ivrs(void)
 static void iommu_snp_enable(void)
 {
 #ifdef CONFIG_KVM_AMD_SEV
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;
 	/*
 	 * The SNP support requires that IOMMU must be enabled, and is
@@ -3236,12 +3236,14 @@ static void iommu_snp_enable(void)
 	 */
 	if (no_iommu || iommu_default_passthrough()) {
 		pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}
 
 	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en) {
 		pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}