Commit 6e5772c8 authored by Linus Torvalds

Merge tag 'x86_cc_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull generic confidential computing updates from Borislav Petkov:
 "Add an interface called cc_platform_has() which is supposed to be used
  by confidential computing solutions to query different aspects of the
  system.

  The intent behind it is to unify testing of such aspects instead of
  having each confidential computing solution add its own set of tests
  to code paths in the kernel, leading to an unwieldy mess"
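
For illustration, this is the shape of a typical call-site conversion in this merge (sketch excerpted from sme_early_init() in the mem_encrypt.c hunk below; not a new API):

	/* Before: implementation-specific check */
	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;

	/* After: unified, attribute-based check */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		swiotlb_force = SWIOTLB_FORCE;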

* tag 'x86_cc_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  treewide: Replace the use of mem_encrypt_active() with cc_platform_has()
  x86/sev: Replace occurrences of sev_es_active() with cc_platform_has()
  x86/sev: Replace occurrences of sev_active() with cc_platform_has()
  x86/sme: Replace occurrences of sme_active() with cc_platform_has()
  powerpc/pseries/svm: Add a powerpc version of cc_platform_has()
  x86/sev: Add an x86 version of cc_platform_has()
  arch/cc: Introduce a function to check for confidential computing features
  x86/ioremap: Selectively build arch override encryption functions
parents 57f45de7 e9d1d2bb
@@ -1234,6 +1234,9 @@ config RELR
 config ARCH_HAS_MEM_ENCRYPT
 	bool
 
+config ARCH_HAS_CC_PLATFORM
+	bool
+
 config HAVE_SPARSE_SYSCALL_NR
 	bool
 	help
...
@@ -10,11 +10,6 @@
 #include <asm/svm.h>
 
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();
...
@@ -159,6 +159,7 @@ config PPC_SVM
 	select SWIOTLB
 	select ARCH_HAS_MEM_ENCRYPT
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	select ARCH_HAS_CC_PLATFORM
 	help
 	 There are certain POWER platforms which support secure guests using
 	 the Protected Execution Facility, with the help of an Ultravisor
...
@@ -31,3 +31,5 @@ obj-$(CONFIG_FA_DUMP)	+= rtas-fadump.o
 obj-$(CONFIG_SUSPEND)	+= suspend.o
 obj-$(CONFIG_PPC_VAS)	+= vas.o
+
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM)	+= cc_platform.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#include <linux/export.h>
#include <linux/cc_platform.h>

#include <asm/machdep.h>
#include <asm/svm.h>

bool cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return is_secure_guest();
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(cc_platform_has);
@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/memblock.h>
+#include <linux/cc_platform.h>
 #include <asm/machdep.h>
 #include <asm/svm.h>
 #include <asm/swiotlb.h>
@@ -63,7 +64,7 @@ void __init svm_swiotlb_init(void)
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
@@ -76,7 +77,7 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
...
@@ -4,8 +4,6 @@
 
 #ifndef __ASSEMBLY__
 
-static inline bool mem_encrypt_active(void) { return false; }
-
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
...
@@ -1530,6 +1530,7 @@ config AMD_MEM_ENCRYPT
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select INSTRUCTION_DECODER
 	select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+	select ARCH_HAS_CC_PLATFORM
 	help
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
...
@@ -391,6 +391,7 @@ extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
 #endif
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 extern bool arch_memremap_can_ram_remap(resource_size_t offset,
					unsigned long size,
					unsigned long flags);
@@ -398,6 +399,13 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 
 extern bool phys_mem_access_encrypted(unsigned long phys_addr,
				      unsigned long size);
+#else
+static inline bool phys_mem_access_encrypted(unsigned long phys_addr,
+					     unsigned long size)
+{
+	return true;
+}
+#endif
 
 /**
  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
...
@@ -129,7 +129,7 @@ relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
-		unsigned int sme_active);
+		unsigned int host_mem_enc_active);
 #endif
 
 #define ARCH_HAS_KIMAGE_ARCH
...
@@ -13,6 +13,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
+#include <linux/cc_platform.h>
 
 #include <asm/bootparam.h>
 
@@ -50,9 +51,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
 void __init mem_encrypt_init(void);
 
 void __init sev_es_init_vc_handling(void);
-bool sme_active(void);
-bool sev_active(void);
-bool sev_es_active(void);
 
 #define __bss_decrypted __section(".bss..decrypted")
 
@@ -75,9 +73,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline void sev_es_init_vc_handling(void) { }
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
-static inline bool sev_es_active(void) { return false; }
 
 static inline int __init
 early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
@@ -101,11 +96,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
 static inline u64 sme_get_me_mask(void)
 {
	return sme_me_mask;
...
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
 CFLAGS_REMOVE_sev.o = -pg
+CFLAGS_REMOVE_cc_platform.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o	:= n
@@ -29,6 +30,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
 KASAN_SANITIZE_stacktrace.o	:= n
 KASAN_SANITIZE_paravirt.o	:= n
 KASAN_SANITIZE_sev.o		:= n
+KASAN_SANITIZE_cc_platform.o	:= n
 
 # With some compiler versions the generated code results in boot hangs, caused
 # by several compilation units. To be safe, disable all instrumentation.
@@ -47,6 +49,7 @@ endif
 KCOV_INSTRUMENT		:= n
 
 CFLAGS_head$(BITS).o	+= -fno-stack-protector
+CFLAGS_cc_platform.o	+= -fno-stack-protector
 
 CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace
 
@@ -147,6 +150,9 @@ obj-$(CONFIG_UNWINDER_FRAME_POINTER)	+= unwind_frame.o
 obj-$(CONFIG_UNWINDER_GUESS)		+= unwind_guess.o
 
 obj-$(CONFIG_AMD_MEM_ENCRYPT)		+= sev.o
+
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM)	+= cc_platform.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#include <linux/export.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>

static bool __maybe_unused intel_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_INTEL_TDX_GUEST
	return false;
#else
	return false;
#endif
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this.  When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example for this requirement.  Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted.  So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
static bool amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return sme_me_mask;

	case CC_ATTR_HOST_MEM_ENCRYPT:
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);

	case CC_ATTR_GUEST_MEM_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ENABLED;

	case CC_ATTR_GUEST_STATE_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;

	default:
		return false;
	}
#else
	return false;
#endif
}

bool cc_platform_has(enum cc_attr attr)
{
	if (sme_me_mask)
		return amd_cc_platform_has(attr);

	return false;
}
EXPORT_SYMBOL_GPL(cc_platform_has);
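
The SME/SEV distinction described in the comment above plays out at call sites converted later in this diff; for instance, the real-mode trampoline setup keys off the host-encryption attribute only (excerpt from sme_sev_setup_real_mode() further down):

	/* Under SME, the AP trampoline must remain decrypted */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;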
@@ -10,6 +10,7 @@
 #include <linux/crash_dump.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/cc_platform.h>
 
 static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
				  unsigned long offset, int userbuf,
@@ -73,5 +74,6 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, sev_active());
+	return read_from_oldmem(buf, count, ppos, 0,
+				cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
 }
@@ -19,7 +19,7 @@
 #include <linux/start_kernel.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/processor.h>
@@ -284,8 +284,13 @@ unsigned long __head __startup_64(unsigned long physaddr,
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
+	 *
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
	 */
-	if (mem_encrypt_active()) {
+	if (sme_get_me_mask()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;
 
		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
...
@@ -27,6 +27,7 @@
 #include <linux/nmi.h>
 #include <linux/swait.h>
 #include <linux/syscore_ops.h>
+#include <linux/cc_platform.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -418,7 +419,7 @@ static void __init sev_map_percpu_data(void)
 {
	int cpu;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;
 
	for_each_possible_cpu(cpu) {
...
@@ -16,9 +16,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/set_memory.h>
+#include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
-#include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
 #include <asm/kvmclock.h>
 
@@ -223,7 +223,7 @@ static void __init kvmclock_init_mem(void)
	 * hvclock is shared between the guest and the hypervisor, must
	 * be mapped decrypted.
	 */
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		r = set_memory_decrypted((unsigned long) hvclock_mem,
					 1UL << order);
		if (r) {
...
@@ -17,6 +17,7 @@
 #include <linux/suspend.h>
 #include <linux/vmalloc.h>
 #include <linux/efi.h>
+#include <linux/cc_platform.h>
 
 #include <asm/init.h>
 #include <asm/tlbflush.h>
@@ -166,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
	}
	pte = pte_offset_kernel(pmd, vaddr);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		prot = PAGE_KERNEL_EXEC;
 
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
@@ -206,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
	level4p = (pgd_t *)__va(start_pgtable);
	clear_page(level4p);
 
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		info.page_flag |= _PAGE_ENC;
		info.kernpg_flag |= _PAGE_ENC;
	}
@@ -358,7 +359,7 @@ void machine_kexec(struct kimage *image)
				(unsigned long)page_list,
				image->start,
				image->preserve_context,
-				sme_active());
+				cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
 
 #ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
@@ -569,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void)
  */
 int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
 {
-	if (sev_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return 0;
 
	/*
-	 * If SME is active we need to be sure that kexec pages are
-	 * not encrypted because when we boot to the new kernel the
+	 * If host memory encryption is active we need to be sure that kexec
+	 * pages are not encrypted because when we boot to the new kernel the
	 * pages won't be accessed encrypted (initially).
	 */
	return set_memory_decrypted((unsigned long)vaddr, pages);
@@ -582,12 +583,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
 
 void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
 {
-	if (sev_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;
 
	/*
-	 * If SME is active we need to reset the pages back to being
-	 * an encrypted mapping before freeing them.
+	 * If host memory encryption is active we need to reset the pages back
+	 * to being an encrypted mapping before freeing them.
	 */
	set_memory_encrypted((unsigned long)vaddr, pages);
 }
@@ -6,7 +6,7 @@
 #include <linux/swiotlb.h>
 #include <linux/memblock.h>
 #include <linux/dma-direct.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/iommu.h>
 #include <asm/swiotlb.h>
@@ -45,11 +45,10 @@ int __init pci_swiotlb_detect_4gb(void)
		swiotlb = 1;
 
	/*
-	 * If SME is active then swiotlb will be set to 1 so that bounce
-	 * buffers are allocated and used for devices that do not support
-	 * the addressing range required for the encryption mask.
+	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
+	 * devices that can't support DMA to encrypted memory.
	 */
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		swiotlb = 1;
 
	return swiotlb;
...
@@ -47,7 +47,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
-	 * %r8  sme_active
+	 * %r8  host_mem_enc_active
	 */
 
	/* Save the CPU context, used for jumping back */
...
@@ -11,7 +11,7 @@
 #include <linux/sched/debug.h>	/* For show_regs() */
 #include <linux/percpu-defs.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/printk.h>
 #include <linux/mm_types.h>
 #include <linux/set_memory.h>
@@ -615,7 +615,7 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
	int cpu;
	u64 pfn;
 
-	if (!sev_es_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;
 
	pflags = _PAGE_NX | _PAGE_RW;
@@ -774,7 +774,7 @@ void __init sev_es_init_vc_handling(void)
 
	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
 
-	if (!sev_es_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;
 
	if (!sev_es_check_cpu_features())
...
@@ -25,6 +25,7 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/rwsem.h>
+#include <linux/cc_platform.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -456,7 +457,7 @@ static int has_svm(void)
		return 0;
	}
 
-	if (sev_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return 0;
	}
...
@@ -14,7 +14,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mmiotrace.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/efi.h>
 #include <linux/pgtable.h>
 
@@ -92,7 +92,7 @@ static unsigned int __ioremap_check_ram(struct resource *res)
  */
 static unsigned int __ioremap_check_encrypted(struct resource *res)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;
 
	switch (res->desc) {
@@ -112,7 +112,7 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
  */
 static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;
 
	if (!IS_ENABLED(CONFIG_EFI))
@@ -508,6 +508,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * Examine the physical address to determine if it is an area of memory
  * that should be mapped decrypted.  If the memory is not part of the
@@ -555,7 +556,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;
@@ -693,7 +694,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;
 
	if (flags & MEMREMAP_ENC)
@@ -702,7 +703,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
	if (flags & MEMREMAP_DEC)
		return false;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
@@ -723,12 +724,12 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 {
	bool encrypted_prot;
 
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;
 
	encrypted_prot = true;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
@@ -746,7 +747,6 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
 }
 
-#ifdef CONFIG_AMD_MEM_ENCRYPT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
...
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -143,7 +144,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;
 
	/* Get the command line address before unmapping the real_mode_data */
@@ -163,7 +164,7 @@ void __init sme_map_bootdata(char *real_mode_data)
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;
 
	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -193,7 +194,7 @@ void __init sme_early_init(void)
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -202,7 +203,7 @@ void __init sev_setup_arch(void)
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;
 
	/*
@@ -360,42 +361,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this.  When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
- *
- * The trampoline code is a good example for this requirement.  Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted.  So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool sev_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-
-bool sme_active(void)
-{
-	return sme_me_mask && !sev_active();
-}
-EXPORT_SYMBOL_GPL(sev_active);
-
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
-}
-
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;
 
	/*
@@ -403,7 +375,7 @@ bool force_dma_unencrypted(struct device *dev)
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);
@@ -428,7 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
@@ -444,7 +416,7 @@ static void print_mem_encrypt_feature_info(void)
	pr_info("AMD Memory Encryption Features active:");
 
	/* Secure Memory Encryption */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
@@ -454,11 +426,11 @@ static void print_mem_encrypt_feature_info(void)
	}
 
	/* Secure Encrypted Virtualization */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_cont(" SEV");
 
	/* Encrypted Register State */
-	if (sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		pr_cont(" SEV-ES");
 
	pr_cont("\n");
@@ -477,7 +449,8 @@ void __init mem_encrypt_init(void)
	 * With SEV, we need to unroll the rep string I/O instructions,
	 * but SEV-ES supports them through the #VC handler.
	 */
-	if (sev_active() && !sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		static_branch_enable(&sev_enable_key);
 
	print_mem_encrypt_feature_info();
@@ -485,6 +458,6 @@ void __init mem_encrypt_init(void)
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-	return sev_active();
+	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
@@ -30,6 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -287,7 +288,13 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;
 
-	if (!sme_active())
+	/*
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
+	 */
+	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
		return;
 
	/*
...
@@ -18,6 +18,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
+#include <linux/cc_platform.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1986,7 +1987,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
	int ret;
 
	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;
 
	/* Should not be working on unaligned addresses */
...
@@ -33,7 +33,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/ucs2_string.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/sched/task.h>
 
 #include <asm/setup.h>
@@ -284,7 +284,8 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;
 
-	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;
 
	pfn = md->phys_addr >> PAGE_SHIFT;
@@ -390,7 +391,7 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m
	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pf |= _PAGE_ENC;
 
	return efi_update_mappings(md, pf);
@@ -438,7 +439,7 @@ void __init efi_runtime_update_mappings(void)
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;
 
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pf |= _PAGE_ENC;
 
		efi_update_mappings(md, pf);
...
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/set_memory.h>
@@ -44,10 +44,10 @@ void __init reserve_real_mode(void)
 static void sme_sev_setup_real_mode(struct trampoline_header *th)
 {
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;
 
-	if (sev_es_active()) {
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
@@ -81,7 +81,7 @@ static void __init setup_real_mode(void)
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
-	if (sme_active())
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
 
	memcpy(base, real_mode_blob, size);
...
@@ -38,6 +38,7 @@
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
+#include <linux/cc_platform.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -1269,7 +1270,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
	 * however, SME requires an indirect IOMMU mapping because the encryption
	 * bit is beyond the DMA mask of the chip.
	 */
-	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
		dev_info(&pdev->dev,
			 "SME is not compatible with RAVEN\n");
		return -ENOTSUPP;
...
@@ -31,7 +31,7 @@
 #include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <xen/xen.h>
 
 #include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;
 
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
...
@@ -29,7 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
	/* TTM currently doesn't fully support SEV encryption. */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return -EINVAL;
 
	if (vmw_force_coherent)
...
@@ -28,7 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
 #include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
	unsigned long msg_len = strlen(msg);
 
	/* HB port can't access encrypted memory. */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_high;
		u32 channel_id = (channel->channel_id << 16);
 
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
	unsigned long si, di, eax, ebx, ecx, edx;
 
	/* HB port can't access encrypted memory */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		unsigned long bp = channel->cookie_low;
		u32 channel_id = (channel->channel_id << 16);
...
@@ -20,7 +20,7 @@
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
 #include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/apic.h>
@@ -964,7 +964,7 @@ static bool copy_device_table(void)
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
 
-	old_devtb = (sme_active() && is_kdump_kernel())
+	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -3032,7 +3032,8 @@ static int __init amd_iommu_init(void)
 
 static bool amd_iommu_sme_check(void)
 {
-	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+	    (boot_cpu_data.x86 != 0x17))
		return true;
 
	/* For Fam17h, a specific level of support is required */
...
@@ -31,6 +31,7 @@
 #include <linux/irqdomain.h>
 #include <linux/percpu.h>
 #include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
	 * active, because some of those devices (AMD GPUs) don't have the
	 * encryption bit in their DMA-mask and require remapping.
	 */
-	if (!mem_encrypt_active() && dev_data->iommu_v2)
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
		return IOMMU_DOMAIN_IDENTITY;
 
	return 0;
...
@@ -17,6 +17,7 @@
 #include <linux/wait.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/cc_platform.h>
 
 #include "amd_iommu.h"
 
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
	 * When memory encryption is active the device is likely not in a
	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return -ENODEV;
 
	if (!amd_iommu_v2_supported())
...
@@ -25,6 +25,7 @@
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
 #include <linux/module.h>
+#include <linux/cc_platform.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
		else
			iommu_set_default_translated(false);
 
-		if (iommu_default_passthrough() && mem_encrypt_active()) {
+		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
...
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/io.h>
 #include "internal.h"
 
@@ -177,7 +177,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  */
 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
+	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 }
 
 /*
@@ -378,7 +378,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
						    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
-					       userbuf, mem_encrypt_active());
+					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#ifndef _LINUX_CC_PLATFORM_H
#define _LINUX_CC_PLATFORM_H
#include <linux/types.h>
#include <linux/stddef.h>
/**
* enum cc_attr - Confidential computing attributes
*
* These attributes represent confidential computing features that are
* currently active.
*/
enum cc_attr {
	/**
	 * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
	 *
	 * The platform/OS is running with active memory encryption. This
	 * includes running either as a bare-metal system or a hypervisor
	 * and actively using memory encryption or as a guest/virtual machine
	 * and actively using memory encryption.
	 *
	 * Examples include SME, SEV and SEV-ES.
	 */
	CC_ATTR_MEM_ENCRYPT,

	/**
	 * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
	 *
	 * The platform/OS is running as a bare-metal system or a hypervisor
	 * and actively using memory encryption.
	 *
	 * Examples include SME.
	 */
	CC_ATTR_HOST_MEM_ENCRYPT,

	/**
	 * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
	 *
	 * The platform/OS is running as a guest/virtual machine and actively
	 * using memory encryption.
	 *
	 * Examples include SEV and SEV-ES.
	 */
	CC_ATTR_GUEST_MEM_ENCRYPT,

	/**
	 * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
	 *
	 * The platform/OS is running as a guest/virtual machine and actively
	 * using memory encryption and register state encryption.
	 *
	 * Examples include SEV-ES.
	 */
	CC_ATTR_GUEST_STATE_ENCRYPT,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
/**
* cc_platform_has() - Checks if the specified cc_attr attribute is active
* @attr: Confidential computing attribute to check
*
* The cc_platform_has() function will return an indicator as to whether the
* specified Confidential Computing attribute is currently active.
*
* Context: Any context
* Return:
* * TRUE - Specified Confidential Computing attribute is active
* * FALSE - Specified Confidential Computing attribute is not active
*/
bool cc_platform_has(enum cc_attr attr);
#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
static inline bool cc_platform_has(enum cc_attr attr) { return false; }
#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
#endif /* _LINUX_CC_PLATFORM_H */
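
Because the !CONFIG_ARCH_HAS_CC_PLATFORM stub returns false, generic code can call cc_platform_has() unconditionally and the guarded branch drops out on architectures without a provider. A minimal sketch of a generic caller (the function name is hypothetical, for illustration only):

	#include <linux/cc_platform.h>
	#include <linux/printk.h>

	/* Hypothetical example, not part of this series */
	static void __init report_cc_status(void)
	{
		if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
			pr_info("Confidential computing: memory encryption is active\n");
	}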
@@ -16,10 +16,6 @@
 
 #include <asm/mem_encrypt.h>
 
-#else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
...
@@ -34,7 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/set_memory.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
@@ -552,7 +552,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
	if (mapping_size > alloc_size) {
...