Commit e9d1d2bb authored by Tom Lendacky, committed by Borislav Petkov

treewide: Replace the use of mem_encrypt_active() with cc_platform_has()

Replace uses of mem_encrypt_active() with calls to cc_platform_has() with
the CC_ATTR_MEM_ENCRYPT attribute.

Remove the implementation of mem_encrypt_active() across all arches.

For s390, since the default implementation of cc_platform_has() matches
the s390 implementation of mem_encrypt_active(), cc_platform_has() does
not need to be implemented on s390 (the config option
ARCH_HAS_CC_PLATFORM is not set).
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210928191009.32551-9-bp@alien8.de
parent 6283f2ef
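
For reference, a minimal sketch of the conversion pattern described in the
commit message; the caller below is hypothetical, only cc_platform_has()
and CC_ATTR_MEM_ENCRYPT come from this series:

#include <linux/cc_platform.h>
#include <linux/printk.h>

/* Hypothetical caller, for illustration only. */
static void example_report_mem_encrypt(void)
{
	/* Old form, removed by this series: if (mem_encrypt_active()) */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_info("memory encryption is active\n");
}

/*
 * When CONFIG_ARCH_HAS_CC_PLATFORM is not set (as on s390 after this
 * patch), the generic stub of cc_platform_has() returns false, which
 * matches the old s390 mem_encrypt_active() that always returned false.
 */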
@@ -10,11 +10,6 @@
 #include <asm/svm.h>
 
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();
...
@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/memblock.h>
+#include <linux/cc_platform.h>
 #include <asm/machdep.h>
 #include <asm/svm.h>
 #include <asm/swiotlb.h>
@@ -63,7 +64,7 @@ void __init svm_swiotlb_init(void)
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
@@ -76,7 +77,7 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
...
@@ -4,8 +4,6 @@
 
 #ifndef __ASSEMBLY__
 
-static inline bool mem_encrypt_active(void) { return false; }
-
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
...
@@ -96,11 +96,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
 static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
...
@@ -19,7 +19,7 @@
 #include <linux/start_kernel.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/processor.h>
@@ -284,8 +284,13 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * The bss section will be memset to zero later in the initialization so
 	 * there is no need to zero it after changing the memory encryption
 	 * attribute.
+	 *
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
 	 */
-	if (mem_encrypt_active()) {
+	if (sme_get_me_mask()) {
 		vaddr = (unsigned long)__start_bss_decrypted;
 		vaddr_end = (unsigned long)__end_bss_decrypted;
 
 		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
...
@@ -694,7 +694,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 				 unsigned long flags)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	if (flags & MEMREMAP_ENC)
@@ -724,7 +724,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 {
 	bool encrypted_prot;
 
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return prot;
 
 	encrypted_prot = true;
...
@@ -400,7 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
 	 * The unused memory range was mapped decrypted, change the encryption
 	 * attribute from decrypted to encrypted before freeing it.
 	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		r = set_memory_encrypted(vaddr, npages);
 		if (r) {
 			pr_warn("failed to free unused decrypted pages\n");
...
@@ -18,6 +18,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
+#include <linux/cc_platform.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1986,7 +1987,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	/* Should not be working on unaligned addresses */
...
@@ -38,6 +38,7 @@
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
+#include <linux/cc_platform.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -1269,7 +1270,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
 	 * bit is beyond the DMA mask of the chip.
 	 */
-	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 		dev_info(&pdev->dev,
 			 "SME is not compatible with RAVEN\n");
 		return -ENOTSUPP;
...
@@ -31,7 +31,7 @@
 #include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <xen/xen.h>
 
 #include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
 	 * Enforce dma_alloc_coherent when memory encryption is active as well
 	 * for the same reasons as for Xen paravirtual hosts.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
...
@@ -29,7 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
 	/* TTM currently doesn't fully support SEV encryption. */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -EINVAL;
 
 	if (vmw_force_coherent)
...
@@ -28,7 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/hypervisor.h>
 
 #include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 	unsigned long msg_len = strlen(msg);
 
 	/* HB port can't access encrypted memory. */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_high;
 		u32 channel_id = (channel->channel_id << 16);
 
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 	unsigned long si, di, eax, ebx, ecx, edx;
 
 	/* HB port can't access encrypted memory */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_low;
 		u32 channel_id = (channel->channel_id << 16);
...
@@ -31,6 +31,7 @@
 #include <linux/irqdomain.h>
 #include <linux/percpu.h>
 #include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
 	 * active, because some of those devices (AMD GPUs) don't have the
 	 * encryption bit in their DMA-mask and require remapping.
 	 */
-	if (!mem_encrypt_active() && dev_data->iommu_v2)
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
 		return IOMMU_DOMAIN_IDENTITY;
 
 	return 0;
...
@@ -17,6 +17,7 @@
 #include <linux/wait.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/cc_platform.h>
 
 #include "amd_iommu.h"
 
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	 * When memory encryption is active the device is likely not in a
 	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -ENODEV;
 
 	if (!amd_iommu_v2_supported())
...
@@ -25,6 +25,7 @@
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
 #include <linux/module.h>
+#include <linux/cc_platform.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
 		else
 			iommu_set_default_translated(false);
 
-		if (iommu_default_passthrough() && mem_encrypt_active()) {
+		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
 			iommu_set_default_translated(false);
 		}
...
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/io.h>
 #include "internal.h"
 
@@ -177,7 +177,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  */
 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
+	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 }
 
 /*
@@ -378,7 +378,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 					buflen);
 			start = m->paddr + *fpos - m->offset;
 			tmp = read_from_oldmem(buffer, tsz, &start,
-					       userbuf, mem_encrypt_active());
+					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 			if (tmp < 0)
 				return tmp;
 			buflen -= tsz;
...
@@ -16,10 +16,6 @@
 
 #include <asm/mem_encrypt.h>
 
-#else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
...
@@ -34,7 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/set_memory.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
@@ -552,7 +552,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	if (!mem)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
 	if (mapping_size > alloc_size) {
...