Commit e7de6c7c authored by Christoph Hellwig, committed by Ingo Molnar

dma/swiotlb: Remove swiotlb_set_mem_attributes()

Now that set_memory_decrypted() is always available we can just call it
directly.
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-12-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b7fa0746
...@@ -49,8 +49,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); ...@@ -49,8 +49,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
/* Architecture __weak replacement functions */ /* Architecture __weak replacement functions */
void __init mem_encrypt_init(void); void __init mem_encrypt_init(void);
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
bool sme_active(void); bool sme_active(void);
bool sev_active(void); bool sev_active(void);
......
...@@ -441,11 +441,3 @@ void __init mem_encrypt_init(void) ...@@ -441,11 +441,3 @@ void __init mem_encrypt_init(void)
: "Secure Memory Encryption (SME)"); : "Secure Memory Encryption (SME)");
} }
/*
 * Mark the SWIOTLB bounce-buffer area as decrypted so devices can DMA
 * to/from it when memory encryption (SME/SEV) is active.
 *
 * @vaddr: virtual start address of the buffer area
 * @size:  size of the area in bytes; expected to be page-aligned, since
 *         set_memory_decrypted() operates on whole pages
 *
 * Removed by this commit: callers now invoke set_memory_decrypted()
 * directly, since it is always available.
 */
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
/* Warn (but proceed) if the caller passed a non-page-aligned size. */
WARN(PAGE_ALIGN(size) != size,
"size is not page-aligned (%#lx)\n", size);
/* Make the SWIOTLB buffer area decrypted */
set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/mem_encrypt.h> #include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/dma.h> #include <asm/dma.h>
...@@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void) ...@@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE); return size ? size : (IO_TLB_DEFAULT_SIZE);
} }
/*
 * Default no-op implementation; architectures with memory encryption
 * (e.g. x86 SME) override this __weak symbol to decrypt the buffer.
 * Removed by this commit in favor of calling set_memory_decrypted()
 * directly.
 */
void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
/* For swiotlb, clear memory encryption mask from dma addresses */ /* For swiotlb, clear memory encryption mask from dma addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev, static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
phys_addr_t address) phys_addr_t address)
...@@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void) ...@@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void)
vaddr = phys_to_virt(io_tlb_start); vaddr = phys_to_virt(io_tlb_start);
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
swiotlb_set_mem_attributes(vaddr, bytes); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes); memset(vaddr, 0, bytes);
vaddr = phys_to_virt(io_tlb_overflow_buffer); vaddr = phys_to_virt(io_tlb_overflow_buffer);
bytes = PAGE_ALIGN(io_tlb_overflow); bytes = PAGE_ALIGN(io_tlb_overflow);
swiotlb_set_mem_attributes(vaddr, bytes); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes); memset(vaddr, 0, bytes);
} }
...@@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) ...@@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_start = virt_to_phys(tlb); io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes; io_tlb_end = io_tlb_start + bytes;
swiotlb_set_mem_attributes(tlb, bytes); set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
memset(tlb, 0, bytes); memset(tlb, 0, bytes);
/* /*
...@@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) ...@@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!v_overflow_buffer) if (!v_overflow_buffer)
goto cleanup2; goto cleanup2;
swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); set_memory_decrypted((unsigned long)v_overflow_buffer,
io_tlb_overflow >> PAGE_SHIFT);
memset(v_overflow_buffer, 0, io_tlb_overflow); memset(v_overflow_buffer, 0, io_tlb_overflow);
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment