Commit 99c5c4c6 authored by Kirill A. Shutemov, committed by Borislav Petkov (AMD)

x86/mm: Make x86_platform.guest.enc_status_change_*() return an error

TDX is going to have more than one reason to fail enc_status_change_prepare().

Change the callback to return errno instead of assuming -EIO. Change
enc_status_change_finish() too to keep the interface symmetric.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Tao Liu <ltao@redhat.com>
Link: https://lore.kernel.org/r/20240614095904.1345461-8-kirill.shutemov@linux.intel.com
parent de606131
...@@ -798,28 +798,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc) ...@@ -798,28 +798,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
return true; return true;
} }
/*
 * Notify the TDX module before a page encryption-status change.
 *
 * Returns 0 on success, -EIO if the TDX page-state conversion failed.
 */
static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	return 0;
}
/*
 * Notify the TDX module after a page encryption-status change.
 *
 * Returns 0 on success, -EIO if the TDX page-state conversion failed.
 */
static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	return 0;
}
void __init tdx_early_init(void) void __init tdx_early_init(void)
......
...@@ -523,9 +523,9 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], ...@@ -523,9 +523,9 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
* transition is complete, hv_vtom_set_host_visibility() marks the pages * transition is complete, hv_vtom_set_host_visibility() marks the pages
* as "present" again. * as "present" again.
*/ */
/*
 * Mark the pages not-present while the host-visibility transition is in
 * progress.  Returns 0 on success or the negative errno from
 * set_memory_np().
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
	return set_memory_np(kbuffer, pagecount);
}
/* /*
...@@ -536,20 +536,19 @@ static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc ...@@ -536,20 +536,19 @@ static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc
* with host. This function works as wrap of hv_mark_gpa_visibility() * with host. This function works as wrap of hv_mark_gpa_visibility()
* with memory base and size. * with memory base and size.
*/ */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc) static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{ {
enum hv_mem_host_visibility visibility = enc ? enum hv_mem_host_visibility visibility = enc ?
VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE; VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
u64 *pfn_array; u64 *pfn_array;
phys_addr_t paddr; phys_addr_t paddr;
int i, pfn, err;
void *vaddr; void *vaddr;
int ret = 0; int ret = 0;
bool result = true;
int i, pfn;
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!pfn_array) { if (!pfn_array) {
result = false; ret = -ENOMEM;
goto err_set_memory_p; goto err_set_memory_p;
} }
...@@ -568,10 +567,8 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo ...@@ -568,10 +567,8 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) { if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
ret = hv_mark_gpa_visibility(pfn, pfn_array, ret = hv_mark_gpa_visibility(pfn, pfn_array,
visibility); visibility);
if (ret) { if (ret)
result = false;
goto err_free_pfn_array; goto err_free_pfn_array;
}
pfn = 0; pfn = 0;
} }
} }
...@@ -586,10 +583,11 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo ...@@ -586,10 +583,11 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
* order to avoid leaving the memory range in a "broken" state. Setting * order to avoid leaving the memory range in a "broken" state. Setting
* the PRESENT bits shouldn't fail, but return an error if it does. * the PRESENT bits shouldn't fail, but return an error if it does.
*/ */
if (set_memory_p(kbuffer, pagecount)) err = set_memory_p(kbuffer, pagecount);
result = false; if (err && !ret)
ret = err;
return result; return ret;
} }
static bool hv_vtom_tlb_flush_required(bool private) static bool hv_vtom_tlb_flush_required(bool private)
......
...@@ -151,8 +151,8 @@ struct x86_init_acpi { ...@@ -151,8 +151,8 @@ struct x86_init_acpi {
* @enc_cache_flush_required Returns true if a cache flush is needed before changing page encryption status * @enc_cache_flush_required Returns true if a cache flush is needed before changing page encryption status
*/ */
struct x86_guest {
	/* Both status-change callbacks return 0 on success or a negative errno. */
	int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
	int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
	bool (*enc_tlb_flush_required)(bool enc);
	bool (*enc_cache_flush_required)(void);
};
......
...@@ -134,8 +134,8 @@ struct x86_cpuinit_ops x86_cpuinit = { ...@@ -134,8 +134,8 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { }; static void default_nmi_init(void) { };
/* Default no-op prepare hook: every status change succeeds (returns 0). */
static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
/* Default no-op finish hook: every status change succeeds (returns 0). */
static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
/* Default no-op: no TLB flush needed for an encryption-status change. */
static bool enc_tlb_flush_required_noop(bool enc) { return false; }
/* Default no-op: no cache flush needed before an encryption-status change. */
static bool enc_cache_flush_required_noop(void) { return false; }
/* Default no-op: no MMIO address is treated as private. */
static bool is_private_mmio_noop(u64 addr) { return false; }
......
...@@ -283,7 +283,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) ...@@ -283,7 +283,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
#endif #endif
} }
static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc) static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{ {
/* /*
* To maintain the security guarantees of SEV-SNP guests, make sure * To maintain the security guarantees of SEV-SNP guests, make sure
...@@ -292,11 +292,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool ...@@ -292,11 +292,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc) if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
snp_set_memory_shared(vaddr, npages); snp_set_memory_shared(vaddr, npages);
return true; return 0;
} }
/* Return true unconditionally: return value doesn't matter for the SEV side */ /* Return true unconditionally: return value doesn't matter for the SEV side */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc) static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{ {
/* /*
* After memory is mapped encrypted in the page table, validate it * After memory is mapped encrypted in the page table, validate it
...@@ -308,7 +308,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e ...@@ -308,7 +308,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
return true; return 0;
} }
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
......
...@@ -2196,7 +2196,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc) ...@@ -2196,7 +2196,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required()); cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
/* Notify hypervisor that we are about to set/clr encryption attribute. */ /* Notify hypervisor that we are about to set/clr encryption attribute. */
if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc)) ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
if (ret)
goto vmm_fail; goto vmm_fail;
ret = __change_page_attr_set_clr(&cpa, 1); ret = __change_page_attr_set_clr(&cpa, 1);
...@@ -2214,16 +2215,17 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc) ...@@ -2214,16 +2215,17 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
return ret; return ret;
/* Notify hypervisor that we have successfully set/clr encryption attribute. */ /* Notify hypervisor that we have successfully set/clr encryption attribute. */
if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc)) ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
if (ret)
goto vmm_fail; goto vmm_fail;
return 0; return 0;
vmm_fail: vmm_fail:
WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n", WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s: %d\n",
(void *)addr, numpages, enc ? "private" : "shared"); (void *)addr, numpages, enc ? "private" : "shared", ret);
return -EIO; return ret;
} }
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment