Commit a67f6b60 authored by Dexuan Cui, committed by Wei Liu

x86/hyperv: Move the code in ivm.c around to avoid unnecessary ifdef's

Group the code this way so that we can avoid too many ifdef's:

  Data only used in an SNP VM with the paravisor;
  Functions only used in an SNP VM with the paravisor;

  Data only used in an SNP VM without the paravisor;
  Functions only used in an SNP VM without the paravisor;

  Functions only used in a TDX VM, with and without the paravisor;

  Functions used in an SNP or TDX VM, when the paravisor is present;

  Functions always used, even in a regular non-CoCo VM.

No functional change.
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Tianyu Lan <tiala@microsoft.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/r/20230824080712.30327-11-decui@microsoft.com
parent e3131f1c
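
For orientation, the resulting layout of arch/x86/hyperv/ivm.c can be summarized with the rough sketch below. This is not part of the patch: only the config options and identifiers come from the commit message and the diff that follows; the section bodies here are placeholder comments, not real code.

/*
 * Rough layout sketch of arch/x86/hyperv/ivm.c after this commit.
 * Illustrative only; see the actual diff below.
 */

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * SNP VM with the paravisor: union hv_ghcb, hv_ghcb_version,
 * hv_ghcb_hypercall(), hv_ghcb_msr_read()/hv_ghcb_msr_write(), ...
 */

/*
 * SNP VM without the paravisor: ap_start_input_arg[], ap_start_stack[],
 * hv_sev_vmsa, hv_populate_vmcb_seg(), snp_set_vmsa(),
 * snp_cleanup_vmsa(), hv_snp_boot_ap()
 */
#else
/* Empty hv_ghcb_msr_read()/hv_ghcb_msr_write() stubs */
#endif

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * TDX VM, with and without the paravisor: hv_tdx_msr_read(),
 * hv_tdx_msr_write(), hv_tdx_hypercall()
 */
#else
/* Empty hv_tdx_msr_read()/hv_tdx_msr_write() stubs */
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
 * SNP or TDX VM when the paravisor is present: hv_ivm_msr_read(), ...
 * Shared CoCo helpers: hv_mark_gpa_visibility(), hv_is_private_mmio(),
 * hv_vtom_init(), ...
 */
#endif

/* Always built, even in a regular non-CoCo VM: hv_isolation_type_tdx(), ... */
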
@@ -30,9 +30,6 @@
#define GHCB_USAGE_HYPERV_CALL 1
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
union hv_ghcb {
struct ghcb ghcb;
struct {
@@ -66,10 +63,10 @@ union hv_ghcb {
} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;
/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
union hv_ghcb *hv_ghcb;
@@ -247,6 +244,140 @@ static void hv_ghcb_msr_read(u64 msr, u64 *value)
local_irq_restore(flags);
}
/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
/* Functions only used in an SNP VM without the paravisor go here. */
#define hv_populate_vmcb_seg(seg, gdtr_base) \
do { \
if (seg.selector) { \
seg.base = 0; \
seg.limit = HV_AP_SEGMENT_LIMIT; \
seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
} \
} while (0) \
static int snp_set_vmsa(void *va, bool vmsa)
{
u64 attrs;
/*
* Running at VMPL0 allows the kernel to change the VMSA bit for a page
* using the RMPADJUST instruction. However, for the instruction to
* succeed it must target the permissions of a lesser privileged
* (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
* instruction in the AMD64 APM Volume 3).
*/
attrs = 1;
if (vmsa)
attrs |= RMPADJUST_VMSA_PAGE_BIT;
return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
int err;
err = snp_set_vmsa(vmsa, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
free_page((unsigned long)vmsa);
}
int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
__get_free_page(GFP_KERNEL | __GFP_ZERO);
struct sev_es_save_area *cur_vmsa;
struct desc_ptr gdtr;
u64 ret, retry = 5;
struct hv_enable_vp_vtl *start_vp_input;
unsigned long flags;
if (!vmsa)
return -ENOMEM;
native_store_gdt(&gdtr);
vmsa->gdtr.base = gdtr.address;
vmsa->gdtr.limit = gdtr.size;
asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
vmsa->efer = native_read_msr(MSR_EFER);
asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
vmsa->xcr0 = 1;
vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
vmsa->rip = (u64)secondary_startup_64_no_verify;
vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
/*
* Set the SNP-specific fields for this VMSA:
* VMPL level
* SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
*/
vmsa->vmpl = 0;
vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true);
if (!ret) {
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
free_page((u64)vmsa);
return ret;
}
local_irq_save(flags);
start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
memset(start_vp_input, 0, sizeof(*start_vp_input));
start_vp_input->partition_id = -1;
start_vp_input->vp_index = cpu;
start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
do {
ret = hv_do_hypercall(HVCALL_START_VP,
start_vp_input, NULL);
} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
local_irq_restore(flags);
if (!hv_result_success(ret)) {
pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
snp_cleanup_vmsa(vmsa);
vmsa = NULL;
}
cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
/* Free up any previous VMSA page */
if (cur_vmsa)
snp_cleanup_vmsa(cur_vmsa);
/* Record the current VMSA page */
per_cpu(hv_sev_vmsa, cpu) = vmsa;
return ret;
}
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
@@ -282,6 +413,20 @@ static void hv_tdx_msr_read(u64 msr, u64 *val)
else
*val = args.r11;
}
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
struct tdx_hypercall_args args = { };
args.r10 = control;
args.rdx = param1;
args.r8 = param2;
(void)__tdx_hypercall_ret(&args);
return args.r11;
}
#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
@@ -309,9 +454,7 @@ void hv_ivm_msr_read(u64 msr, u64 *value)
else if (hv_isolation_type_snp())
hv_ghcb_msr_read(msr, value);
}
#endif
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.
*
@@ -432,141 +575,6 @@ static bool hv_is_private_mmio(u64 addr)
return false;
}
#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define hv_populate_vmcb_seg(seg, gdtr_base) \
do { \
if (seg.selector) { \
seg.base = 0; \
seg.limit = HV_AP_SEGMENT_LIMIT; \
seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
} \
} while (0) \
static int snp_set_vmsa(void *va, bool vmsa)
{
u64 attrs;
/*
* Running at VMPL0 allows the kernel to change the VMSA bit for a page
* using the RMPADJUST instruction. However, for the instruction to
* succeed it must target the permissions of a lesser privileged
* (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
* instruction in the AMD64 APM Volume 3).
*/
attrs = 1;
if (vmsa)
attrs |= RMPADJUST_VMSA_PAGE_BIT;
return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
int err;
err = snp_set_vmsa(vmsa, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
free_page((unsigned long)vmsa);
}
int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
__get_free_page(GFP_KERNEL | __GFP_ZERO);
struct sev_es_save_area *cur_vmsa;
struct desc_ptr gdtr;
u64 ret, retry = 5;
struct hv_enable_vp_vtl *start_vp_input;
unsigned long flags;
if (!vmsa)
return -ENOMEM;
native_store_gdt(&gdtr);
vmsa->gdtr.base = gdtr.address;
vmsa->gdtr.limit = gdtr.size;
asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
vmsa->efer = native_read_msr(MSR_EFER);
asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
vmsa->xcr0 = 1;
vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
vmsa->rip = (u64)secondary_startup_64_no_verify;
vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
/*
* Set the SNP-specific fields for this VMSA:
* VMPL level
* SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
*/
vmsa->vmpl = 0;
vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true);
if (!ret) {
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
free_page((u64)vmsa);
return ret;
}
local_irq_save(flags);
start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
memset(start_vp_input, 0, sizeof(*start_vp_input));
start_vp_input->partition_id = -1;
start_vp_input->vp_index = cpu;
start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
do {
ret = hv_do_hypercall(HVCALL_START_VP,
start_vp_input, NULL);
} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
local_irq_restore(flags);
if (!hv_result_success(ret)) {
pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
snp_cleanup_vmsa(vmsa);
vmsa = NULL;
}
cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
/* Free up any previous VMSA page */
if (cur_vmsa)
snp_cleanup_vmsa(cur_vmsa);
/* Record the current VMSA page */
per_cpu(hv_sev_vmsa, cpu) = vmsa;
return ret;
}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void __init hv_vtom_init(void)
{
enum hv_isolation_type type = hv_get_isolation_type();
@@ -654,20 +662,3 @@ bool hv_isolation_type_tdx(void)
{
return static_branch_unlikely(&isolation_type_tdx);
}
#ifdef CONFIG_INTEL_TDX_GUEST
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
struct tdx_hypercall_args args = { };
args.r10 = control;
args.rdx = param1;
args.r8 = param2;
(void)__tdx_hypercall_ret(&args);
return args.r11;
}
#endif