Commit a071261d authored by Andrew Scull, committed by Marc Zyngier

KVM: arm64: nVHE: Fix pointers during SMCCC conversion

The host need not concern itself with the pointer differences for the
hyp interfaces that are shared between VHE and nVHE, so leave it to the
hyp to handle them.

The point where the SMCCC function IDs are converted into function
calls is also a suitable place to convert any pointer arguments into
hyp pointers. This additionally eases the reuse of the handlers in
different contexts.
Signed-off-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200915104643.2543892-20-ascull@google.com
parent 04e4caa8
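
To illustrate the pattern the patch applies (converting host kernel pointers to hyp pointers once, where the SMCCC function ID is decoded into a function call, rather than inside each handler) here is a minimal, self-contained C sketch. It is not kernel code: hyp_va() is a hypothetical stand-in for kern_hyp_va(), reduced to an identity mapping so the example runs, and the function ID and structures are made up.

#include <stdio.h>

/*
 * Stand-in for kern_hyp_va(): the real macro rewrites a host kernel VA into
 * the corresponding hyp VA (mask and offset). Here it is an identity mapping
 * purely so the sketch compiles and runs.
 */
static inline void *hyp_va(void *host_ptr)
{
	return host_ptr;
}

struct vcpu {
	int id;
};

/* The handler expects a pointer that is already valid in the hyp context. */
static int vcpu_run(struct vcpu *vcpu)
{
	return vcpu->id;
}

/*
 * Dispatcher: as the function ID is decoded into a function call, any pointer
 * argument is converted exactly once, at this boundary.
 */
static long handle_host_call(unsigned long func_id, unsigned long arg1)
{
	switch (func_id) {
	case 1: /* hypothetical "run vcpu" ID */
		return vcpu_run(hyp_va((struct vcpu *)arg1));
	default:
		return -1;
	}
}

int main(void)
{
	struct vcpu v = { .id = 7 };

	/* The host passes its own pointer; the dispatcher converts it. */
	printf("ret = %ld\n", handle_host_call(1, (unsigned long)&v));
	return 0;
}

With the conversion done at the dispatch boundary, vcpu_run() could be reused in a context where its argument is already a hyp pointer; this is the same reason the kern_hyp_va() calls move out of __kvm_vcpu_run(), the TLB flush helpers, and the vgic APR save/restore paths in the diff below.
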
@@ -24,7 +24,7 @@ static void handle_host_hcall(unsigned long func_id,
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
 
-		ret = __kvm_vcpu_run(vcpu);
+		ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
@@ -36,21 +36,21 @@ static void handle_host_hcall(unsigned long func_id,
 		phys_addr_t ipa = host_ctxt->regs.regs[2];
 		int level = host_ctxt->regs.regs[3];
 
-		__kvm_tlb_flush_vmid_ipa(mmu, ipa, level);
+		__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
 
-		__kvm_tlb_flush_vmid(mmu);
+		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
 
-		__kvm_tlb_flush_local_vmid(mmu);
+		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
@@ -84,14 +84,14 @@ static void handle_host_hcall(unsigned long func_id,
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
 
-		__vgic_v3_save_aprs(cpu_if);
+		__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
 		break;
 	}
 	case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
 		unsigned long r1 = host_ctxt->regs.regs[1];
 		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
 
-		__vgic_v3_restore_aprs(cpu_if);
+		__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
 		break;
 	}
 	default:
...
@@ -176,8 +176,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}
 
-	vcpu = kern_hyp_va(vcpu);
-
 	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
...
@@ -54,7 +54,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	mmu = kern_hyp_va(mmu);
 	__tlb_switch_to_guest(mmu, &cxt);
 
 	/*
@@ -108,7 +107,6 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	mmu = kern_hyp_va(mmu);
 	__tlb_switch_to_guest(mmu, &cxt);
 
 	__tlbi(vmalls12e1is);
...
@@ -662,7 +662,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	if (likely(cpu_if->vgic_sre))
 		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
 
-	kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
+	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -686,7 +686,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 	vgic_v3_vmcr_sync(vcpu);
 
-	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
+	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_deactivate_traps(cpu_if);
...