Commit 6c85b7bc authored by Sukadev Bhattiprolu's avatar Sukadev Bhattiprolu Committed by Michael Ellerman

powerpc/kvm: Use UV_RETURN ucall to return to ultravisor

When an SVM makes a hypercall or incurs some other exception, the
Ultravisor usually forwards (a.k.a. reflects) the exceptions to the
Hypervisor. After processing the exception, Hypervisor uses the
UV_RETURN ultracall to return control back to the SVM.

The expected register state on entry to this ultracall is:

* Non-volatile registers are restored to their original values.
* If returning from a hypercall, register R0 contains the return value
  (unlike other ultracalls) and, registers R4 through R12 contain any
  output values of the hypercall.
* R3 contains the ultracall number, i.e UV_RETURN.
* If returning with a synthesized interrupt, R2 contains the
  synthesized interrupt number.

Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Signed-off-by: Claudio Carvalho <cclaudio@linux.ibm.com>
Acked-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190822034838.27876-8-cclaudio@linux.ibm.com
parent 512a5a64
...@@ -283,6 +283,7 @@ struct kvm_arch { ...@@ -283,6 +283,7 @@ struct kvm_arch {
cpumask_t cpu_in_guest; cpumask_t cpu_in_guest;
u8 radix; u8 radix;
u8 fwnmi_enabled; u8 fwnmi_enabled;
u8 secure_guest;
bool threads_indep; bool threads_indep;
bool nested_enable; bool nested_enable;
pgd_t *pgtable; pgd_t *pgtable;
......
...@@ -24,5 +24,6 @@ ...@@ -24,5 +24,6 @@
/* opcodes */ /* opcodes */
#define UV_WRITE_PATE 0xF104 #define UV_WRITE_PATE 0xF104
#define UV_RETURN 0xF11C
#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */ #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
...@@ -506,6 +506,7 @@ int main(void) ...@@ -506,6 +506,7 @@ int main(void)
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix); OFFSET(KVM_RADIX, kvm, arch.radix);
OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled); OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <asm/asm-compat.h> #include <asm/asm-compat.h>
#include <asm/feature-fixups.h> #include <asm/feature-fixups.h>
#include <asm/cpuidle.h> #include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>
/* Sign-extend HDEC if not on POWER9 */ /* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \ #define EXTEND_HDEC(reg) \
...@@ -1085,16 +1086,10 @@ BEGIN_FTR_SECTION ...@@ -1085,16 +1086,10 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r5, VCPU_LR(r4) ld r5, VCPU_LR(r4)
ld r6, VCPU_CR(r4)
mtlr r5 mtlr r5
mtcr r6
ld r1, VCPU_GPR(R1)(r4) ld r1, VCPU_GPR(R1)(r4)
ld r2, VCPU_GPR(R2)(r4)
ld r3, VCPU_GPR(R3)(r4)
ld r5, VCPU_GPR(R5)(r4) ld r5, VCPU_GPR(R5)(r4)
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
ld r8, VCPU_GPR(R8)(r4) ld r8, VCPU_GPR(R8)(r4)
ld r9, VCPU_GPR(R9)(r4) ld r9, VCPU_GPR(R9)(r4)
ld r10, VCPU_GPR(R10)(r4) ld r10, VCPU_GPR(R10)(r4)
...@@ -1112,10 +1107,42 @@ BEGIN_FTR_SECTION ...@@ -1112,10 +1107,42 @@ BEGIN_FTR_SECTION
mtspr SPRN_HDSISR, r0 mtspr SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r6, VCPU_KVM(r4)
lbz r7, KVM_SECURE_GUEST(r6)
cmpdi r7, 0
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
bne ret_to_ultra
lwz r0, VCPU_CR(r4)
mtcr r0
ld r0, VCPU_GPR(R0)(r4) ld r0, VCPU_GPR(R0)(r4)
ld r2, VCPU_GPR(R2)(r4)
ld r3, VCPU_GPR(R3)(r4)
ld r4, VCPU_GPR(R4)(r4) ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST HRFI_TO_GUEST
b . b .
/*
* Use UV_RETURN ultracall to return control back to the Ultravisor after
* processing an hypercall or interrupt that was forwarded (a.k.a. reflected)
* to the Hypervisor.
*
* All registers have already been loaded, except:
* R0 = hcall result
* R2 = SRR1, so UV can detect a synthesized interrupt (if any)
* R3 = UV_RETURN
*/
ret_to_ultra:
lwz r0, VCPU_CR(r4)
mtcr r0
ld r0, VCPU_GPR(R3)(r4)
mfspr r2, SPRN_SRR1
li r3, 0
ori r3, r3, UV_RETURN
ld r4, VCPU_GPR(R4)(r4)
sc 2
/* /*
* Enter the guest on a P9 or later system where we have exactly * Enter the guest on a P9 or later system where we have exactly
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment