Commit f4c51f84 authored by Paul Mackerras's avatar Paul Mackerras Committed by Michael Ellerman

KVM: PPC: Book3S HV: Modify guest entry/exit paths to handle radix guests

This adds code to branch around the parts that radix guests don't
need - clearing and loading the SLB with the guest SLB contents,
saving the guest SLB contents on exit, and restoring the host SLB
contents.

Since the host is now using radix, we need to save and restore the
host value for the PID register.

On hypervisor data/instruction storage interrupts, we don't do the
guest HPT lookup on radix, but just save the guest physical address
for the fault (from the ASDR register) in the vcpu struct.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 9e04ba69
...@@ -606,6 +606,7 @@ struct kvm_vcpu_arch { ...@@ -606,6 +606,7 @@ struct kvm_vcpu_arch {
ulong fault_dar; ulong fault_dar;
u32 fault_dsisr; u32 fault_dsisr;
unsigned long intr_msr; unsigned long intr_msr;
ulong fault_gpa; /* guest real address of page fault (POWER9) */
#endif #endif
#ifdef CONFIG_BOOKE #ifdef CONFIG_BOOKE
......
...@@ -498,6 +498,7 @@ int main(void) ...@@ -498,6 +498,7 @@ int main(void)
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits)); DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls)); DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(KVM_RADIX, offsetof(struct kvm, arch.radix));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
...@@ -537,6 +538,7 @@ int main(void) ...@@ -537,6 +538,7 @@ int main(void)
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_FAULT_GPA, offsetof(struct kvm_vcpu, arch.fault_gpa));
DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
......
...@@ -518,6 +518,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -518,6 +518,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Stack frame offsets */ /* Stack frame offsets */
#define STACK_SLOT_TID (112-16) #define STACK_SLOT_TID (112-16)
#define STACK_SLOT_PSSCR (112-24) #define STACK_SLOT_PSSCR (112-24)
#define STACK_SLOT_PID (112-32)
.global kvmppc_hv_entry .global kvmppc_hv_entry
kvmppc_hv_entry: kvmppc_hv_entry:
...@@ -530,6 +531,7 @@ kvmppc_hv_entry: ...@@ -530,6 +531,7 @@ kvmppc_hv_entry:
* R1 = host R1 * R1 = host R1
* R2 = TOC * R2 = TOC
* all other volatile GPRS = free * all other volatile GPRS = free
* Does not preserve non-volatile GPRs or CR fields
*/ */
mflr r0 mflr r0
std r0, PPC_LR_STKOFF(r1) std r0, PPC_LR_STKOFF(r1)
...@@ -549,32 +551,38 @@ kvmppc_hv_entry: ...@@ -549,32 +551,38 @@ kvmppc_hv_entry:
bl kvmhv_start_timing bl kvmhv_start_timing
1: 1:
#endif #endif
/* Clear out SLB */
/* Use cr7 as an indication of radix mode */
ld r5, HSTATE_KVM_VCORE(r13)
ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
lbz r0, KVM_RADIX(r9)
cmpwi cr7, r0, 0
/* Clear out SLB if hash */
bne cr7, 2f
li r6,0 li r6,0
slbmte r6,r6 slbmte r6,r6
slbia slbia
ptesync ptesync
2:
/* /*
* POWER7/POWER8 host -> guest partition switch code. * POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies, * We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads. * but we do have to coordinate across hardware threads.
*/ */
/* Set bit in entry map iff exit map is zero. */ /* Set bit in entry map iff exit map is zero. */
ld r5, HSTATE_KVM_VCORE(r13)
li r7, 1 li r7, 1
lbz r6, HSTATE_PTID(r13) lbz r6, HSTATE_PTID(r13)
sld r7, r7, r6 sld r7, r7, r6
addi r9, r5, VCORE_ENTRY_EXIT addi r8, r5, VCORE_ENTRY_EXIT
21: lwarx r3, 0, r9 21: lwarx r3, 0, r8
cmpwi r3, 0x100 /* any threads starting to exit? */ cmpwi r3, 0x100 /* any threads starting to exit? */
bge secondary_too_late /* if so we're too late to the party */ bge secondary_too_late /* if so we're too late to the party */
or r3, r3, r7 or r3, r3, r7
stwcx. r3, 0, r9 stwcx. r3, 0, r8
bne 21b bne 21b
/* Primary thread switches to guest partition. */ /* Primary thread switches to guest partition. */
ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
cmpwi r6,0 cmpwi r6,0
bne 10f bne 10f
lwz r7,KVM_LPID(r9) lwz r7,KVM_LPID(r9)
...@@ -658,7 +666,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -658,7 +666,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
beq kvmppc_primary_no_guest beq kvmppc_primary_no_guest
kvmppc_got_guest: kvmppc_got_guest:
/* Load up guest SLB entries */ /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4) lwz r5,VCPU_SLB_MAX(r4)
cmpwi r5,0 cmpwi r5,0
beq 9f beq 9f
...@@ -696,8 +704,10 @@ kvmppc_got_guest: ...@@ -696,8 +704,10 @@ kvmppc_got_guest:
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
mfspr r5, SPRN_TIDR mfspr r5, SPRN_TIDR
mfspr r6, SPRN_PSSCR mfspr r6, SPRN_PSSCR
mfspr r7, SPRN_PID
std r5, STACK_SLOT_TID(r1) std r5, STACK_SLOT_TID(r1)
std r6, STACK_SLOT_PSSCR(r1) std r6, STACK_SLOT_PSSCR(r1)
std r7, STACK_SLOT_PID(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
...@@ -1293,11 +1303,15 @@ mc_cont: ...@@ -1293,11 +1303,15 @@ mc_cont:
mtspr SPRN_CTRLT,r6 mtspr SPRN_CTRLT,r6
4: 4:
/* Read the guest SLB and save it away */ /* Read the guest SLB and save it away */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
cmpwi r0, 0
li r5, 0
bne 3f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0 mtctr r0
li r6,0 li r6,0
addi r7,r9,VCPU_SLB addi r7,r9,VCPU_SLB
li r5,0
1: slbmfee r8,r6 1: slbmfee r8,r6
andis. r0,r8,SLB_ESID_V@h andis. r0,r8,SLB_ESID_V@h
beq 2f beq 2f
...@@ -1309,7 +1323,7 @@ mc_cont: ...@@ -1309,7 +1323,7 @@ mc_cont:
addi r5,r5,1 addi r5,r5,1
2: addi r6,r6,1 2: addi r6,r6,1
bdnz 1b bdnz 1b
stw r5,VCPU_SLB_MAX(r9) 3: stw r5,VCPU_SLB_MAX(r9)
/* /*
* Save the guest PURR/SPURR * Save the guest PURR/SPURR
...@@ -1558,8 +1572,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -1558,8 +1572,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1) ld r5, STACK_SLOT_TID(r1)
ld r6, STACK_SLOT_PSSCR(r1) ld r6, STACK_SLOT_PSSCR(r1)
ld r7, STACK_SLOT_PID(r1)
mtspr SPRN_TIDR, r5 mtspr SPRN_TIDR, r5
mtspr SPRN_PSSCR, r6 mtspr SPRN_PSSCR, r6
mtspr SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/* /*
...@@ -1671,6 +1687,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -1671,6 +1687,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
isync isync
/* load host SLB entries */ /* load host SLB entries */
BEGIN_MMU_FTR_SECTION
b 0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13) ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED .rept SLB_NUM_BOLTED
...@@ -1683,7 +1702,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -1683,7 +1702,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbmte r6,r5 slbmte r6,r5
1: addi r8,r8,16 1: addi r8,r8,16
.endr .endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/* Finish timing, if we have a vcpu */ /* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13) ld r4, HSTATE_KVM_VCPU(r13)
...@@ -1710,8 +1729,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -1710,8 +1729,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* reflect the HDSI to the guest as a DSI. * reflect the HDSI to the guest as a DSI.
*/ */
kvmppc_hdsi: kvmppc_hdsi:
ld r3, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r3)
cmpwi r0, 0
mfspr r4, SPRN_HDAR mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR mfspr r6, SPRN_HDSISR
bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */ /* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */ beq 1f /* if not, send it to the guest */
...@@ -1788,11 +1811,23 @@ fast_interrupt_c_return: ...@@ -1788,11 +1811,23 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13) stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont b guest_exit_cont
.Lradix_hdsi:
std r4, VCPU_FAULT_DAR(r9)
stw r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
mfspr r5, SPRN_ASDR
std r5, VCPU_FAULT_GPA(r9)
b guest_exit_cont
/* /*
* Similarly for an HISI, reflect it to the guest as an ISI unless * Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out. * it is an HPTE not found fault for a page that we have paged out.
*/ */
kvmppc_hisi: kvmppc_hisi:
ld r3, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r3)
cmpwi r0, 0
bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f beq 1f
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment