Commit f08cbf5c authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Avoid changing MSR[RI] in entry and exit

kvm_hstate.in_guest provides the equivalent of MSR[RI]=0 protection,
and it covers the existing MSR[RI]=0 section in late entry and early
exit, so clearing and setting MSR[RI] in those cases does not
actually do anything useful.

Remove the RI manipulation and replace it with comments. Make the
in_guest memory accesses a bit closer to a proper critical section
pattern. This speeds up guest entry/exit performance.
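
For illustration, the critical-section shape the diff below moves to, as a minimal
standalone C sketch. The names guest_mode, GUEST_MODE_NONE/GUEST_MODE_HV_P9 (and its
value), compiler_barrier() and guest_entry_exit_body() are hypothetical stand-ins;
the real code uses WRITE_ONCE() and barrier() on local_paca->kvm_hstate.in_guest
around the actual MMU switch and guest run, as shown in the hunks below.

#include <stdio.h>

/* Hypothetical stand-ins for the per-CPU paca state used by the real code. */
enum { GUEST_MODE_NONE = 0, GUEST_MODE_HV_P9 = 3 };
static volatile int guest_mode = GUEST_MODE_NONE;

/* Compiler barrier, same idea as the kernel's barrier(). */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void guest_entry_exit_body(void)
{
	/*
	 * Stand-in for switching the MMU to the guest, running the guest and
	 * saving exit state. An NMI landing anywhere in here would observe
	 * guest_mode set and take the "bad interrupt" path, rather than the
	 * code relying on MSR[RI]=0 for protection.
	 */
	printf("guest section, mode=%d\n", guest_mode);
}

int main(void)
{
	/* Open the critical section before any guest state is installed. */
	guest_mode = GUEST_MODE_HV_P9;	/* kernel: WRITE_ONCE(...in_guest, KVM_GUEST_MODE_HV_P9) */
	compiler_barrier();

	guest_entry_exit_body();

	/* Close it only after host state has been restored. */
	compiler_barrier();
	guest_mode = GUEST_MODE_NONE;	/* kernel: WRITE_ONCE(...in_guest, KVM_GUEST_MODE_NONE) */
	/* From here, an interrupt is fully recoverable again. */
	return 0;
}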

This also removes the MSR[RI] warnings, which aren't very interesting and
would themselves cause crashes if they ever hit, because the warning takes
an interrupt in non-recoverable code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-48-npiggin@gmail.com
parent 241d1f19
@@ -904,7 +904,15 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	 * But TM could be split out if this would be a significant benefit.
 	 */
-	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+	/*
+	 * MSR[RI] does not need to be cleared (and is not, for radix guests
+	 * with no prefetch bug), because in_guest is set. If we take a SRESET
+	 * or MCE with in_guest set but still in HV mode, then
+	 * kvmppc_p9_bad_interrupt handles the interrupt, which effectively
+	 * clears MSR[RI] and doesn't return.
+	 */
+	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9);
+	barrier(); /* Open in_guest critical section */
 
 	/*
 	 * Hash host, hash guest, or radix guest with prefetch bug, all have
@@ -916,14 +924,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	save_clear_host_mmu(kvm);
 
-	if (kvm_is_radix(kvm)) {
+	if (kvm_is_radix(kvm))
 		switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
-		if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-			__mtmsrd(0, 1); /* clear RI */
-
-	} else {
+	else
 		switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
-	}
 
 	/* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
 	check_need_tlb_flush(kvm, vc->pcpu, nested);
@@ -978,19 +982,16 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
 
 		/*
-		 * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
-		 * and hstate scratch (which we need to move into exsave to make
-		 * re-entrant vs SRESET/MCE)
+		 * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate
+		 * scratch (which we need to move into exsave to make re-entrant vs
+		 * SRESET/MCE), register state is protected from reentrancy. However
+		 * timebase, MMU, among other state is still set to guest, so don't
+		 * enable MSR[RI] here. It gets enabled at the end, after in_guest
+		 * is cleared.
+		 *
+		 * It is possible an NMI could come in here, which is why it is
+		 * important to save the above state early so it can be debugged.
 		 */
-		if (ri_set) {
-			if (unlikely(!(mfmsr() & MSR_RI))) {
-				__mtmsrd(MSR_RI, 1);
-				WARN_ON_ONCE(1);
-			}
-		} else {
-			WARN_ON_ONCE(mfmsr() & MSR_RI);
-			__mtmsrd(MSR_RI, 1);
-		}
 
 		vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
 		vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
@@ -1048,13 +1049,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 			 */
 			mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
 			mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
-
-			/*
-			 * tm_return_to_guest re-loads SRR0/1, DAR,
-			 * DSISR after RI is cleared, in case they had
-			 * been clobbered by a MCE.
-			 */
-			__mtmsrd(0, 1); /* clear RI */
 			goto tm_return_to_guest;
 		}
 	}
@@ -1154,7 +1148,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+	barrier(); /* Close in_guest critical section */
+	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE);
+	/* Interrupts are recoverable at this point */
 
 	/*
 	 * cp_abort is required if the processor supports local copy-paste