Commit b49c65c5 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Improve mfmsr performance on entry

Rearrange the MSR saving on entry so that it does not follow the mtmsrd used to
disable interrupts, avoiding a possible read-after-write (RAW) scoreboard stall.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-46-npiggin@gmail.com
parent 46dea77f
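
The ordering change can be condensed from the hunks below into a short sketch. This is illustrative only, not a literal excerpt: everything except the MSR accesses is elided, and the surrounding facility-bit setup is omitted.

	/* Before: the MSR read (done inside msr_check_and_set()) follows
	 * shortly after the mtmsrd issued by hard_irq_disable(), so the
	 * mfmsr may stall until that MSR write completes (RAW hazard).
	 */
	hard_irq_disable();			/* mtmsrd: clear MSR[EE] */
	msr = msr_check_and_set(msr);		/* mfmsr soon after the mtmsrd */

	/* After: the MSR is read once, early, before any mtmsrd; the new
	 * helper works from that saved value and only writes the MSR (or
	 * hard-disables) without reading it back. mfmsr() is re-done later
	 * only if TM restore may have changed the MSR.
	 */
	msr = mfmsr() & ~MSR_EE;
	...
	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
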
@@ -154,6 +154,8 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
 	return radix;
 }
 
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
+
 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
 
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
...
@@ -3896,6 +3896,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	s64 dec;
 	int trap;
 
+	msr = mfmsr();
+
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -3906,24 +3908,10 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	 */
 	host_psscr = mfspr(SPRN_PSSCR_PR);
-	hard_irq_disable();
+
+	kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+
 	if (lazy_irq_pending())
 		return 0;
 
-	/* MSR bits may have been cleared by context switch */
-	msr = 0;
-	if (IS_ENABLED(CONFIG_PPC_FPU))
-		msr |= MSR_FP;
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		msr |= MSR_VEC;
-	if (cpu_has_feature(CPU_FTR_VSX))
-		msr |= MSR_VSX;
-	if ((cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
-			(vcpu->arch.hfscr & HFSCR_TM))
-		msr |= MSR_TM;
-	msr = msr_check_and_set(msr);
-
 	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
 		msr = mfmsr(); /* TM restore can update msr */
...
@@ -707,6 +707,44 @@ static void check_need_tlb_flush(struct kvm *kvm, int pcpu,
 	cpumask_clear_cpu(pcpu, need_tlb_flush);
 }
 
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
+{
+	unsigned long msr_needed = 0;
+
+	msr &= ~MSR_EE;
+
+	/* MSR bits may have been cleared by context switch so must recheck */
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr_needed |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_needed |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_needed |= MSR_VSX;
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+			(vcpu->arch.hfscr & HFSCR_TM))
+		msr_needed |= MSR_TM;
+
+	/*
+	 * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover unrecoverable
+	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
+	 *
+	 * Much more difficult and less worthwhile to combine with IR/DR
+	 * disable.
+	 */
+	if ((msr & msr_needed) != msr_needed) {
+		msr |= msr_needed;
+		__mtmsrd(msr, 0);
+	} else {
+		__hard_irq_disable();
+	}
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	return msr;
+}
+EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
+
 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
 {
 	struct p9_host_os_sprs host_os_sprs;
@@ -740,6 +778,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	vcpu->arch.ceded = 0;
 
+	/* Save MSR for restore, with EE clear. */
+	msr = mfmsr() & ~MSR_EE;
+
 	host_hfscr = mfspr(SPRN_HFSCR);
 	host_ciabr = mfspr(SPRN_CIABR);
 	host_psscr = mfspr(SPRN_PSSCR_PR);
@@ -761,35 +802,12 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	save_p9_host_os_sprs(&host_os_sprs);
 
-	/*
-	 * This could be combined with MSR[RI] clearing, but that expands
-	 * the unrecoverable window. It would be better to cover unrecoverable
-	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
-	 *
-	 * Much more difficult and less worthwhile to combine with IR/DR
-	 * disable.
-	 */
-	hard_irq_disable();
+	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
 	if (lazy_irq_pending()) {
 		trap = 0;
 		goto out;
 	}
 
-	/* MSR bits may have been cleared by context switch */
-	msr = 0;
-	if (IS_ENABLED(CONFIG_PPC_FPU))
-		msr |= MSR_FP;
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		msr |= MSR_VEC;
-	if (cpu_has_feature(CPU_FTR_VSX))
-		msr |= MSR_VSX;
-	if ((cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
-			(vcpu->arch.hfscr & HFSCR_TM))
-		msr |= MSR_TM;
-	msr = msr_check_and_set(msr);
-
-	/* Save MSR for restore. This is after hard disable, so EE is clear. */
 	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
 		msr = mfmsr(); /* MSR may have been updated */
...