Commit eacc8188 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: POWER10 enable HAIL when running radix guests

HV interrupts may be taken with the MMU enabled when radix guests are
running. Enable LPCR[HAIL] on ISA v3.1 processors for radix guests.
Make this depend on the host LPCR[HAIL] being enabled. Currently that is
always enabled, but having this test means any issue that might require
LPCR[HAIL] to be disabled in the host will not have to be duplicated in
KVM.

This optimisation takes 1380 cycles off a NULL hcall entry+exit micro
benchmark on a POWER10.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-9-npiggin@gmail.com
parent 25aa1458
@@ -5073,6 +5073,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
  */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+	unsigned long lpcr, lpcr_mask;
+
 	if (nesting_enabled(kvm))
 		kvmhv_release_all_nested(kvm);
 	kvmppc_rmap_reset(kvm);
@@ -5082,8 +5084,13 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 	kvm->arch.radix = 0;
 	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_radix(kvm);
-	kvmppc_update_lpcr(kvm, LPCR_VPM1,
-			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+	lpcr = LPCR_VPM1;
+	lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		lpcr_mask |= LPCR_HAIL;
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	return 0;
 }
@@ -5093,6 +5100,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
  */
 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
+	unsigned long lpcr, lpcr_mask;
 	int err;
 
 	err = kvmppc_init_vm_radix(kvm);
@@ -5104,8 +5112,17 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 	kvm->arch.radix = 1;
 	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_hpt(&kvm->arch.hpt);
-	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
-			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+	lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		lpcr_mask |= LPCR_HAIL;
+		if (cpu_has_feature(CPU_FTR_HVMODE) &&
+				(kvm->arch.host_lpcr & LPCR_HAIL))
+			lpcr |= LPCR_HAIL;
+	}
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	return 0;
 }
@@ -5269,6 +5286,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 		kvm->arch.mmu_ready = 1;
 		lpcr &= ~LPCR_VPM1;
 		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+		if (cpu_has_feature(CPU_FTR_HVMODE) &&
+		    cpu_has_feature(CPU_FTR_ARCH_31) &&
+		    (kvm->arch.host_lpcr & LPCR_HAIL))
+			lpcr |= LPCR_HAIL;
 		ret = kvmppc_init_vm_radix(kvm);
 		if (ret) {
 			kvmppc_free_lpid(kvm->arch.lpid);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment