Commit 48013cbc authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Move radix MMU switching instructions together

Switching the MMU between radix host and radix guest mode is tricky,
particularly as the MMU can remain enabled across the switch, which
requires a particular sequence of SPR updates. Move these updates
together into their own functions.

This also moves the radix TLB check / flush, because it is tied to MMU
switching: tlbiel takes its LPID from LPIDR, so the guest LPID must be
in LPIDR before the flush.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-13-npiggin@gmail.com
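For orientation before the diff: mtspr updates to LPIDR, LPCR and PIDR are not context-synchronizing, and tlbiel flushes translations for whatever LPID is currently in LPIDR. A condensed sketch of the guest-side sequence the new helper implements (names are taken from the patch below; per the in-code comment, the isync()s are deliberately conservative):

        /* Condensed from switch_mmu_to_guest_radix() in the diff below. */
        isync();
        mtspr(SPRN_LPID, lpid);           /* guest (or nested shadow) LPID */
        isync();
        mtspr(SPRN_LPCR, lpcr);
        isync();
        mtspr(SPRN_PID, vcpu->arch.pid);  /* guest PID */
        isync();
        /* tlbiel uses LPIDR, so the TLB check/flush comes after LPIDR is set. */
        kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);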
parent 09512c29
@@ -3478,12 +3478,49 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	trace_kvmppc_run_core(vc, 1);
 }
 
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	struct kvm_nested_guest *nested = vcpu->arch.nested;
+	u32 lpid;
+
+	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+	/*
+	 * All the isync()s are overkill but trivially follow the ISA
+	 * requirements. Some can likely be replaced with justification
+	 * comment for why they are not needed.
+	 */
+	isync();
+	mtspr(SPRN_LPID, lpid);
+	isync();
+	mtspr(SPRN_LPCR, lpcr);
+	isync();
+	mtspr(SPRN_PID, vcpu->arch.pid);
+	isync();
+
+	/* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
+	kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+}
+
+static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+{
+	isync();
+	mtspr(SPRN_PID, pid);
+	isync();
+	mtspr(SPRN_LPID, kvm->arch.host_lpid);
+	isync();
+	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	isync();
+}
+
 /*
  * Load up hypervisor-mode registers on P9.
  */
 static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 				     unsigned long lpcr)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec;
 	u64 tb, purr, spurr;
@@ -3535,7 +3572,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
 	mtspr(SPRN_IC, vcpu->arch.ic);
-	mtspr(SPRN_PID, vcpu->arch.pid);
 
 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -3549,8 +3585,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	mtspr(SPRN_LPCR, lpcr);
-	isync();
+	switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 
 	/*
 	 * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
@@ -3593,7 +3628,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 		mtspr(SPRN_DAWR1, host_dawr1);
 		mtspr(SPRN_DAWRX1, host_dawrx1);
 	}
-	mtspr(SPRN_PID, host_pidr);
 
 	/*
 	 * Since this is radix, do a eieio; tlbsync; ptesync sequence in
@@ -3608,9 +3642,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
 		asm volatile(PPC_CP_ABORT);
 
-	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
-	isync();
-
 	vc->dpdes = mfspr(SPRN_DPDES);
 	vc->vtb = mfspr(SPRN_VTB);
 	mtspr(SPRN_DPDES, 0);
@@ -3627,7 +3658,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 
 	mtspr(SPRN_HDEC, 0x7fffffff);
-	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+	switch_mmu_to_host_radix(kvm, host_pidr);
 
 	return trap;
 }
@@ -4181,7 +4213,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 {
 	struct kvm_run *run = vcpu->run;
 	int trap, r, pcpu;
-	int srcu_idx, lpid;
+	int srcu_idx;
 	struct kvmppc_vcore *vc;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4255,13 +4287,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc->vcore_state = VCORE_RUNNING;
 	trace_kvmppc_run_core(vc, 0);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
-		mtspr(SPRN_LPID, lpid);
-		isync();
-		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
-	}
-
 	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4280,11 +4305,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		mtspr(SPRN_LPID, kvm->arch.host_lpid);
-		isync();
-	}
-
 	set_irq_happened(trap);
 
 	kvmppc_set_host_core(pcpu);
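The two helpers are meant to be used as a pair around guest entry. A minimal sketch of that pairing, assuming host_pidr was captured earlier in kvmhv_load_hv_regs_and_go via mfspr(SPRN_PID) (that capture is outside the hunks shown here):

        unsigned long host_pidr = mfspr(SPRN_PID);  /* save host PID before entry */
        switch_mmu_to_guest_radix(kvm, vcpu, lpcr); /* LPID/LPCR/PID + TLB check */
        /* ... low-level guest entry and exit handling ... */
        switch_mmu_to_host_radix(kvm, host_pidr);   /* restore host PID, LPID, LPCR */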