Commit 94a04bc2 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: POWER9 does not require secondary thread management

POWER9 CPUs have independent MMU contexts per thread, so KVM does not
need to quiesce secondary threads, and the hwthread_req/hwthread_state
protocol does not have to be used. Patch it away on POWER9, and also
patch away the branch from the Linux idle wakeup code to
kvm_start_guest, which is never taken there.

Add a warning and error out of kvmppc_grab_hwthread in case it is ever
called on POWER9.

This avoids a hwsync in the idle wakeup path on POWER9.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Paul Mackerras <paulus@ozlabs.org>
[mpe: Use WARN(...) instead of WARN_ON()/pr_err(...)]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 94171b19
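
To make the handshake described in the commit message concrete, here is a rough
userspace sketch (not kernel code). The struct, its fields, the helper names,
and the power9 flag are illustrative stand-ins for the real paca /
kvmppc_host_state fields and the cpu_has_feature(CPU_FTR_ARCH_300) check, and
__sync_synchronize() stands in for the hwsync that POWER9 now avoids in the
idle wakeup path.

#include <stdbool.h>
#include <stdio.h>

enum hwthread_state { HWTHREAD_IN_KERNEL, HWTHREAD_IN_IDLE };

struct host_state {                     /* stand-in for kvmppc_host_state */
	enum hwthread_state hwthread_state;
	int hwthread_req;               /* set when KVM wants this thread */
};

/* stand-in for cpu_has_feature(CPU_FTR_ARCH_300) */
static bool cpu_is_isa_v3_0(bool power9)
{
	return power9;
}

/*
 * Model of the idle wakeup path.  Pre-POWER9: advertise that the thread is
 * back in the kernel, order that store against the load of hwthread_req
 * (the hwsync the commit message says POWER9 now avoids), and enter KVM if
 * it asked for the thread.  POWER9: the whole check is patched out.
 */
static const char *powersave_wakeup(struct host_state *hs, bool power9)
{
	if (cpu_is_isa_v3_0(power9))
		return "return to Linux idle wakeup (no KVM check, no hwsync)";

	hs->hwthread_state = HWTHREAD_IN_KERNEL;
	__sync_synchronize();           /* models the "sync" instruction */
	if (hs->hwthread_req)
		return "branch to kvm_start_guest";
	return "return to Linux idle wakeup";
}

int main(void)
{
	struct host_state hs = {
		.hwthread_state = HWTHREAD_IN_IDLE,
		.hwthread_req = 1,
	};

	printf("POWER8-style CPU, req=1: %s\n", powersave_wakeup(&hs, false));
	printf("POWER9-style CPU, req=1: %s\n", powersave_wakeup(&hs, true));
	return 0;
}
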
@@ -104,6 +104,10 @@ struct kvmppc_host_state {
 	u8 napping;
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/*
+	 * hwthread_req/hwthread_state pair is used to pull sibling threads
+	 * out of guest on pre-ISAv3.0B CPUs where threads share MMU.
+	 */
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
...
@@ -242,13 +242,20 @@ enter_winkle:
 /*
  * r3 - PSSCR value corresponding to the requested stop state.
  */
-power_enter_stop:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	/* Tell KVM we're entering idle */
+power_enter_stop_kvm_rm:
+	/*
+	 * This is currently unused because POWER9 KVM does not have to
+	 * gather secondary threads into sibling mode, but the code is
+	 * here in case that function is required.
+	 *
+	 * Tell KVM we're entering idle.
+	 */
 	li	r4,KVM_HWTHREAD_IN_IDLE
 	/* DO THIS IN REAL MODE!  See comment above. */
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
+power_enter_stop:
 /*
  * Check if we are executing the lite variant with ESL=EC=0
  */
@@ -411,6 +418,18 @@ pnv_powersave_wakeup_mce:
 	b	pnv_powersave_wakeup
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm_start_guest_check:
+	li	r0,KVM_HWTHREAD_IN_KERNEL
+	stb	r0,HSTATE_HWTHREAD_STATE(r13)
+	/* Order setting hwthread_state vs. testing hwthread_req */
+	sync
+	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+	cmpwi	r0,0
+	beqlr
+	b	kvm_start_guest
+#endif
+
 /*
  * Called from reset vector for powersave wakeups.
  * cr3 - set to gt if waking up with partial/complete hypervisor state loss
@@ -435,15 +454,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 	mr	r3,r12
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	li	r0,KVM_HWTHREAD_IN_KERNEL
-	stb	r0,HSTATE_HWTHREAD_STATE(r13)
-	/* Order setting hwthread_state vs. testing hwthread_req */
-	sync
-	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
-	cmpwi	r0,0
-	beq	1f
-	b	kvm_start_guest
-1:
+BEGIN_FTR_SECTION
+	bl	kvm_start_guest_check
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #endif
 
 	/* Return SRR1 from power7_nap() */
...
@@ -2111,6 +2111,15 @@ static int kvmppc_grab_hwthread(int cpu)
 	struct paca_struct *tpaca;
 	long timeout = 10000;
 
+	/*
+	 * ISA v3.0 idle routines do not set hwthread_state or test
+	 * hwthread_req, so they can not grab idle threads.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		WARN(1, "KVM: can not control sibling threads\n");
+		return -EBUSY;
+	}
+
 	tpaca = &paca[cpu];
 
 	/* Ensure the thread won't go into the kernel if it wakes */
@@ -2145,10 +2154,12 @@ static void kvmppc_release_hwthread(int cpu)
 	struct paca_struct *tpaca;
 
 	tpaca = &paca[cpu];
-	tpaca->kvm_hstate.hwthread_req = 0;
 	tpaca->kvm_hstate.kvm_vcpu = NULL;
 	tpaca->kvm_hstate.kvm_vcore = NULL;
 	tpaca->kvm_hstate.kvm_split_mode = NULL;
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		tpaca->kvm_hstate.hwthread_req = 0;
+
 }
 
 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
...
@@ -149,9 +149,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	subf	r4, r4, r3
 	mtspr	SPRN_DEC, r4
 
+BEGIN_FTR_SECTION
 	/* hwthread_req may have got set by cede or no vcpu, so clear it */
 	li	r0, 0
 	stb	r0, HSTATE_HWTHREAD_REQ(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 
 	/*
 	 * For external interrupts we need to call the Linux
@@ -314,6 +316,7 @@ kvm_novcpu_exit:
  * Relocation is off and most register values are lost.
  * r13 points to the PACA.
  * r3 contains the SRR1 wakeup value, SRR1 is trashed.
+ * This is not used by ISAv3.0B processors.
  */
 	.globl	kvm_start_guest
 kvm_start_guest:
@@ -432,6 +435,9 @@ kvm_secondary_got_guest:
  * While waiting we also need to check if we get given a vcpu to run.
  */
 kvm_no_guest:
+BEGIN_FTR_SECTION
+	twi	31,0,0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
 	cmpwi	r3, 0
 	bne	53f
@@ -2466,8 +2472,10 @@ kvm_do_nap:
 	clrrdi	r0, r0, 1
 	mtspr	SPRN_CTRLT, r0
 
+BEGIN_FTR_SECTION
 	li	r0,1
 	stb	r0,HSTATE_HWTHREAD_REQ(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mfspr	r5,SPRN_LPCR
 	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
 BEGIN_FTR_SECTION
...
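
As a companion to the sketch above, here is a similarly hedged userspace model
of the book3s_hv.c side of the change: grabbing a sibling hardware thread now
fails early with a warning on ISA v3.0 CPUs, and the release path only bothers
clearing hwthread_req on older CPUs. The names below are simplified stand-ins,
not the kernel's actual paca accessors.

#include <stdbool.h>
#include <stdio.h>

#define EBUSY 16                        /* matches the usual errno value */

struct host_state {                     /* stand-in for the per-CPU paca state */
	int hwthread_req;
	int kvm_vcpu;                   /* non-zero while a vcpu is attached */
};

static bool cpu_is_isa_v3_0 = true;     /* pretend we are on POWER9 */

static int grab_hwthread(struct host_state *hs)
{
	/*
	 * Mirrors the new early exit: ISA v3.0 idle routines no longer set
	 * hwthread_state or test hwthread_req, so an idle sibling thread
	 * cannot be pulled out of the Linux idle loop.
	 */
	if (cpu_is_isa_v3_0) {
		fprintf(stderr, "KVM: can not control sibling threads\n");
		return -EBUSY;
	}
	hs->hwthread_req = 1;           /* ask the thread to enter KVM */
	return 0;
}

static void release_hwthread(struct host_state *hs)
{
	hs->kvm_vcpu = 0;
	if (!cpu_is_isa_v3_0)           /* only meaningful before POWER9 */
		hs->hwthread_req = 0;
}

int main(void)
{
	struct host_state hs = { 0 };

	if (grab_hwthread(&hs) == -EBUSY)
		printf("grab refused on ISA v3.0, as expected\n");
	release_hwthread(&hs);
	return 0;
}
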