Commit 3d3319b4 authored by Alexander Graf

KVM: PPC: Book3S: PR: Enable interrupts earlier

Now that the svcpu sync is interrupt aware we can enable interrupts
earlier in the exit code path again, moving 32bit and 64bit closer
together.

While at it, document the fact that we're always executing the exit
path with interrupts enabled so that the next person doesn't trip
over this.
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 40fdd8c8
@@ -129,6 +129,7 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
@@ -148,11 +149,6 @@ kvm_start_lightweight:
 	nop

 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
...
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function.  On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment