Commit 53af3ba2 authored by Paul Mackerras's avatar Paul Mackerras Committed by Michael Ellerman

KVM: PPC: Book3S HV: Allow guest exit path to have MMU on

If we allow LPCR[AIL] to be set for radix guests, then interrupts from
the guest to the host can be delivered by the hardware with relocation
on, and thus the code path starting at kvmppc_interrupt_hv can be
executed in virtual mode (MMU on) for radix guests (previously it was
only ever executed in real mode).

Most of the code is indifferent to whether the MMU is on or off, but
the calls to OPAL that use the real-mode OPAL entry code need to
be switched to use the virtual-mode code instead.  The affected
calls are the calls to the OPAL XICS emulation functions in
kvmppc_read_one_intr() and related functions.  We test the MSR[IR]
bit to detect whether we are in real or virtual mode, and call the
opal_rm_* or opal_* function as appropriate.

The other place that depends on the MMU being off is the optimization
where the guest exit code jumps to the external interrupt vector or
hypervisor doorbell interrupt vector, or returns to its caller (which
is __kvmppc_vcore_entry).  If the MMU is on and we are returning to
the caller, then we don't need to use an rfid instruction since the
MMU is already on; a simple blr suffices.  If there is an external
or hypervisor doorbell interrupt to handle, we branch to the
relocation-on version of the interrupt vector.
Signed-off-by: default avatarPaul Mackerras <paulus@ozlabs.org>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
parent a29ebeaf
...@@ -29,6 +29,11 @@ ...@@ -29,6 +29,11 @@
#include <asm/opal.h> #include <asm/opal.h>
#include <asm/smp.h> #include <asm/smp.h>
/*
 * Report whether the CPU is currently executing in real mode,
 * i.e. with the MMU off.  MSR[IR] (instruction relocation) is
 * clear in real mode and set once translation is enabled.
 */
static bool in_realmode(void)
{
	return (mfmsr() & MSR_IR) == 0;
}
#define KVM_CMA_CHUNK_ORDER 18 #define KVM_CMA_CHUNK_ORDER 18
/* /*
...@@ -200,7 +205,6 @@ static inline void rm_writeb(unsigned long paddr, u8 val) ...@@ -200,7 +205,6 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
/* /*
* Send an interrupt or message to another CPU. * Send an interrupt or message to another CPU.
* This can only be called in real mode.
* The caller needs to include any barrier needed to order writes * The caller needs to include any barrier needed to order writes
* to memory vs. the IPI/message. * to memory vs. the IPI/message.
*/ */
...@@ -226,7 +230,9 @@ void kvmhv_rm_send_ipi(int cpu) ...@@ -226,7 +230,9 @@ void kvmhv_rm_send_ipi(int cpu)
/* Else poke the target with an IPI */ /* Else poke the target with an IPI */
xics_phys = paca[cpu].kvm_hstate.xics_phys; xics_phys = paca[cpu].kvm_hstate.xics_phys;
if (xics_phys) if (!in_realmode())
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
else if (xics_phys)
rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY); rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
else else
opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu), opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
...@@ -412,14 +418,15 @@ static long kvmppc_read_one_intr(bool *again) ...@@ -412,14 +418,15 @@ static long kvmppc_read_one_intr(bool *again)
/* Now read the interrupt from the ICP */ /* Now read the interrupt from the ICP */
xics_phys = local_paca->kvm_hstate.xics_phys; xics_phys = local_paca->kvm_hstate.xics_phys;
if (!xics_phys) { rc = 0;
/* Use OPAL to read the XIRR */ if (!in_realmode())
rc = opal_int_get_xirr(&xirr, false);
else if (!xics_phys)
rc = opal_rm_int_get_xirr(&xirr, false); rc = opal_rm_int_get_xirr(&xirr, false);
else
xirr = _lwzcix(xics_phys + XICS_XIRR);
if (rc < 0) if (rc < 0)
return 1; return 1;
} else {
xirr = _lwzcix(xics_phys + XICS_XIRR);
}
/* /*
* Save XIRR for later. Since we get control in reverse endian * Save XIRR for later. Since we get control in reverse endian
...@@ -445,15 +452,19 @@ static long kvmppc_read_one_intr(bool *again) ...@@ -445,15 +452,19 @@ static long kvmppc_read_one_intr(bool *again)
* If it is an IPI, clear the MFRR and EOI it. * If it is an IPI, clear the MFRR and EOI it.
*/ */
if (xisr == XICS_IPI) { if (xisr == XICS_IPI) {
if (xics_phys) { rc = 0;
if (!in_realmode()) {
opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
rc = opal_int_eoi(h_xirr);
} else if (xics_phys) {
_stbcix(xics_phys + XICS_MFRR, 0xff); _stbcix(xics_phys + XICS_MFRR, 0xff);
_stwcix(xics_phys + XICS_XIRR, xirr); _stwcix(xics_phys + XICS_XIRR, xirr);
} else { } else {
opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff); opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
rc = opal_rm_int_eoi(h_xirr); rc = opal_rm_int_eoi(h_xirr);
}
/* If rc > 0, there is another interrupt pending */ /* If rc > 0, there is another interrupt pending */
*again = rc > 0; *again = rc > 0;
}
/* /*
* Need to ensure side effects of above stores * Need to ensure side effects of above stores
...@@ -471,7 +482,10 @@ static long kvmppc_read_one_intr(bool *again) ...@@ -471,7 +482,10 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host, /* We raced with the host,
* we need to resend that IPI, bummer * we need to resend that IPI, bummer
*/ */
if (xics_phys) if (!in_realmode())
opal_int_set_mfrr(hard_smp_processor_id(),
IPI_PRIORITY);
else if (xics_phys)
_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY); _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
else else
opal_rm_int_set_mfrr(hard_smp_processor_id(), opal_rm_int_set_mfrr(hard_smp_processor_id(),
......
...@@ -70,11 +70,9 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) ...@@ -70,11 +70,9 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
hcpu = hcore << threads_shift; hcpu = hcore << threads_shift;
kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu; kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION); smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
if (paca[hcpu].kvm_hstate.xics_phys) kvmppc_set_host_ipi(hcpu, 1);
icp_native_cause_ipi_rm(hcpu); smp_mb();
else kvmhv_rm_send_ipi(hcpu);
opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
IPI_PRIORITY);
} }
#else #else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { } static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
......
...@@ -148,6 +148,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -148,6 +148,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
addi r1, r1, 112 addi r1, r1, 112
ld r7, HSTATE_HOST_MSR(r13) ld r7, HSTATE_HOST_MSR(r13)
/*
* If we came back from the guest via a relocation-on interrupt,
* we will be in virtual mode at this point, which makes it a
* little easier to get back to the caller.
*/
mfmsr r0
andi. r0, r0, MSR_IR /* in real mode? */
bne .Lvirt_return
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 11f beq 11f
...@@ -181,6 +190,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -181,6 +190,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_HSRR1, r7 mtspr SPRN_HSRR1, r7
ba 0xe80 ba 0xe80
/* Virtual-mode return - can't get here for HMI or machine check */
.Lvirt_return:
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 16f
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
beq 17f
andi. r0, r7, MSR_EE /* were interrupts hard-enabled? */
beq 18f
mtmsrd r7, 1 /* if so then re-enable them */
18: mtlr r8
blr
16: mtspr SPRN_HSRR0, r8 /* jump to reloc-on external vector */
mtspr SPRN_HSRR1, r7
b exc_virt_0x4500_hardware_interrupt
17: mtspr SPRN_HSRR0, r8
mtspr SPRN_HSRR1, r7
b exc_virt_0x4e80_h_doorbell
kvmppc_primary_no_guest: kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */ /* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */ /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment