Commit 2ce008c8 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: Remove unused nested HV tests in XICS emulation

Commit f3c18e93 ("KVM: PPC: Book3S HV: Use XICS hypercalls when
running as a nested hypervisor") added nested HV tests in XICS
hypercalls, but not all are required.

* icp_eoi is only called by kvmppc_deliver_irq_passthru which is only
  called by kvmppc_check_passthru which is only called by
  kvmppc_read_one_intr.

* kvmppc_read_one_intr is only called by kvmppc_read_intr which is only
  called by the L0 HV rmhandlers code.

* kvmhv_rm_send_ipi is called by:
  - kvmhv_interrupt_vcore which is only called by kvmhv_commence_exit
    which is only called by the L0 HV rmhandlers code.
  - icp_send_hcore_msg which is only called by icp_rm_set_vcpu_irq.
  - icp_rm_set_vcpu_irq which is only called by icp_rm_try_update.
  - icp_rm_set_vcpu_irq is not nested-HV safe because it writes to
    LPCR directly without a kvmhv_on_pseries test (see the sketch
    after this list). Nested handlers should not, in general, be
    using the rm handlers.
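
(Sketch referenced above; paraphrased from memory, not a verbatim quote of
book3s_hv_rm_xics.c.) The problematic write is the real-mode "kick self"
shortcut at the top of icp_rm_set_vcpu_irq, which sets LPCR[MER] with a
direct mtspr. LPCR is a hypervisor-privileged SPR, so this path is only
valid when running as the real (L0) hypervisor:

        /* Kick self?  Just set MER in LPCR so a mediated external
         * interrupt is delivered when we return to the guest.
         * (Simplified sketch, not verbatim kernel source.)
         */
        if (vcpu == this_vcpu) {
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
                return;
        }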

The important test seems to be the one in kvmppc_ipi_thread, which
causes the virt-mode H_IPI handler kick to be sent with
smp_call_function rather than msgsnd.
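
For reference, that remaining test is roughly of the following shape (a
simplified sketch of kvmppc_ipi_thread, not the exact book3s_hv.c source):
when running as a nested hypervisor the function declines to use a
real-mode doorbell, so the caller falls back to a virt-mode kick instead.

        /* Simplified sketch, not verbatim kernel source. */
        static bool kvmppc_ipi_thread(int cpu)
        {
                unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

                /* Nested HV: no msgsnd; return false so the caller sends
                 * an ordinary virt-mode IPI to kick the target instead. */
                if (kvmhv_on_pseries())
                        return false;

                /* On POWER9, msgsnd can target any hardware thread. */
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        msg |= get_hard_smp_processor_id(cpu);
                        smp_mb();
                        __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                        return true;
                }

                /* Other cases omitted; fall back to the caller's kick. */
                return false;
        }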

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-26-npiggin@gmail.com

@@ -199,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
         void __iomem *xics_phys;
         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 
-        /* For a nested hypervisor, use the XICS via hcall */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
-                                IPI_PRIORITY);
-                return;
-        }
-
         /* On POWER9 we can use msgsnd for any destination cpu. */
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                 msg |= get_hard_smp_processor_id(cpu);

@@ -420,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
                 return 1;
 
         /* Now read the interrupt from the ICP */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
-                xirr = cpu_to_be32(retbuf[0]);
-        } else {
-                xics_phys = local_paca->kvm_hstate.xics_phys;
-                rc = 0;
-                if (!xics_phys)
-                        rc = opal_int_get_xirr(&xirr, false);
-                else
-                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
-        }
+        xics_phys = local_paca->kvm_hstate.xics_phys;
+        rc = 0;
+        if (!xics_phys)
+                rc = opal_int_get_xirr(&xirr, false);
+        else
+                xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
         if (rc < 0)
                 return 1;
 

@@ -461,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
          */
         if (xisr == XICS_IPI) {
                 rc = 0;
-                if (kvmhv_on_pseries()) {
-                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                        plpar_hcall_raw(H_IPI, retbuf,
-                                        hard_smp_processor_id(), 0xff);
-                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
-                } else if (xics_phys) {
+                if (xics_phys) {
                         __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                         __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                 } else {

@@ -493,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
                         /* We raced with the host,
                          * we need to resend that IPI, bummer
                          */
-                        if (kvmhv_on_pseries()) {
-                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                                plpar_hcall_raw(H_IPI, retbuf,
-                                                hard_smp_processor_id(),
-                                                IPI_PRIORITY);
-                        } else if (xics_phys)
+                        if (xics_phys)
                                 __raw_rm_writeb(IPI_PRIORITY,
                                                 xics_phys + XICS_MFRR);
                         else

@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                 return;
         }
 
-        if (xive_enabled() && kvmhv_on_pseries()) {
-                /* No XICS access or hypercalls available, too hard */
-                this_icp->rm_action |= XICS_RM_KICK_VCPU;
-                this_icp->rm_kick_target = vcpu;
-                return;
-        }
-
         /*
          * Check if the core is loaded,
          * if not, find an available host core to post to wake the VCPU,

@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
         void __iomem *xics_phys;
         int64_t rc;
 
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                iosync();
-                plpar_hcall_raw(H_EOI, retbuf, hwirq);
-                return;
-        }
-
         rc = pnv_opal_pci_msi_eoi(c, hwirq);
 
         if (rc)