Commit b0221556 authored by Suresh Warrier, committed by Alexander Graf

KVM: PPC: Book3S HV: Move virtual mode ICP functions to real-mode

Interrupt-based hypercalls return H_TOO_HARD to inform KVM that it needs
to switch to the host to complete the rest of hypercall function in
virtual mode. This patch ports the virtual mode ICS/ICP reject and resend
functions to be runnable in hypervisor real mode, thus avoiding the need
to switch to the host to execute these functions in virtual mode. However,
the hypercalls continue to return H_TOO_HARD for vcpu_wakeup and notify
events - these events cannot be done in real mode and they will still need
a switch to host virtual mode.

There are sufficient differences between the real mode code and the
virtual mode code for the ICS/ICP resend and reject functions that
for now the code has been duplicated instead of sharing common code.
In the future, we can look at creating common functions.
Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 34cb7954
...@@ -23,12 +23,39 @@ ...@@ -23,12 +23,39 @@
#define DEBUG_PASSUP #define DEBUG_PASSUP
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u32 new_irq);
/*
 * Store a single byte to a real (physical) address while running in
 * hypervisor real mode.
 *
 * stbcix is the Store Byte Caching Inhibited Indexed instruction; the
 * leading "sync" orders the store against all prior storage accesses.
 * The "memory" clobber stops the compiler reordering memory accesses
 * around the asm.
 */
static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("sync; stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
/* -- ICS routines -- */
/* -- ICS routines -- */

/*
 * Walk every source on one ICS and re-attempt delivery of each
 * interrupt that is flagged for resend.  Runs in real mode.
 */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int src;

	arch_spin_lock(&ics->lock);

	for (src = 0; src < KVMPPC_XICS_IRQ_PER_ICS; src++) {
		struct ics_irq_state *state = &ics->irq_state[src];

		if (state->resend) {
			/*
			 * Delivery may need to take other locks, so drop
			 * the ICS lock around the call and re-take it
			 * before looking at the next source.
			 */
			arch_spin_unlock(&ics->lock);
			icp_rm_deliver_irq(xics, icp, state->number);
			arch_spin_lock(&ics->lock);
		}
	}

	arch_spin_unlock(&ics->lock);
}
/* -- ICP routines -- */
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
struct kvm_vcpu *this_vcpu) struct kvm_vcpu *this_vcpu)
{ {
...@@ -116,6 +143,178 @@ static inline int check_too_hard(struct kvmppc_xics *xics, ...@@ -116,6 +143,178 @@ static inline int check_too_hard(struct kvmppc_xics *xics,
return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS; return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
} }
/*
 * Scan this ICP's resend map and re-run delivery for every ICS unit
 * that recorded a rejected interrupt.  Runs in real mode.
 */
static void icp_rm_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 id;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(id, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[id];

		/*
		 * Only proceed if we are the one that clears the bit
		 * (another path may race us) and the ICS really exists.
		 */
		if (test_and_clear_bit(id, icp->resend_map) && ics)
			ics_rm_check_resend(xics, ics, icp);
	}
}
/*
 * Attempt to atomically latch interrupt @irq into ICP @icp at @priority.
 *
 * Returns true when the interrupt was accepted; in that case *reject
 * carries the XISR value (if any) that was displaced and must be
 * re-delivered by the caller.  Returns false when the ICP cannot take
 * the interrupt; need_resend is then set in the ICP state so a later
 * CPPR change triggers a resend.
 */
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/*
		 * See if we can deliver: the new interrupt must be more
		 * favored (numerically lower) than the current CPPR, MFRR
		 * and any already-pending interrupt.
		 */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			/* Whatever was in xisr is displaced (0 = nothing) */
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

		/* Retry the whole transaction if the cmpxchg-style update lost a race */
	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}
/*
 * Deliver (or re-deliver) interrupt @new_irq to its target ICP,
 * entirely in hypervisor real mode.  @icp may be NULL, in which case
 * the target server is looked up from the ICS source state.
 */
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/*
		 * No ICS for this interrupt number: drop it.
		 * NOTE(review): "Unsafe increment" here presumably referred
		 * to a statistics counter that is not present in this
		 * version -- confirm against the full file.
		 */
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server: re-resolve it if the irq targets another ICP */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Target server not found: drop the interrupt */
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}
static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u8 new_cppr) u8 new_cppr)
{ {
...@@ -184,8 +383,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, ...@@ -184,8 +383,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* separately here as well. * separately here as well.
*/ */
if (resend) { if (resend) {
icp->rm_action |= XICS_RM_CHECK_RESEND; icp_rm_check_resend(xics, icp);
icp->rm_resend_icp = icp;
} }
} }
...@@ -300,16 +498,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, ...@@ -300,16 +498,14 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
} }
} while (!icp_rm_try_update(icp, old_state, new_state)); } while (!icp_rm_try_update(icp, old_state, new_state));
/* Pass rejects to virtual mode */ /* Handle reject in real mode */
if (reject && reject != XICS_IPI) { if (reject && reject != XICS_IPI) {
this_icp->rm_action |= XICS_RM_REJECT; icp_rm_deliver_irq(xics, icp, reject);
this_icp->rm_reject = reject;
} }
/* Pass resends to virtual mode */ /* Handle resends in real mode */
if (resend) { if (resend) {
this_icp->rm_action |= XICS_RM_CHECK_RESEND; icp_rm_check_resend(xics, icp);
this_icp->rm_resend_icp = icp;
} }
return check_too_hard(xics, this_icp); return check_too_hard(xics, this_icp);
...@@ -365,10 +561,12 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) ...@@ -365,10 +561,12 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
} while (!icp_rm_try_update(icp, old_state, new_state)); } while (!icp_rm_try_update(icp, old_state, new_state));
/* Pass rejects to virtual mode */ /*
* Check for rejects. They are handled by doing a new delivery
* attempt (see comments in icp_rm_deliver_irq).
*/
if (reject && reject != XICS_IPI) { if (reject && reject != XICS_IPI) {
icp->rm_action |= XICS_RM_REJECT; icp_rm_deliver_irq(xics, icp, reject);
icp->rm_reject = reject;
} }
bail: bail:
return check_too_hard(xics, icp); return check_too_hard(xics, icp);
...@@ -416,10 +614,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) ...@@ -416,10 +614,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
goto bail; goto bail;
state = &ics->irq_state[src]; state = &ics->irq_state[src];
/* Still asserted, resend it, we make it look like a reject */ /* Still asserted, resend it */
if (state->asserted) { if (state->asserted) {
icp->rm_action |= XICS_RM_REJECT; icp_rm_deliver_irq(xics, icp, irq);
icp->rm_reject = irq;
} }
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) { if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment