Commit a53c17d2 authored by Gleb Natapov, committed by Avi Kivity

KVM: ioapic/msi interrupt delivery consolidation

ioapic_deliver() and kvm_set_msi() duplicate the interrupt delivery code.
Move it into a new ioapic_deliver_entry() function and call it from
both places.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 6da7e3f6
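
For orientation, the sketch below condenses the call flow this commit produces. It is not kernel code: struct vm, redirect_entry, deliver_entry(), delivery_mask() and inject() are simplified stand-ins for struct kvm, the kvm_ioapic_redirect_entry union, ioapic_deliver_entry(), kvm_get_intr_delivery_bitmask() and kvm_apic_set_irq(), chosen only so the shared path and its two callers read as a standalone C program.

/* Freestanding sketch only: every type and helper here is a simplified
 * stand-in for the real KVM structures, not the kernel API itself. */
#include <stdio.h>

#define MAX_VCPUS 4

struct redirect_entry {              /* stands in for kvm_ioapic_redirect_entry */
        int vector;
        int dest_id;
};

struct vm {                          /* stands in for struct kvm */
        int online[MAX_VCPUS];
};

/* Stands in for kvm_get_intr_delivery_bitmask(): compute the target set. */
static void delivery_mask(struct vm *vm, struct redirect_entry *e,
                          unsigned long *mask)
{
        *mask = 0;
        if (e->dest_id < MAX_VCPUS && vm->online[e->dest_id])
                *mask |= 1UL << e->dest_id;
}

/* Stands in for kvm_apic_set_irq(): pretend to inject the vector. */
static int inject(int vcpu, struct redirect_entry *e)
{
        printf("vcpu %d <- vector 0x%x\n", vcpu, e->vector);
        return 1;
}

/* The consolidated path, mirroring ioapic_deliver_entry(): one bitmask
 * walk shared by every caller; returns -1 when no target was found. */
static int deliver_entry(struct vm *vm, struct redirect_entry *e)
{
        unsigned long mask;
        int i, r = -1;

        delivery_mask(vm, e, &mask);
        for (i = 0; i < MAX_VCPUS; i++)
                if (mask & (1UL << i))
                        r = (r < 0 ? 0 : r) + inject(i, e);
        return r;
}

/* Mirrors the new ioapic_deliver(): copy the redirect table entry,
 * express the PIT special case by editing the entry, then delegate. */
static int ioapic_deliver(struct vm *vm, struct redirect_entry *redirtbl, int irq)
{
        struct redirect_entry entry = redirtbl[irq];

        if (irq == 0)
                entry.dest_id = 0;   /* always route the PIT pin to vcpu 0 */
        return deliver_entry(vm, &entry);
}

/* Mirrors the new kvm_set_msi(): build an entry from the MSI message
 * and reuse deliver_entry() instead of keeping a second copy of the loop. */
static int set_msi(struct vm *vm, int dest_id, int vector)
{
        struct redirect_entry entry = { .vector = vector, .dest_id = dest_id };

        return deliver_entry(vm, &entry);
}

int main(void)
{
        struct vm vm = { .online = { 1, 1, 0, 0 } };
        struct redirect_entry redirtbl[2] = {
                { .vector = 0x30, .dest_id = 3 },   /* pin 0: PIT */
                { .vector = 0x31, .dest_id = 1 },
        };

        ioapic_deliver(&vm, redirtbl, 0);   /* goes to vcpu 0 despite dest_id 3 */
        set_msi(&vm, 1, 0x41);              /* MSI goes to vcpu 1 */
        return 0;
}

Both entry points build a redirect entry and hand it to the single routine that computes the destination bitmask and injects the vector; that loop is exactly what used to exist twice, once in ioapic_deliver() and once in kvm_set_msi().
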
@@ -364,7 +364,7 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
 #ifdef __KVM_HAVE_IOAPIC
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
                                    union kvm_ioapic_redirect_entry *entry,
                                    unsigned long *deliver_bitmask);
 #endif
@@ -142,54 +142,57 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
         }
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
 {
-        union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
         DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-        struct kvm_vcpu *vcpu;
-        int vcpu_id, r = -1;
+        int i, r = -1;
 
-        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
-                     "vector=%x trig_mode=%x\n",
-                     entry.fields.dest, entry.fields.dest_mode,
-                     entry.fields.delivery_mode, entry.fields.vector,
-                     entry.fields.trig_mode);
-
-        /* Always delivery PIT interrupt to vcpu 0 */
-#ifdef CONFIG_X86
-        if (irq == 0) {
-                bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-                __set_bit(0, deliver_bitmask);
-        } else
-#endif
-                kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
+        kvm_get_intr_delivery_bitmask(kvm, e, deliver_bitmask);
 
         if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
                 ioapic_debug("no target on destination\n");
-                return 0;
+                return r;
         }
 
-        while ((vcpu_id = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+        while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
                         < KVM_MAX_VCPUS) {
-                __clear_bit(vcpu_id, deliver_bitmask);
-                vcpu = ioapic->kvm->vcpus[vcpu_id];
+                struct kvm_vcpu *vcpu = kvm->vcpus[i];
+                __clear_bit(i, deliver_bitmask);
                 if (vcpu) {
                         if (r < 0)
                                 r = 0;
-                        r += kvm_apic_set_irq(vcpu,
-                                        entry.fields.vector,
-                                        entry.fields.trig_mode,
-                                        entry.fields.delivery_mode);
+                        r += kvm_apic_set_irq(vcpu, e->fields.vector,
+                                        e->fields.delivery_mode,
+                                        e->fields.trig_mode);
                 } else
                         ioapic_debug("null destination vcpu: "
                                      "mask=%x vector=%x delivery_mode=%x\n",
-                                     entry.fields.deliver_bitmask,
-                                     entry.fields.vector,
-                                     entry.fields.delivery_mode);
+                                     e->fields.deliver_bitmask,
+                                     e->fields.vector, e->fields.delivery_mode);
         }
         return r;
 }
 
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+{
+        union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+
+        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
+                     "vector=%x trig_mode=%x\n",
+                     entry.fields.dest, entry.fields.dest_mode,
+                     entry.fields.delivery_mode, entry.fields.vector,
+                     entry.fields.trig_mode);
+
+#ifdef CONFIG_X86
+        /* Always delivery PIT interrupt to vcpu 0 */
+        if (irq == 0) {
+                entry.fields.dest_mode = 0; /* Physical mode. */
+                entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+        }
+#endif
+        return ioapic_deliver_entry(ioapic->kvm, &entry);
+}
+
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
         u32 old_irr = ioapic->irr;
@@ -70,8 +70,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
                                    union kvm_ioapic_redirect_entry *entry,
                                    unsigned long *deliver_bitmask);
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
 #endif
@@ -43,12 +43,11 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
         return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
                             union kvm_ioapic_redirect_entry *entry,
                             unsigned long *deliver_bitmask)
 {
         int i;
-        struct kvm *kvm = ioapic->kvm;
         struct kvm_vcpu *vcpu;
 
         bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
@@ -90,7 +89,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
         switch (entry->fields.delivery_mode) {
         case IOAPIC_LOWEST_PRIORITY:
                 /* Select one in deliver_bitmask */
-                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+                vcpu = kvm_get_lowest_prio_vcpu(kvm,
                                 entry->fields.vector, deliver_bitmask);
                 bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
                 if (!vcpu)
@@ -111,13 +110,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                        struct kvm *kvm, int level)
 {
-        int vcpu_id, r = -1;
-        struct kvm_vcpu *vcpu;
-        struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
         union kvm_ioapic_redirect_entry entry;
-        DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-
-        BUG_ON(!ioapic);
 
         entry.bits = 0;
         entry.fields.dest_id = (e->msi.address_lo &
@@ -133,26 +126,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                         (unsigned long *)&e->msi.data);
 
         /* TODO Deal with RH bit of MSI message address */
-
-        kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
-
-        if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-                printk(KERN_WARNING "kvm: no destination for MSI delivery!");
-                return -1;
-        }
-        while ((vcpu_id = find_first_bit(deliver_bitmask,
-                                KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
-                __clear_bit(vcpu_id, deliver_bitmask);
-                vcpu = ioapic->kvm->vcpus[vcpu_id];
-                if (vcpu) {
-                        if (r < 0)
-                                r = 0;
-                        r += kvm_apic_set_irq(vcpu, entry.fields.vector,
-                                              entry.fields.dest_mode,
-                                              entry.fields.trig_mode);
-                }
-        }
-        return r;
+        return ioapic_deliver_entry(kvm, &entry);
 }
 
 /* This should be called with the kvm->lock mutex held