Commit b97e6de9 authored by Paolo Bonzini

KVM: x86: merge kvm_arch_set_irq with kvm_set_msi_inatomic

We do not want to do too much work in atomic context, in particular
not walking all the VCPUs of the virtual machine.  So we want
to distinguish the architecture-specific injection function for irqfd
from kvm_set_msi.  Since the newly added kvm_arch_set_irq is still empty,
reuse it and rename it to kvm_arch_set_irq_inatomic.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0669a510
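Conceptually, the hunks below (the x86 MSI fast path, the shared prototypes, and the irqfd wakeup path) establish one calling convention: the architecture hook tries to deliver the interrupt with as little work as is safe in atomic context, and returns -EWOULDBLOCK when the caller should fall back to a sleepable work item. The following stand-alone C sketch models that convention outside the kernel; it is not KVM code, and the names fake_entry, deliver_fast and deliver_slow are invented purely for illustration.

/*
 * Minimal user-space sketch (NOT kernel code) of the -EWOULDBLOCK fallback
 * convention used by kvm_arch_set_irq_inatomic. All names here
 * (fake_entry, deliver_fast, deliver_slow) are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

enum routing_type { ROUTING_MSI, ROUTING_IRQCHIP };

struct fake_entry {
	enum routing_type type;
	int gsi;
};

/* Fast path: handles only the cheap case; never does heavy work. */
static int deliver_fast(const struct fake_entry *e)
{
	if (e->type != ROUTING_MSI)
		return -EWOULDBLOCK;	/* too much work for atomic context */
	printf("fast: delivered MSI for gsi %d\n", e->gsi);
	return 0;
}

/* Slow path: stands in for the deferred work item. */
static void deliver_slow(const struct fake_entry *e)
{
	printf("slow: delivered gsi %d from process context\n", e->gsi);
}

int main(void)
{
	struct fake_entry msi = { ROUTING_MSI, 32 };
	struct fake_entry pic = { ROUTING_IRQCHIP, 5 };

	/* Try the atomic-safe path first, defer only when it refuses. */
	if (deliver_fast(&msi) == -EWOULDBLOCK)
		deliver_slow(&msi);
	if (deliver_fast(&pic) == -EWOULDBLOCK)
		deliver_slow(&pic);
	return 0;
}

In the patch itself, deliver_fast corresponds to kvm_arch_set_irq_inatomic, which on x86 only handles KVM_IRQ_ROUTING_MSI entries via kvm_irq_delivery_to_apic_fast and bounces everything else with -EWOULDBLOCK, and the deferred path corresponds to schedule_work(&irqfd->inject) in the last hunk.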
@@ -124,12 +124,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 }
 
-static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
-				struct kvm *kvm)
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+			      struct kvm *kvm, int irq_source_id, int level,
+			      bool line_status)
 {
 	struct kvm_lapic_irq irq;
 	int r;
 
+	if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
+		return -EWOULDBLOCK;
+
 	kvm_set_msi_irq(e, &irq);
 
 	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
@@ -165,10 +169,8 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
 		e = &entries[0];
-		if (likely(e->type == KVM_IRQ_ROUTING_MSI))
-			ret = kvm_set_msi_inatomic(e, kvm);
-		else
-			ret = -EWOULDBLOCK;
+		ret = kvm_arch_set_irq_inatomic(e, kvm, irq_source_id,
+						irq, level);
 	}
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 	return ret;
...
@@ -830,10 +830,9 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
 		int irq_source_id, int level, bool line_status);
-
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
-		     int irq_source_id, int level, bool line_status);
-
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
+			      struct kvm *kvm, int irq_source_id,
+			      int level, bool line_status);
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
...
@@ -171,7 +171,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
 }
 
-int __attribute__((weak)) kvm_arch_set_irq(
+int __attribute__((weak)) kvm_arch_set_irq_inatomic(
 			struct kvm_kernel_irq_routing_entry *irq,
 			struct kvm *kvm, int irq_source_id,
 			int level,
@@ -201,12 +201,9 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 			irq = irqfd->irq_entry;
 		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
 
 		/* An event has been signaled, inject an interrupt */
-		if (irq.type == KVM_IRQ_ROUTING_MSI)
-			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
-				    false);
-		else if (kvm_arch_set_irq(&irq, kvm,
-					  KVM_USERSPACE_IRQ_SOURCE_ID, 1,
-					  false) == -EWOULDBLOCK)
+		if (kvm_arch_set_irq_inatomic(&irq, kvm,
+					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+					      false) == -EWOULDBLOCK)
 			schedule_work(&irqfd->inject);
 		srcu_read_unlock(&kvm->irq_srcu, idx);
 	}
...