Commit fe4c7b19 authored by Gleb Natapov, committed by Avi Kivity

KVM: reuse (pop|push)_irq from svm.c in vmx.c

The prioritized bit vector manipulation functions are useful in both vmx and
svm.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 61c50edf
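For context: the helpers being consolidated maintain a two-level pending-interrupt bitmap, vcpu->arch.irq_pending[] (one bit per vector) plus vcpu->arch.irq_summary (one bit per non-empty word), and always pop the lowest-numbered pending vector. The standalone sketch below is illustrative only: plain C, made-up struct and helper names, and __builtin_ctzl standing in for the kernel's __ffs(); only the field layout mirrors the kvm_vcpu fields visible in the diff.

/*
 * Illustrative userspace model of the prioritized pending-IRQ bit vector
 * that kvm_push_irq()/kvm_pop_irq() manipulate.  Names here are invented
 * for the example, not kernel code.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define NR_VECTORS	256

struct pending_irqs {
	unsigned long irq_pending[NR_VECTORS / BITS_PER_LONG]; /* one bit per vector */
	unsigned long irq_summary;	/* one bit per non-empty irq_pending word */
};

/* Index of the least significant set bit; stand-in for the kernel's __ffs(). */
static int first_bit(unsigned long x)
{
	return __builtin_ctzl(x);
}

static void push_irq(struct pending_irqs *p, unsigned char irq)
{
	p->irq_pending[irq / BITS_PER_LONG] |= 1UL << (irq % BITS_PER_LONG);
	p->irq_summary |= 1UL << (irq / BITS_PER_LONG);
}

/* Caller must ensure something is pending; returns the lowest pending vector. */
static unsigned char pop_irq(struct pending_irqs *p)
{
	int word = first_bit(p->irq_summary);
	int bit = first_bit(p->irq_pending[word]);

	p->irq_pending[word] &= ~(1UL << bit);
	if (!p->irq_pending[word])
		p->irq_summary &= ~(1UL << word);
	return word * BITS_PER_LONG + bit;
}

int main(void)
{
	struct pending_irqs p = { 0 };

	push_irq(&p, 0x80);
	push_irq(&p, 0x21);
	printf("%#x\n", pop_irq(&p));	/* 0x21 pops first (lowest vector) */
	printf("%#x\n", pop_irq(&p));	/* then 0x80 */
	return 0;
}

The diff below removes the duplicated svm.c/vmx.c copies of exactly this logic and keeps a single kvm_pop_irq()/kvm_push_irq() pair in irq.h.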
@@ -19,6 +19,7 @@
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -132,24 +133,6 @@ static inline u32 svm_has(u32 feat)
 	return svm_features & feat;
 }
 
-static inline u8 pop_irq(struct kvm_vcpu *vcpu)
-{
-	int word_index = __ffs(vcpu->arch.irq_summary);
-	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
-	int irq = word_index * BITS_PER_LONG + bit_index;
-
-	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
-	if (!vcpu->arch.irq_pending[word_index])
-		clear_bit(word_index, &vcpu->arch.irq_summary);
-	return irq;
-}
-
-static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
-{
-	set_bit(irq, vcpu->arch.irq_pending);
-	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
-}
-
 static inline void clgi(void)
 {
 	asm volatile (__ex(SVM_CLGI));
@@ -1116,7 +1099,7 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	if (!irqchip_in_kernel(kvm) &&
 	    is_external_interrupt(exit_int_info)) {
 		event_injection = true;
-		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+		kvm_push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
 	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
@@ -2336,7 +2319,7 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
 	if ((control->int_ctl & V_IRQ_MASK)
 	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
 		control->int_ctl &= ~V_IRQ_MASK;
-		push_irq(&svm->vcpu, control->int_vector);
+		kvm_push_irq(&svm->vcpu, control->int_vector);
 	}
 
 	svm->vcpu.arch.interrupt_window_open =
@@ -2346,7 +2329,7 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
 
 static void svm_do_inject_vector(struct vcpu_svm *svm)
 {
-	svm_inject_irq(svm, pop_irq(&svm->vcpu));
+	svm_inject_irq(svm, kvm_pop_irq(&svm->vcpu));
 }
 
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
@@ -2489,18 +2489,6 @@ static void vmx_update_window_states(struct kvm_vcpu *vcpu)
 				GUEST_INTR_STATE_MOV_SS)));
 }
 
-static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
-{
-	int word_index = __ffs(vcpu->arch.irq_summary);
-	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
-	int irq = word_index * BITS_PER_LONG + bit_index;
-
-	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
-	if (!vcpu->arch.irq_pending[word_index])
-		clear_bit(word_index, &vcpu->arch.irq_summary);
-	kvm_queue_interrupt(vcpu, irq);
-}
-
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 				  struct kvm_run *kvm_run)
 {
@@ -2534,7 +2522,7 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 
 	if (vcpu->arch.interrupt_window_open) {
 		if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
-			kvm_do_inject_irq(vcpu);
+			kvm_queue_interrupt(vcpu, kvm_pop_irq(vcpu));
 
 		if (vcpu->arch.interrupt.pending)
 			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
@@ -2619,8 +2607,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
 		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
 
-		set_bit(irq, vcpu->arch.irq_pending);
-		set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
+		kvm_push_irq(vcpu, irq);
 	}
 
 	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
@@ -19,4 +19,22 @@ static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
 	vcpu->arch.interrupt.pending = false;
 }
 
+static inline u8 kvm_pop_irq(struct kvm_vcpu *vcpu)
+{
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
+	int irq = word_index * BITS_PER_LONG + bit_index;
+
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
+	return irq;
+}
+
+static inline void kvm_push_irq(struct kvm_vcpu *vcpu, u8 irq)
+{
+	set_bit(irq, vcpu->arch.irq_pending);
+	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
+}
+
 #endif