Commit e9fd761a authored by Paolo Bonzini

KVM: nSVM: remove HF_VINTR_MASK

Now that the int_ctl field is stored in svm->nested.ctl.int_ctl, we can
use it instead of vcpu->arch.hflags to check whether L2 is running
in V_INTR_MASKING mode.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 36e2e983
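
In short: instead of mirroring L1's V_INTR_MASKING bit into vcpu->arch.hflags at VMRUN and clearing it on VMEXIT, every check now reads the int_ctl value captured from the nested VMCB. As a minimal sketch of the predicate the converted sites evaluate (the helper name is hypothetical; the commit open-codes the test at each site):

	/* Hypothetical helper: svm->nested.ctl caches the control area of the
	 * nested VMCB, so V_INTR_MASKING can be read directly instead of being
	 * shadowed in vcpu->arch.hflags as HF_VINTR_MASK.
	 */
	static inline bool nested_vintr_masking(struct vcpu_svm *svm)
	{
		return !!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
	}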
arch/x86/include/asm/kvm_host.h
@@ -1596,7 +1596,6 @@ enum {
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
-#define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
arch/x86/kvm/svm/nested.c
@@ -118,7 +118,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
 	c->intercept_exceptions = h->intercept_exceptions;
 	c->intercept = h->intercept;
 
-	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+	if (g->int_ctl & V_INTR_MASKING_MASK) {
 		/* We only want the cr8 intercept bits of L1 */
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
 		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
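
For context on the hunk above: recalc_intercepts() merges the intercepts that the active VMCB needs while L2 runs. Roughly, in this version of the tree the three pointers are set up as follows (abridged; treat the exact lines as approximate):

	struct vmcb_control_area *c, *h, *g;

	c = &svm->vmcb->control;		/* active (merged) controls */
	h = &svm->nested.hsave->control;	/* L1 controls saved at VMRUN */
	g = &svm->nested.ctl;			/* controls captured from the nested VMCB */

so g->int_ctl is exactly the svm->nested.ctl.int_ctl the commit message refers to.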
@@ -338,10 +338,6 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
 	kvm_mmu_reset_context(&svm->vcpu);
 
 	svm_flush_tlb(&svm->vcpu);
-	if (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
-		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
 
 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
 		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
arch/x86/kvm/svm/svm.c
@@ -3104,7 +3104,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu)) {
 		/* As long as interrupts are being delivered... */
-		if ((svm->vcpu.arch.hflags & HF_VINTR_MASK)
+		if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
 		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
 		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
 			return true;
 
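
A note on the remaining hflags use above: HF_HIF_MASK is not a nested-VMCB control bit but a snapshot of L1's EFLAGS.IF taken at VMRUN, which is why this cleanup leaves it alone. The rule the condition implements, annotated (names taken from the diff):

	/* Interrupts are blocked for L2 if the flag that gates physical
	 * interrupt delivery is clear: L1's IF when V_INTR_MASKING is set,
	 * L2's own RFLAGS.IF otherwise.
	 */
	bool blocked = (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
			? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
			: !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);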
arch/x86/kvm/svm/svm.h
@@ -367,7 +367,9 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
 
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
-	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
 }
 
 static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
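
For a sense of how the converted helper is consumed: svm_nested_virtualize_tpr() tells callers that L1 owns TPR/CR8 virtualization while L2 runs, so KVM must leave the CR8 intercepts alone. Roughly, the caller in svm.c looks like this (abridged from the same era of the tree; details may vary):

	static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* L1 virtualizes TPR for L2; don't touch CR8 intercepts. */
		if (svm_nested_virtualize_tpr(vcpu))
			return;

		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

		if (irr == -1)
			return;

		if (tpr >= irr)
			set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
	}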