Commit e71237d3 authored by Oliver Upton, committed by Paolo Bonzini

KVM: nVMX: Refactor IO bitmap checks into helper function

Checks against the IO bitmap are useful for both instruction emulation
and VM-exit reflection. Refactor the IO bitmap checks into a helper
function.
Signed-off-by: Oliver Upton <oupton@google.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 07721fee
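
The new helper only consults the I/O bitmaps; the VM-execution-control checks stay with the caller, as the reworked nested_vmx_exit_handled_io() in the diff below shows. As a rough illustration of the instruction-emulation use case the message mentions, a hypothetical wrapper (the name and placement are made up for illustration, not part of this patch) for a path that already knows the port and access size could look like this:

/*
 * Hypothetical sketch only: decide whether an IO access that L0 is about to
 * emulate on behalf of L2 should instead be handed to L1.  Mirrors the
 * control checks kept in nested_vmx_exit_handled_io().
 */
static bool nested_io_should_exit_to_l1(struct kvm_vcpu *vcpu,
					unsigned int port, int size)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	return nested_vmx_check_io_bitmaps(vcpu, port, size);
}

The point is that the helper takes the port and size directly, so callers no longer need an EXIT_QUALIFICATION to decode.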
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5312,24 +5312,17 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-
-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
-				       struct vmcs12 *vmcs12)
+/*
+ * Return true if an IO instruction with the specified port and size should cause
+ * a VM-exit into L1.
+ */
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+				 int size)
 {
-	unsigned long exit_qualification;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	gpa_t bitmap, last_bitmap;
-	unsigned int port;
-	int size;
 	u8 b;
 
-	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
-		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
-
-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
-	port = exit_qualification >> 16;
-	size = (exit_qualification & 7) + 1;
-
 	last_bitmap = (gpa_t)-1;
 	b = -1;
 
@@ -5356,6 +5349,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+				       struct vmcs12 *vmcs12)
+{
+	unsigned long exit_qualification;
+	unsigned int port;
+	int size;
+
+	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+	port = exit_qualification >> 16;
+	size = (exit_qualification & 7) + 1;
+
+	return nested_vmx_check_io_bitmaps(vcpu, port, size);
+}
+
 /*
  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -33,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+				 int size);
 
 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 {
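
The bitmap-walk loop itself is unchanged by this patch, so it is collapsed out of the first hunk. For reference, a minimal user-space sketch of the VMX I/O-bitmap semantics it implements follows (an assumption-laden illustration, not kernel code: two in-memory 4 KiB buffers stand in for the guest pages that the real helper reads through kvm_vcpu_read_guest(); bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, one bit per port, and any set bit over the accessed range triggers a VM-exit):

#include <stdbool.h>
#include <stdint.h>

#define IO_BITMAP_BYTES 4096	/* each VMX I/O bitmap is one 4 KiB page */

/*
 * Sketch of the check nested_vmx_check_io_bitmaps() performs: an access to
 * ports [port, port + size) should cause a VM-exit if any corresponding
 * bitmap bit is set.  Accesses that run past port 0xffff always exit.
 */
bool io_bitmaps_want_exit(const uint8_t *bitmap_a, const uint8_t *bitmap_b,
			  unsigned int port, int size)
{
	while (size > 0) {
		const uint8_t *bitmap;

		if (port < 0x8000)
			bitmap = bitmap_a;
		else if (port < 0x10000)
			bitmap = bitmap_b;
		else
			return true;

		if (bitmap[(port & 0x7fff) / 8] & (1 << (port & 7)))
			return true;

		port++;
		size--;
	}

	return false;
}

For example, io_bitmaps_want_exit(a, b, 0x8003, 2) tests bits 3 and 4 of the first byte of bitmap B, matching a two-byte access at port 0x8003.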