Commit e08d26f0 authored by Paolo Bonzini, committed by Radim Krčmář

KVM: x86: simplify ept_misconfig

Calling handle_mmio_page_fault() has been unnecessary since commit
e9ee956e ("KVM: x86: MMU: Move handle_mmio_page_fault() call to
kvm_mmu_page_fault()", 2016-02-22).

handle_mmio_page_fault() can now be made static.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 076b925d
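In short: handle_ept_misconfig() used to interpret the RET_MMIO_PF_* codes itself; after this patch it calls kvm_mmu_page_fault() with PFERR_RSVD_MASK, which already performs that dispatch, and simply forwards any non-negative result. The standalone C model below is illustrative only (the model_* names are invented here; only the sign convention mirrors KVM's: 1 means re-enter the guest, 0 means exit to userspace, negative means error). It shows why a single sign check can replace the old four-way branch:

#include <stdio.h>

/* Stand-in for kvm_mmu_page_fault() once it has absorbed the MMIO
 * handling: any non-negative value is a final decision. */
static int model_mmu_page_fault(int spte_is_mmio)
{
        if (spte_is_mmio)
                return 1;       /* handled: re-enter the guest */
        return -1;              /* not handled: caller sees a real misconfig */
}

/* New-style caller: one call and a sign check replace the old branches. */
static int model_ept_misconfig(int spte_is_mmio)
{
        int ret = model_mmu_page_fault(spte_is_mmio);

        if (ret >= 0)
                return ret;     /* 0 or 1: the MMU already decided */

        /* ret < 0: genuine EPT misconfiguration; warn and bail out */
        fprintf(stderr, "real ept misconfig\n");
        return 0;
}

int main(void)
{
        printf("mmio spte -> %d\n", model_ept_misconfig(1));
        printf("bad spte  -> %d\n", model_ept_misconfig(0));
        return 0;
}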
arch/x86/kvm/mmu.c
@@ -3648,7 +3648,23 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	return reserved;
 }
 
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+/*
+ * Return values of handle_mmio_page_fault:
+ * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+ *			directly.
+ * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
+ *			fault path update the mmio spte.
+ * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
+ */
+enum {
+	RET_MMIO_PF_EMULATE = 1,
+	RET_MMIO_PF_INVALID = 2,
+	RET_MMIO_PF_RETRY = 0,
+	RET_MMIO_PF_BUG = -1
+};
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;
@@ -4837,6 +4853,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 			return 1;
 		if (r < 0)
 			return r;
+		/* Must be RET_MMIO_PF_INVALID.  */
 	}
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
...
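The one-line comment added above records an exhaustiveness argument: every other RET_MMIO_PF_* value makes kvm_mmu_page_fault() return early (EMULATE goes to the emulation path, RETRY returns 1, BUG is negative and is propagated), so any value reaching the end of that block can only be RET_MMIO_PF_INVALID, and the fault falls through to the normal page-fault path. A compact standalone illustration of that reasoning (assumed structure, paraphrasing the kernel's dispatch rather than quoting it):

#include <assert.h>
#include <stdio.h>

enum {
        RET_MMIO_PF_EMULATE = 1,
        RET_MMIO_PF_INVALID = 2,
        RET_MMIO_PF_RETRY = 0,
        RET_MMIO_PF_BUG = -1
};

/* Every value except INVALID is consumed by an earlier check, so the
 * tail of the function is only ever reached with INVALID. */
static const char *dispatch(int r)
{
        if (r == RET_MMIO_PF_EMULATE)
                return "emulate the instruction";
        if (r == RET_MMIO_PF_RETRY)
                return "return 1 (re-enter the guest)";
        if (r < 0)
                return "propagate the error";
        /* Everything else returned early; this must be INVALID. */
        assert(r == RET_MMIO_PF_INVALID);
        return "fall through to the normal page-fault path";
}

int main(void)
{
        const int vals[] = { RET_MMIO_PF_EMULATE, RET_MMIO_PF_RETRY,
                             RET_MMIO_PF_BUG, RET_MMIO_PF_INVALID };
        for (int i = 0; i < 4; i++)
                printf("%2d -> %s\n", vals[i], dispatch(vals[i]));
        return 0;
}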
arch/x86/kvm/mmu.h
@@ -56,23 +56,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- *			directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- *			fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
-	RET_MMIO_PF_EMULATE = 1,
-	RET_MMIO_PF_INVALID = 2,
-	RET_MMIO_PF_RETRY = 0,
-	RET_MMIO_PF_BUG = -1
-};
-
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
...
arch/x86/kvm/vmx.c
@@ -6410,17 +6410,10 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 			return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	ret = handle_mmio_page_fault(vcpu, gpa, true);
 	vcpu->arch.gpa_available = true;
-	if (likely(ret == RET_MMIO_PF_EMULATE))
-		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
-					      EMULATE_DONE;
-
-	if (unlikely(ret == RET_MMIO_PF_INVALID))
-		return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
-
-	if (unlikely(ret == RET_MMIO_PF_RETRY))
-		return 1;
+	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+	if (ret >= 0)
+		return ret;
 
 	/* It is the real ept misconfig */
 	WARN_ON(1);
...
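The error code passed here matters: KVM builds MMIO sptes with reserved bits set, so a fault carrying PFERR_RSVD_MASK steers kvm_mmu_page_fault() straight into handle_mmio_page_fault() (see the first hunk) instead of a regular guest page-table walk. A minimal sketch of that flag test follows; the bit positions match the architectural x86 page-fault error code (bit 3 is the reserved-bit violation flag), and classify() is an invented name standing in for the kernel's early check:

#include <stdint.h>
#include <stdio.h>

/* x86 page-fault error-code bits (architectural positions). */
#define PFERR_PRESENT_MASK      (1ULL << 0)
#define PFERR_WRITE_MASK        (1ULL << 1)
#define PFERR_USER_MASK         (1ULL << 2)
#define PFERR_RSVD_MASK         (1ULL << 3)     /* reserved bit set in PTE */

/* Models the early test in kvm_mmu_page_fault(): the RSVD bit signals
 * that the fault came from an MMIO spte, so the MMIO handler runs
 * before (and instead of) the regular fault path. */
static const char *classify(uint64_t error_code)
{
        if (error_code & PFERR_RSVD_MASK)
                return "MMIO path (handle_mmio_page_fault)";
        return "regular path (mmu.page_fault)";
}

int main(void)
{
        /* What handle_ept_misconfig() now passes: */
        printf("%s\n", classify(PFERR_RSVD_MASK));
        /* An ordinary write fault, by contrast: */
        printf("%s\n", classify(PFERR_PRESENT_MASK | PFERR_WRITE_MASK));
        return 0;
}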