Commit 95b3cf69 authored by Xiao Guangrong, committed by Marcelo Tosatti

KVM: x86: let reexecute_instruction work for tdp

Currently, reexecute_instruction refuses to retry any instruction if tdp
is enabled. However, if nested NPT is in use, the emulation may still be
caused by a shadow page, and that can be fixed by dropping the shadow
page. The only condition under which tdp cannot retry the instruction is
an access fault on an error pfn.
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 22368028
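
In outline, the retry policy after this change works as sketched below. This is a simplified, stand-alone C model of the decision flow in the diff that follows; the types and helpers (fake_vcpu, should_reexecute, gva_to_gpa_write_ok, pfn_is_error, unprotect_shadow_page) are illustrative stand-ins, not the kernel's own API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's state; not real KVM types. */
struct fake_vcpu {
        bool direct_map;                 /* models vcpu->arch.mmu.direct_map        */
        unsigned indirect_shadow_pages;  /* models kvm->arch.indirect_shadow_pages  */
};

/* Hypothetical helpers modelling the calls used in the patch. */
static bool gva_to_gpa_write_ok(struct fake_vcpu *v)   { (void)v; return true; }
static bool pfn_is_error(struct fake_vcpu *v)          { (void)v; return false; }
static void unprotect_shadow_page(struct fake_vcpu *v) { (void)v; }

/* Mirrors the decision flow of the new reexecute_instruction(). */
static bool should_reexecute(struct fake_vcpu *v)
{
        if (!v->direct_map) {
                /* Shadow paging: translate the write access; if the guest
                 * mapping is invalid, re-enter and let the CPU fault. */
                if (!gva_to_gpa_write_ok(v))
                        return true;
        }

        /* An access fault on an error pfn can never be fixed by retrying;
         * bail out so the failure is reported to userspace. */
        if (pfn_is_error(v))
                return false;

        if (v->direct_map) {
                /* With tdp the instruction emulates fine; only unprotect
                 * when nested NPT created indirect shadow pages. */
                if (v->indirect_shadow_pages)
                        unprotect_shadow_page(v);
                return true;
        }

        /* Shadow paging: the fault was on a shadowed page table, so
         * unshadow it and retry in the guest. */
        unprotect_shadow_page(v);
        return true;
}

int main(void)
{
        struct fake_vcpu tdp_vcpu = { .direct_map = true, .indirect_shadow_pages = 1 };

        printf("retry with tdp + nested shadow pages: %d\n", should_reexecute(&tdp_vcpu));
        return 0;
}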

arch/x86/kvm/x86.c

@@ -4751,25 +4751,25 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
 {
-	gpa_t gpa;
+	gpa_t gpa = cr2;
 	pfn_t pfn;
 
-	if (tdp_enabled)
-		return false;
-
-	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
-	if (gpa == UNMAPPED_GVA)
-		return true; /* let cpu generate fault */
+	if (!vcpu->arch.mmu.direct_map) {
+		/*
+		 * Write permission should be allowed since only
+		 * write access need to be emulated.
+		 */
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
 
-	/*
-	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-enter the
-	 * guest to let CPU execute the instruction.
-	 */
-	if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
-		return true;
+		/*
+		 * If the mapping is invalid in guest, let cpu retry
+		 * it to generate fault.
+		 */
+		if (gpa == UNMAPPED_GVA)
+			return true;
+	}
 
 	/*
 	 * Do not retry the unhandleable instruction if it faults on the
@@ -4778,12 +4778,37 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	 * instruction -> ...
 	 */
 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-	if (!is_error_noslot_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
-		return true;
-	}
-
-	return false;
+
+	/*
+	 * If the instruction failed on the error pfn, it can not be fixed,
+	 * report the error to userspace.
+	 */
+	if (is_error_noslot_pfn(pfn))
+		return false;
+
+	kvm_release_pfn_clean(pfn);
+
+	/* The instructions are well-emulated on direct mmu. */
+	if (vcpu->arch.mmu.direct_map) {
+		unsigned int indirect_shadow_pages;
+
+		spin_lock(&vcpu->kvm->mmu_lock);
+		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+
+		if (indirect_shadow_pages)
+			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+
+		return true;
+	}
+
+	/*
+	 * if emulation was due to access to shadowed page table
+	 * and it failed try to unshadow page and re-enter the
+	 * guest to let CPU execute the instruction.
+	 */
+	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	return true;
 }
 
 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,