Commit 93c05d3e authored by Xiao Guangrong, committed by Marcelo Tosatti

KVM: x86: improve reexecute_instruction

The current reexecute_instruction can not reliably detect failed instruction
emulation: it allows the guest to retry all instructions except those that
access an error pfn.

Some cases, however, are nested write protection - for example, the page we
want to write is itself used as a PDE in the translation chain that maps it.
In such a case we should stop the emulation and report the failure to
userspace.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 95b3cf69
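
To make the failure mode concrete before the diff: below is a minimal
user-space sketch, not from the patch, of the shape the message describes.
The frame address and PDE flag values are illustrative only. A page-directory
entry points back at the frame holding the directory itself, so every write
through that mapping is a write into a guest page table, and unprotecting the
shadow page can never make the write succeed natively.

	/*
	 * Illustrative sketch only (hypothetical addresses and flags):
	 * a PDE whose target is the very frame that contains the page
	 * directory. Writing through the resulting mapping dirties a
	 * page KVM has shadow-write-protected, so the write is emulated;
	 * unprotecting does not help because the page stays in use as a
	 * page table, hence the endless emulate/retry loop this patch
	 * breaks.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PDE_PRESENT  0x001ULL
	#define PDE_WRITABLE 0x002ULL
	#define PDE_LARGE    0x080ULL	/* maps a large page directly */

	int main(void)
	{
		uint64_t pgdir[512] = { 0 };
		/* Hypothetical: physical frame 0x1000 holds pgdir itself. */
		uint64_t pgdir_frame_pa = 0x1000;

		/* The PDE's target is the frame containing this very PDE. */
		pgdir[0] = pgdir_frame_pa | PDE_PRESENT | PDE_WRITABLE | PDE_LARGE;

		printf("self-referential PDE: %#llx\n",
		       (unsigned long long)pgdir[0]);
		return 0;
	}
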
arch/x86/include/asm/kvm_host.h

@@ -497,6 +497,13 @@ struct kvm_vcpu_arch {
 		u64 msr_val;
 		struct gfn_to_hva_cache data;
 	} pv_eoi;
+
+	/*
+	 * Indicates whether the access faulted on its own page table in
+	 * the guest; set while fixing a page fault, and used to detect
+	 * unhandleable instructions.
+	 */
+	bool write_fault_to_shadow_pgtable;
 };

 struct kvm_lpage_info {
arch/x86/kvm/paging_tmpl.h

@@ -497,26 +497,34 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
  * created when kvm establishes shadow page table that stop kvm using large
  * page size. Do it early can avoid unnecessary #PF and emulation.
  *
+ * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
+ * currently used as its own page table.
+ *
  * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
  * since the PDPT is always shadowed, that means, we can not use large page
  * size to map the gfn which is used as PDPT.
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, int user_fault)
+			      struct guest_walker *walker, int user_fault,
+			      bool *write_fault_to_shadow_pgtable)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+	bool self_changed = false;

 	if (!(walker->pte_access & ACC_WRITE_MASK ||
 	      (!is_write_protection(vcpu) && !user_fault)))
 		return false;

-	for (level = walker->level; level <= walker->max_level; level++)
-		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
-			return true;
+	for (level = walker->level; level <= walker->max_level; level++) {
+		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
+
+		self_changed |= !(gfn & mask);
+		*write_fault_to_shadow_pgtable |= !gfn;
+	}

-	return false;
+	return self_changed;
 }

 /*
@@ -544,7 +552,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int level = PT_PAGE_TABLE_LEVEL;
 	int force_pt_level;
 	unsigned long mmu_seq;
-	bool map_writable;
+	bool map_writable, is_self_change_mapping;

 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -572,9 +580,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}

+	vcpu->arch.write_fault_to_shadow_pgtable = false;
+	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
+	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
+
 	if (walker.level >= PT_DIRECTORY_LEVEL)
 		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
-		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
+		   || is_self_change_mapping;
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
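
The bit arithmetic in the new loop is compact, so here is a standalone
sketch of just that logic. The gfn values are made up and a fixed 512-page
span stands in for KVM_PAGES_PER_HPAGE(); only the bit manipulation matches
the patch. The same XOR distinguishes "the write lands in the same huge-page
region as a page table" (forces 4K mappings) from "the write lands exactly
on the faulting page table" (sets write_fault_to_shadow_pgtable).

	/* Illustration of the detection logic above; constants and gfn
	 * values are assumed, only the bit manipulation is the same. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gfn_t;

	int main(void)
	{
		gfn_t pages_per_hpage = 512;	/* stand-in for KVM_PAGES_PER_HPAGE() */
		gfn_t mask = ~(pages_per_hpage - 1);
		gfn_t fault_gfn = 0x1234;	/* gfn the guest writes to */
		gfn_t table_gfn = 0x1234;	/* gfn of one of its page tables */

		gfn_t gfn = fault_gfn ^ table_gfn;

		/* Same huge-page region: XOR leaves only bits below the
		 * huge-page boundary, so (gfn & mask) == 0. */
		bool self_changed = !(gfn & mask);

		/* Exactly the same frame: XOR is zero, i.e. the write hits
		 * the very page table being walked. */
		bool write_fault_to_shadow_pgtable = !gfn;

		printf("self_changed=%d write_fault_to_shadow_pgtable=%d\n",
		       self_changed, write_fault_to_shadow_pgtable);
		return 0;
	}
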
arch/x86/kvm/x86.c

@@ -4751,7 +4751,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 	return r;
 }

-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+				  bool write_fault_to_shadow_pgtable)
 {
 	gpa_t gpa = cr2;
 	pfn_t pfn;
@@ -4808,7 +4809,13 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	 * guest to let CPU execute the instruction.
 	 */
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-	return true;
+
+	/*
+	 * If the access faults on its own page table, it can not be fixed
+	 * by unprotecting the shadow page, and it should be reported to
+	 * userspace.
+	 */
+	return !write_fault_to_shadow_pgtable;
 }

 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
@@ -4867,7 +4874,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	int r;
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	bool writeback = true;
+	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;

+	/*
+	 * Clear write_fault_to_shadow_pgtable here to ensure it is
+	 * never reused.
+	 */
+	vcpu->arch.write_fault_to_shadow_pgtable = false;
 	kvm_clear_exception_queue(vcpu);

 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
@@ -4886,7 +4899,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	if (r != EMULATION_OK) {
 		if (emulation_type & EMULTYPE_TRAP_UD)
 			return EMULATE_FAIL;
-		if (reexecute_instruction(vcpu, cr2))
+		if (reexecute_instruction(vcpu, cr2,
+					  write_fault_to_spt))
 			return EMULATE_DONE;
 		if (emulation_type & EMULTYPE_SKIP)
 			return EMULATE_FAIL;
@@ -4916,7 +4930,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DONE;

 	if (r == EMULATION_FAILED) {
-		if (reexecute_instruction(vcpu, cr2))
+		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
 			return EMULATE_DONE;

 		return handle_emulation_failure(vcpu);
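
Taken together, the retry decision after this patch reduces to roughly the
following condensed sketch. The function name and boolean parameters are
illustrative, not the literal kernel code: retry in the guest only when
unprotecting the shadow page can actually make forward progress.

	/* Condensed, illustrative view of the retry decision introduced
	 * by this patch -- not the literal kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool should_retry_in_guest(bool unprotect_succeeded,
					  bool write_fault_to_shadow_pgtable)
	{
		/*
		 * Unprotecting the shadow page lets the CPU re-execute the
		 * instruction natively, unless the written page is one of
		 * the instruction's own page tables, in which case the next
		 * attempt simply faults again and the failure must be
		 * reported to userspace instead.
		 */
		return unprotect_succeeded && !write_fault_to_shadow_pgtable;
	}

	int main(void)
	{
		/* Plain write-protect fault: 1, re-enter the guest. */
		printf("plain write-protect fault: retry=%d\n",
		       should_retry_in_guest(true, false));
		/* Write into its own page table: 0, exit to userspace. */
		printf("write into own page table: retry=%d\n",
		       should_retry_in_guest(true, true));
		return 0;
	}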