Commit 13d22b6a authored by Avi Kivity

KVM: MMU: Simplify walk_addr_generic() loop

The page table walk is coded as an infinite loop, with a special
case on the last pte.

Code it as an ordinary loop with a termination condition on the last
pte (large page or walk length exhausted), and put the last pte handling
code after the loop where it belongs.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 97d64b78
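
For illustration only, here is a toy sketch of the control-flow change on a made-up two-level table (all names, types, and data below are invented for the example; nothing here is KVM code):

/*
 * Toy model of the restructuring: walk_old() is the infinite loop with the
 * last-pte special case inside, walk_new() is the do/while form with the
 * last-pte handling after the loop.
 */
#include <stdio.h>
#include <assert.h>
#include <stdbool.h>

static int l0_tables[2][4] = { { 10, 11, 12, 13 }, { 20, 21, 22, 23 } };
static int l1_table[2]     = { 0, 1 };   /* entries index into l0_tables */

static int load_pte(int level, int pte, int idx1, int idx0)
{
	return level == 1 ? l1_table[idx1] : l0_tables[pte][idx0];
}

static bool is_last(int level) { return level == 0; }   /* lowest level reached */

/* Before: infinite loop, last-pte handling buried inside, exit via break/return. */
static int walk_old(int idx1, int idx0)
{
	int level = 1, pte = 0;

	for (;;) {
		pte = load_pte(level, pte, idx1, idx0);
		if (is_last(level)) {
			/* final handling lives inside the loop body */
			return pte;
		}
		--level;
	}
}

/* After: ordinary do/while; last-pte handling follows the loop. */
static int walk_new(int idx1, int idx0)
{
	int level = 2, pte = 0;          /* start one above, like ++walker->level */

	do {
		--level;                 /* descend first, mirroring --walker->level */
		pte = load_pte(level, pte, idx1, idx0);
	} while (!is_last(level));

	/* last-pte handling after the loop, where it belongs */
	return pte;
}

int main(void)
{
	assert(walk_old(1, 2) == walk_new(1, 2));
	printf("%d\n", walk_new(1, 2));  /* prints 22 */
	return 0;
}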
@@ -171,12 +171,15 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, pte_access;
 	gpa_t pte_gpa;
-	bool eperm, last_gpte;
+	bool eperm;
 	int offset;
 	const int write_fault = access & PFERR_WRITE_MASK;
 	const int user_fault = access & PFERR_USER_MASK;
 	const int fetch_fault = access & PFERR_FETCH_MASK;
 	u16 errcode = 0;
+	gpa_t real_gpa;
+	gfn_t gfn;
+	u32 ac;
 
 	trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
@@ -197,12 +200,16 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
-	pt_access = ACC_ALL;
+	pt_access = pte_access = ACC_ALL;
+	++walker->level;
 
-	for (;;) {
+	do {
 		gfn_t real_gfn;
 		unsigned long host_addr;
 
+		pt_access &= pte_access;
+		--walker->level;
+
 		index = PT_INDEX(addr, walker->level);
 
 		table_gfn = gpte_to_gfn(pte);
@@ -239,46 +246,29 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 		pte_access = pt_access & gpte_access(vcpu, pte);
-		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
-
-		if (last_gpte) {
-			int lvl = walker->level;
-			gpa_t real_gpa;
-			gfn_t gfn;
-			u32 ac;
-
-			gfn = gpte_to_gfn_lvl(pte, lvl);
-			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
-
-			if (PTTYPE == 32 &&
-			    walker->level == PT_DIRECTORY_LEVEL &&
-			    is_cpuid_PSE36())
-				gfn += pse36_gfn_delta(pte);
-
-			ac = write_fault | fetch_fault | user_fault;
-
-			real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
-						      ac);
-			if (real_gpa == UNMAPPED_GVA)
-				return 0;
-
-			walker->gfn = real_gpa >> PAGE_SHIFT;
-
-			break;
-		}
-
-		pt_access &= pte_access;
-		--walker->level;
-	}
+	} while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
 
 	eperm |= permission_fault(mmu, pte_access, access);
 	if (unlikely(eperm)) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;
 	}
 
+	gfn = gpte_to_gfn_lvl(pte, walker->level);
+	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
+
+	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
+		gfn += pse36_gfn_delta(pte);
+
+	ac = write_fault | fetch_fault | user_fault;
+
+	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), ac);
+	if (real_gpa == UNMAPPED_GVA)
+		return 0;
+
+	walker->gfn = real_gpa >> PAGE_SHIFT;
+
 	if (!write_fault)
 		protect_clean_gpte(&pte_access, pte);