Commit f78978aa authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: only update unsync page in invlpg path

Only unsync pages need to be updated at invlpg time, since other shadow
pages are write-protected.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent e02aa901
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -461,6 +461,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
@@ -472,10 +473,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
+		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
 			int offset, shift;
 
+			if (!sp->unsync)
+				break;
+
 			shift = PAGE_SHIFT -
 				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
 			offset = sp->role.quadrant << shift;
@@ -493,7 +497,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 		}
 
-		if (!is_shadow_present_pte(*sptep))
+		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
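To make the early-exit logic concrete, below is a minimal, self-contained C sketch of the control flow this patch introduces. Everything in it (struct mock_mmu_page, walk_level()) is a simplified stand-in invented for illustration; it is not the kernel's kvm_mmu_page or shadow-walk API.

/* Hypothetical mock-up of the invlpg walk's early exits; the type and
 * helper below are illustrative stand-ins, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct mock_mmu_page {
	bool unsync;          /* leaf page whose guest PTEs may be stale */
	bool unsync_children; /* interior page with unsync pages below   */
};

/* Returns true if the walk should descend to the next lower level. */
static bool walk_level(const struct mock_mmu_page *sp, bool is_last_level)
{
	if (is_last_level) {
		/* A synced leaf is write-protected, so its shadow PTE
		 * cannot be stale: nothing to update, exit early. */
		if (sp->unsync)
			printf("unsync leaf: recompute pte_gpa and update\n");
		return false; /* reaching a leaf ends the walk either way */
	}
	/* Interior page: descending is pointless unless some page
	 * underneath is unsync. */
	return sp->unsync_children;
}

int main(void)
{
	struct mock_mmu_page synced = { .unsync = false };
	struct mock_mmu_page unsync = { .unsync = true };

	walk_level(&synced, true); /* prints nothing: early break        */
	walk_level(&unsync, true); /* prints: unsync leaf: ... and update */
	return 0;
}

Both exits mirror the patch: the leaf-level `if (!sp->unsync) break;` and the descent condition `|| !sp->unsync_children`, which stops the walk as soon as nothing below can need syncing.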