Commit 2df4a5eb authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86: Remove mmu parameter from load_pdptrs()

load_pdptrs() always uses vcpu->arch.walk_mmu: nested EPT does not have
PDPTRs, and nested NPT treats them like all other non-leaf page table
levels instead of caching them, so the mmu parameter is redundant.
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211124122055.64424-11-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bb3b394d
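In short: load_pdptrs() now looks up the walk MMU itself instead of taking it as a parameter, so every caller shrinks by one argument. A condensed sketch of the resulting entry (illustration only; the elided body is the existing PDPTE load/validate logic shown in the x86.c hunks below):

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;	/* formerly the mmu parameter */
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;

	/* ... read the four PDPTEs through mmu and validate them ... */
}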
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1593,7 +1593,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			const void *val, int bytes);
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -461,7 +461,7 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 		return -EINVAL;
 
 	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
-	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
+	    CC(!load_pdptrs(vcpu, cr3)))
 		return -EINVAL;
 
 	if (!nested_npt)
@@ -1517,7 +1517,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 	 * the guest CR3 might be restored prior to setting the nested
 	 * state which can lead to a load of wrong PDPTRs.
 	 */
-	if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+	if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
 		return false;
 
 	if (!nested_svm_vmrun_msrpm(svm)) {
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1605,7 +1605,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		 * it is always updated per SDM when moving to CRs.
 		 */
 		if (npt_enabled)
-			load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		KVM_BUG_ON(1, vcpu->kvm);
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1103,7 +1103,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 	 * must not be dereferenced.
 	 */
 	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
-	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+	    CC(!load_pdptrs(vcpu, cr3))) {
 		*entry_failure_code = ENTRY_FAIL_PDPTE;
 		return -EINVAL;
 	}
@@ -3147,7 +3147,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		 * the guest CR3 might be restored prior to setting the nested
 		 * state which can lead to a load of wrong PDPTRs.
 		 */
-		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
 			return false;
 	}
 
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -798,8 +798,9 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
 /*
  * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
 	gpa_t real_gpa;
 	int i;
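As the comment above says, load_pdptrs() returns 1 only when all four PDPTEs are valid. A minimal, self-contained sketch of that validity rule (illustrative only: pdptes_all_valid() and its rsvd_bits parameter are hypothetical stand-ins for the kernel's per-entry check against pdptr_rsvd_bits(vcpu), not kernel code):

#include <stdbool.h>
#include <stdint.h>

#define PT_PRESENT_MASK (1ULL << 0)	/* bit 0: entry is present */

/*
 * A PDPTE is rejected only if it is present AND has a reserved bit set;
 * not-present entries are always acceptable.
 */
static bool pdptes_all_valid(const uint64_t pdpte[4], uint64_t rsvd_bits)
{
	for (int i = 0; i < 4; i++) {
		if ((pdpte[i] & PT_PRESENT_MASK) && (pdpte[i] & rsvd_bits))
			return false;	/* maps to load_pdptrs() returning 0 */
	}
	return true;			/* maps to load_pdptrs() returning 1 */
}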
@@ -890,7 +891,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 #endif
 	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
 	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
-	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
+	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
@@ -1064,8 +1065,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-				   kvm_read_cr3(vcpu)))
+		   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
 		return 1;
 
 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
@@ -1154,7 +1154,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
 		return 1;
 
 	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
 		return 1;
 
 	if (cr3 != kvm_read_cr3(vcpu))
@@ -10597,7 +10597,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 	if (update_pdptrs) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		if (is_pae_paging(vcpu)) {
-			load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+			load_pdptrs(vcpu, kvm_read_cr3(vcpu));
 			*mmu_reset_needed = 1;
 		}
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);