Commit ff03a073 authored by Joerg Roedel, committed by Avi Kivity

KVM: MMU: Add kvm_mmu parameter to load_pdptrs function

This function needs to be able to load the pdptrs from any
mmu context currently in use. So change this function to
take a kvm_mmu parameter to fit these needs.
As a side effect, this patch also moves the cached pdptrs
from vcpu_arch into the kvm_mmu struct.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent d47f00a6
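
In short (an illustrative sketch, not part of the commit itself): callers now name the MMU context whose PDPTR cache should be loaded, and cached values are read back through that same context. All identifiers below appear in the diff that follows.

    /* before: one implicit per-vcpu cache in vcpu->arch */
    load_pdptrs(vcpu, vcpu->arch.cr3);
    pdptr = vcpu->arch.pdptrs[0];

    /* after: the MMU context is explicit, so different contexts
     * (e.g. vcpu->arch.walk_mmu) can each keep their own cached
     * pdptrs */
    load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
    pdptr = vcpu->arch.walk_mmu->pdptrs[0];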
arch/x86/include/asm/kvm_host.h
@@ -257,6 +257,8 @@ struct kvm_mmu {
 	u64 *pae_root;
 	u64 rsvd_bits_mask[2][4];
+
+	u64 pdptrs[4]; /* pae */
 };
 
 struct kvm_vcpu_arch {
@@ -276,7 +278,6 @@ struct kvm_vcpu_arch {
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
-	u64 pdptrs[4]; /* pae */
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
@@ -592,7 +593,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			const void *val, int bytes);
arch/x86/kvm/kvm_cache_regs.h
@@ -42,7 +42,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
-	return vcpu->arch.pdptrs[index];
+	return vcpu->arch.walk_mmu->pdptrs[index];
 }
 
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
arch/x86/kvm/svm.c
@@ -1010,7 +1010,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		break;
 	default:
 		BUG();
arch/x86/kvm/vmx.c
@@ -1842,20 +1842,20 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 		return;
 
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
 	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
 
 	__set_bit(VCPU_EXREG_PDPTR,
arch/x86/kvm/x86.c
@@ -418,15 +418,15 @@ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 /*
  * Load the pae pdptrs.  Return true is they are all valid.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
 	int ret;
-	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
 
-	ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte,
-					 offset * sizeof(u64), sizeof(pdpte),
-					 PFERR_USER_MASK|PFERR_WRITE_MASK);
+	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
+				      offset * sizeof(u64), sizeof(pdpte),
+				      PFERR_USER_MASK|PFERR_WRITE_MASK);
 	if (ret < 0) {
@@ -442,7 +442,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 	ret = 1;
 
-	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
 	__set_bit(VCPU_EXREG_PDPTR,
 		  (unsigned long *)&vcpu->arch.regs_avail);
 	__set_bit(VCPU_EXREG_PDPTR,
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
 	bool changed = true;
 	int offset;
 	gfn_t gfn;
@@ -474,7 +474,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 		       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
 		goto out;
-	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
 out:
 
 	return changed;
@@ -513,7 +513,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			return 1;
 		} else
 #endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+						 vcpu->arch.cr3))
 			return 1;
 	}
@@ -602,7 +603,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -635,7 +636,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_pae(vcpu)) {
 		if (cr3 & CR3_PAE_RESERVED_BITS)
 			return 1;
-		if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+		if (is_paging(vcpu) &&
+		    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 			return 1;
 	}
 
 	/*
@@ -5422,7 +5424,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		mmu_reset_needed = 1;
 	}