Commit 727a7e27 authored by Paolo Bonzini

KVM: x86: rename set_cr3 callback and related flags to load_mmu_pgd

The set_cr3 callback is not setting the guest CR3; it is setting the
root of the guest page tables, either shadow or two-dimensional.
To make this clearer, as well as to indicate that the MMU calls it
via kvm_mmu_load_cr3, rename it to load_mmu_pgd.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 689f3bf2
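For orientation before reading the diff: after this commit the path is kvm_make_request(KVM_REQ_LOAD_MMU_PGD) -> vcpu_enter_guest() -> kvm_mmu_load_pgd() -> kvm_x86_ops->load_mmu_pgd() (vmx_load_mmu_pgd() or svm_load_mmu_pgd()). Below is a minimal, standalone C sketch of that chain. It is not kernel code: the struct fields (requests, mmu_root_hpa, ops) and the request handling are simplified stand-ins used only to illustrate how the renamed callback is wired.

/* Simplified, standalone model of the call chain renamed by this commit.
 * Kernel types and the request machinery are reduced to plain C. */
#include <stdio.h>

#define KVM_REQ_LOAD_MMU_PGD (1u << 5)   /* stands in for KVM_ARCH_REQ(5) */
#define INVALID_PAGE         (~0ul)

struct kvm_vcpu;

struct kvm_x86_ops {
        /* was ->set_cr3 before this commit */
        void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd);
};

struct kvm_vcpu {
        unsigned int requests;               /* pending KVM_REQ_* bits */
        unsigned long mmu_root_hpa;          /* models vcpu->arch.mmu->root_hpa */
        const struct kvm_x86_ops *ops;       /* models kvm_x86_ops */
};

static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
{
        /* The real callback writes GUEST_CR3 or the EPT pointer; just log here. */
        (void)vcpu;
        printf("loading MMU root/PGD 0x%lx\n", pgd);
}

static void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
        /* mirrors the mmu.h helper: only hand a valid root to the callback */
        if (vcpu->mmu_root_hpa != INVALID_PAGE)
                vcpu->ops->load_mmu_pgd(vcpu, vcpu->mmu_root_hpa);
}

static void vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
        /* mirrors the request check in x86.c */
        if (vcpu->requests & KVM_REQ_LOAD_MMU_PGD) {
                vcpu->requests &= ~KVM_REQ_LOAD_MMU_PGD;
                kvm_mmu_load_pgd(vcpu);
        }
}

int main(void)
{
        static const struct kvm_x86_ops ops = { .load_mmu_pgd = vmx_load_mmu_pgd };
        struct kvm_vcpu vcpu = {
                .requests     = KVM_REQ_LOAD_MMU_PGD,  /* e.g. raised by fast_cr3_switch() */
                .mmu_root_hpa = 0x1234000,
                .ops          = &ops,
        };
        vcpu_enter_guest(&vcpu);
        return 0;
}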
@@ -58,7 +58,7 @@
 #define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
 #define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
 #define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
-#define KVM_REQ_LOAD_CR3 KVM_ARCH_REQ(5)
+#define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
 #define KVM_REQ_EVENT KVM_ARCH_REQ(6)
 #define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
 #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
@@ -1091,7 +1091,6 @@ struct kvm_x86_ops {
         void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
         void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
         void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
         int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
         void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
         void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -1155,6 +1154,8 @@ struct kvm_x86_ops {
         int (*get_tdp_level)(struct kvm_vcpu *vcpu);
         u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+        void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long cr3);
         bool (*has_wbinvd_exit)(void);
         u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
...
@@ -98,8 +98,8 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
         if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-                kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
+                kvm_x86_ops->load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
                                            kvm_get_active_pcid(vcpu));
 }
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
...
@@ -4311,7 +4311,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
                  * accompanied by KVM_REQ_MMU_RELOAD, which will free
                  * the root set here and allocate a new one.
                  */
-                kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+                kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
                 if (!skip_tlb_flush) {
                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -5182,7 +5182,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
         kvm_mmu_sync_roots(vcpu);
         if (r)
                 goto out;
-        kvm_mmu_load_cr3(vcpu);
+        kvm_mmu_load_pgd(vcpu);
         kvm_x86_ops->tlb_flush(vcpu, true);
 out:
         return r;
...
@@ -5942,7 +5942,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
-static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
         bool update_guest_cr3 = true;
@@ -7354,7 +7354,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
         .set_cr0 = svm_set_cr0,
-        .set_cr3 = svm_set_cr3,
         .set_cr4 = svm_set_cr4,
         .set_efer = svm_set_efer,
         .get_idt = svm_get_idt,
@@ -7414,6 +7413,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+        .load_mmu_pgd = svm_load_mmu_pgd,
         .check_intercept = svm_check_intercept,
         .handle_exit_irqoff = svm_handle_exit_irqoff,
...
@@ -2473,9 +2473,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                  * If L1 use EPT, then L0 needs to execute INVEPT on
                  * EPTP02 instead of EPTP01. Therefore, delay TLB
                  * flush until vmcs02->eptp is fully updated by
-                 * KVM_REQ_LOAD_CR3. Note that this assumes
+                 * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
                  * KVM_REQ_TLB_FLUSH is evaluated after
-                 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+                 * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
                  */
                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
         }
@@ -2520,7 +2520,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         /*
          * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
          * on nested VM-Exit, which can occur without actually running L2 and
-         * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
+         * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
          * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
          * transition to HLT instead of running L2.
          */
@@ -4031,7 +4031,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
          *
          * If vmcs12 uses EPT, we need to execute this flush on EPTP01
          * and therefore we request the TLB flush to happen only after VMCS EPTP
-         * has been set by KVM_REQ_LOAD_CR3.
+         * has been set by KVM_REQ_LOAD_MMU_PGD.
          */
         if (enable_vpid &&
             (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
...
@@ -2995,7 +2995,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
         return eptp;
 }
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
         struct kvm *kvm = vcpu->kvm;
         bool update_guest_cr3 = true;
@@ -7859,7 +7859,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
         .set_cr0 = vmx_set_cr0,
-        .set_cr3 = vmx_set_cr3,
         .set_cr4 = vmx_set_cr4,
         .set_efer = vmx_set_efer,
         .get_idt = vmx_get_idt,
@@ -7922,6 +7921,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
         .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
         .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+        .load_mmu_pgd = vmx_load_mmu_pgd,
         .check_intercept = vmx_check_intercept,
         .handle_exit_irqoff = vmx_handle_exit_irqoff,
...
@@ -334,9 +334,9 @@ u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
...
@@ -8186,8 +8186,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         }
         if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                 kvm_mmu_sync_roots(vcpu);
-        if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
-                kvm_mmu_load_cr3(vcpu);
+        if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
+                kvm_mmu_load_pgd(vcpu);
         if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                 kvm_vcpu_flush_tlb(vcpu, true);
         if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
...