Commit bd31fe49 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Add proper cache tracking for CR0

Move CR0 caching into the standard register caching mechanism in order
to take advantage of the availability checks provided by regs_avail.
This avoids multiple VMREADs in the (uncommon) case where kvm_read_cr0()
is called multiple times in a single VM-Exit, and more importantly
eliminates a kvm_x86_ops hook, saves a retpoline on SVM when reading
CR0, and squashes the confusing naming discrepancy of "cache_reg" vs.
"decache_cr0_guest_bits".

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200502043234.12481-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f98c1e77
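The mechanism the patch builds on, for readers following along: KVM keeps a per-vCPU bitmap, vcpu->arch.regs_avail, with one bit per cached register. A register is fetched from hardware only when its bit is clear, and the bit is set once the value lands in the software cache. Below is a minimal, self-contained sketch of that pattern; kvm_register_is_available() and kvm_register_mark_available() are the real helper names this commit uses, while the demo_* types and the stand-in for VMREAD are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum demo_reg { DEMO_EXREG_CR0, DEMO_NR_REGS };

struct demo_vcpu {
	unsigned long regs_avail;   /* bit N set => register N is cached */
	unsigned long cr0;          /* cached value, valid only if bit is set */
};

static bool demo_register_is_available(struct demo_vcpu *vcpu, int reg)
{
	return vcpu->regs_avail & (1UL << reg);
}

static void demo_register_mark_available(struct demo_vcpu *vcpu, int reg)
{
	vcpu->regs_avail |= 1UL << reg;
}

/* Stand-in for the expensive VMREAD of GUEST_CR0. */
static unsigned long demo_vmread_guest_cr0(void)
{
	printf("VMREAD\n");
	return 0x80050033UL;        /* a typical protected-mode CR0 value */
}

static unsigned long demo_read_cr0(struct demo_vcpu *vcpu)
{
	/* Only the first read after a VM-Exit pays for the VMREAD... */
	if (!demo_register_is_available(vcpu, DEMO_EXREG_CR0)) {
		vcpu->cr0 = demo_vmread_guest_cr0();
		demo_register_mark_available(vcpu, DEMO_EXREG_CR0);
	}
	/* ...later reads in the same exit hit the software cache. */
	return vcpu->cr0;
}

int main(void)
{
	struct demo_vcpu vcpu = { .regs_avail = 0 };

	demo_read_cr0(&vcpu);   /* prints VMREAD */
	demo_read_cr0(&vcpu);   /* served from cache, no VMREAD */
	vcpu.regs_avail = 0;    /* what the VM-Exit cache reset models */
	demo_read_cr0(&vcpu);   /* next exit: first read pays again */
	return 0;
}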
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
@@ -167,6 +167,7 @@ enum kvm_reg {
 	NR_VCPU_REGS,

 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR0,
 	VCPU_EXREG_CR3,
 	VCPU_EXREG_CR4,
 	VCPU_EXREG_RFLAGS,
@@ -1092,7 +1093,6 @@ struct kvm_x86_ops {
 	void (*set_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
-	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
@@ -116,8 +116,9 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

-	if (tmask & vcpu->arch.cr0_guest_owned_bits)
-		kvm_x86_ops.decache_cr0_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }
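For reference, a typical caller of this helper is is_paging(), shown below as it reads in arch/x86/kvm/x86.h of this era. Because CR0.PG can never be guest-owned, tmask above is zero for this query and the answer always comes straight from vcpu->arch.cr0 with no VMREAD and no indirect call:

/* Mirrors is_paging() in arch/x86/kvm/x86.h: CR0.PG is not among the
 * possible guest-owned bits, so this read is always served from the
 * software copy. */
static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}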
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
@@ -1521,10 +1521,6 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 	mark_dirty(svm->vmcb, VMCB_DT);
 }

-static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
-{
-}
-
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
 	ulong gcr0 = svm->vcpu.arch.cr0;
@@ -4002,7 +3998,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_segment = svm_set_segment,
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
-	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr4 = svm_set_cr4,
 	.set_efer = svm_set_efer,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
@@ -2219,6 +2219,12 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 		if (enable_ept)
 			ept_save_pdptrs(vcpu);
 		break;
+	case VCPU_EXREG_CR0:
+		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+		vcpu->arch.cr0 &= ~guest_owned_bits;
+		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
+		break;
 	case VCPU_EXREG_CR3:
 		if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
 			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
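The two lines added above are a mask merge: bits the guest owns are refreshed from the VMCS, while everything else keeps the value KVM last wrote into its software copy, which is authoritative because writes to host-owned bits trap. Conceptually (illustrative helper, not kernel code):

/* Illustrative only: the merge performed by the new VCPU_EXREG_CR0 case.
 * Guest-owned bits come from hardware (GUEST_CR0); host-owned bits come
 * from KVM's software copy. */
static unsigned long merge_cr0(unsigned long sw_cr0, unsigned long hw_cr0,
			       unsigned long guest_owned)
{
	return (sw_cr0 & ~guest_owned) | (hw_cr0 & guest_owned);
}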
@@ -2922,14 +2928,6 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
 	vpid_sync_context(to_vmx(vcpu)->vpid);
 }

-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
-{
-	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
-
-	vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
-	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
-}
-
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3019,6 +3017,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
+	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);

 	/* depends on vcpu->arch.cr0 to be set to a new value */
 	vmx->emulation_required = emulation_required(vcpu);
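Marking the register available immediately after the write is the other half of the protocol: vcpu->arch.cr0 and GUEST_CR0 are in sync at this point, so a subsequent kvm_read_cr0_bits() in the same exit skips the VMREAD entirely. The same write-then-mark idiom in miniature, reusing the demo_* names from the sketch near the top (illustrative, not kernel code):

/* Write-path counterpart of demo_read_cr0(): after pushing a new value to
 * (simulated) hardware, the software cache is by definition fresh, so
 * mark it available rather than letting the next read re-fetch it. */
static void demo_write_cr0(struct demo_vcpu *vcpu, unsigned long cr0)
{
	/* vmcs_writel(GUEST_CR0, ...) would go here in real code. */
	vcpu->cr0 = cr0;
	demo_register_mark_available(vcpu, DEMO_EXREG_CR0);
}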
@@ -7809,7 +7808,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_segment = vmx_set_segment,
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
-	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr4 = vmx_set_cr4,
 	.set_efer = vmx_set_efer,
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
@@ -452,6 +452,7 @@ static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_RFLAGS)
 				  | (1 << VCPU_EXREG_PDPTR)
 				  | (1 << VCPU_EXREG_SEGMENTS)
+				  | (1 << VCPU_EXREG_CR0)
 				  | (1 << VCPU_EXREG_CR3)
 				  | (1 << VCPU_EXREG_CR4)
 				  | (1 << VCPU_EXREG_EXIT_INFO_1)
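This mask is the invalidation side of the scheme: everything OR'ed together in vmx_register_cache_reset() lives in the VMCS and goes stale the instant the guest runs, so on VM-Exit the whole set is knocked down in one store (GPRs other than RSP/RIP stay available because software saves them itself). A sketch of the idea, reusing the demo_vcpu from above and assuming, per the context shown, that the surrounding statement assigns the complement of this mask to regs_avail:

/* Sketch only: on VM-Exit, drop availability of every VMCS-resident
 * register at once; nothing is re-read until a caller actually asks. */
static void demo_register_cache_reset(struct demo_vcpu *vcpu)
{
	vcpu->regs_avail &= ~(1UL << DEMO_EXREG_CR0);
	/* the real mask also covers RSP/RIP, RFLAGS, PDPTRs, CR3, CR4, ... */
}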