Commit 607475cf authored by Binbin Wu, committed by Sean Christopherson

KVM: x86: Add helpers to query individual CR0/CR4 bits

Add helpers to check if a specific CR0/CR4 bit is set to avoid a plethora
of implicit casts from the "unsigned long" return of kvm_read_cr*_bits(),
and to make each caller's intent more obvious.

Defer converting helpers that do truly ugly casts from "unsigned long" to
"int", e.g. is_pse(), to a future commit so that their conversion is more
isolated.

Opportunistically drop the superfluous pcid_enabled from kvm_set_cr3();
the local variable is used only once, immediately after its declaration.
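
For illustration, the conversion is mechanical; a minimal sketch of the before/after pattern, using the pcid_enabled assignment from kvm_handle_invpcid() in the diff below:

        /* Before: the "unsigned long" mask result is implicitly collapsed. */
        pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

        /* After: intent is explicit and the helper already returns bool. */
        pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);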
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20230322045824.22970-2-binbin.wu@linux.intel.com
[sean: move "obvious" conversions to this commit, massage changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 0c928ff2
@@ -266,7 +266,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                 /* Update OSXSAVE bit */
                 if (boot_cpu_has(X86_FEATURE_XSAVE))
                         cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
-                                          kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
+                                          kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
 
                 cpuid_entry_change(best, X86_FEATURE_APIC,
                         vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
@@ -275,7 +275,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
         best = cpuid_entry2_find(entries, nent, 7, 0);
         if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                 cpuid_entry_change(best, X86_FEATURE_OSPKE,
-                                  kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+                                  kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
 
         best = cpuid_entry2_find(entries, nent, 0xD, 0);
         if (best)
...
@@ -157,6 +157,14 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
         return vcpu->arch.cr0 & mask;
 }
 
+static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
+                                               unsigned long cr0_bit)
+{
+        BUILD_BUG_ON(!is_power_of_2(cr0_bit));
+
+        return !!kvm_read_cr0_bits(vcpu, cr0_bit);
+}
+
 static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 {
         return kvm_read_cr0_bits(vcpu, ~0UL);
@@ -171,6 +179,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
         return vcpu->arch.cr4 & mask;
 }
 
+static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
+                                               unsigned long cr4_bit)
+{
+        BUILD_BUG_ON(!is_power_of_2(cr4_bit));
+
+        return !!kvm_read_cr4_bits(vcpu, cr4_bit);
+}
+
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
         if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
...
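
The new helpers return an explicit bool, and the BUILD_BUG_ON(!is_power_of_2(...)) guard means they only accept a single-bit mask; multi-bit queries still have to go through kvm_read_cr*_bits(). A minimal sketch of the distinction, with hypothetical local variables (the CR4 bit macros are the standard x86 definitions):

        /* Single-bit query: compiles and yields an explicit bool. */
        bool smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);

        /* Multi-bit mask: BUILD_BUG_ON(!is_power_of_2(...)) would fire at
         * compile time, so multi-bit queries keep using kvm_read_cr4_bits(). */
        unsigned long smep_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP | X86_CR4_SMAP);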
@@ -132,7 +132,7 @@ static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
 {
         BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
 
-        return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
                ? cr3 & X86_CR3_PCID_MASK
                : 0;
 }
...
@@ -540,9 +540,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
         if (!pmc)
                 return 1;
 
-        if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
+        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
             (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
-            (kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+            kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                 return 1;
 
         *data = pmc_read_counter(pmc) & mask;
...
@@ -5154,7 +5154,7 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
          * does force CR0.PE=1, but only to also force VM86 in order to emulate
          * Real Mode, and so there's no need to check CR0.PE manually.
          */
-        if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 1;
         }
...
@@ -5180,7 +5180,7 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
         if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
                 return true;
 
-        return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+        return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
                (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
 }
...
@@ -841,7 +841,7 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 {
-        if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+        if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
                 return true;
 
         kvm_queue_exception(vcpu, UD_VECTOR);
@@ -983,7 +983,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                 return 1;
 
         if (!(cr0 & X86_CR0_PG) &&
-            (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
+            (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
                 return 1;
 
         static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1005,7 +1005,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
         if (vcpu->arch.guest_state_protected)
                 return;
 
-        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                 if (vcpu->arch.xcr0 != host_xcr0)
                         xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
@@ -1019,7 +1019,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
         if (static_cpu_has(X86_FEATURE_PKU) &&
             vcpu->arch.pkru != vcpu->arch.host_pkru &&
             ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-             kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
                 write_pkru(vcpu->arch.pkru);
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 }
@@ -1033,14 +1033,14 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
         if (static_cpu_has(X86_FEATURE_PKU) &&
             ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-             kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
+             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
                 vcpu->arch.pkru = rdpkru();
                 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                         write_pkru(vcpu->arch.host_pkru);
         }
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
-        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                 if (vcpu->arch.xcr0 != host_xcr0)
                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
@@ -1245,7 +1245,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
          * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
          * with PCIDE=0.
          */
-        if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
                 return;
 
         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
@@ -1260,9 +1260,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         bool skip_tlb_flush = false;
         unsigned long pcid = 0;
 #ifdef CONFIG_X86_64
-        bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
-        if (pcid_enabled) {
+        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
                 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
                 cr3 &= ~X86_CR3_PCID_NOFLUSH;
                 pcid = cr3 & X86_CR3_PCID_MASK;
@@ -5051,7 +5049,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
                 return 0;
 
         if (mce->status & MCI_STATUS_UC) {
                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-                    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
+                    !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                         return 0;
                 }
@@ -13254,7 +13252,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
                 return 1;
         }
 
-        pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+        pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
 
         switch (type) {
         case INVPCID_TYPE_INDIV_ADDR:
...
@@ -123,7 +123,7 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
 
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
-        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+        return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
 }
 
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
@@ -193,7 +193,7 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
 
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
-        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+        return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
...