Commit d64d83d1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Handle PKU CPUID adjustment in VMX code

Move the setting of the PKU CPUID bit into VMX to eliminate an instance
of the undesirable "unsigned f_* = *_supported ? F(*) : 0" pattern in
the common CPUID handling code.  Drop ->pku_supported(); the CPUID
adjustment was its only user.

Note, some AMD CPUs now support PKU, but SVM doesn't yet support
exposing it to a guest.

No functional change intended.
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e574768f
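
For context, the shape of the change: the ad hoc "f_*" local in the common
CPUID code is replaced by a direct cpuid_entry_set() in the vendor hook.
Both fragments below are trimmed from the hunks that follow and are only an
illustrative sketch, not complete code:

    /* Before: common code computes a per-feature mask via a vendor callback. */
    unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
    ...
    entry->ecx |= f_pku;

    /* After: vmx_set_supported_cpuid() sets the bit itself; SVM simply never
     * sets it, since it can't yet expose PKU to guests. */
    if (enable_ept && boot_cpu_has(X86_FEATURE_PKU) &&
        boot_cpu_has(X86_FEATURE_OSPKE))
            cpuid_entry_set(entry, X86_FEATURE_PKU);
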
@@ -1180,7 +1180,6 @@ struct kvm_x86_ops {
 	bool (*xsaves_supported)(void);
 	bool (*umip_emulated)(void);
 	bool (*pt_supported)(void);
-	bool (*pku_supported)(void);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
...
@@ -333,7 +333,6 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 {
 	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
 	unsigned f_la57;
-	unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
 
 	/* cpuid 7.0.ebx */
 	const u32 kvm_cpuid_7_0_ebx_x86_features =
@@ -373,10 +372,6 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 	cpuid_entry_mask(entry, CPUID_7_ECX);
 	/* Set LA57 based on hardware capability. */
 	entry->ecx |= f_la57;
-	entry->ecx |= f_pku;
-	/* PKU is not yet implemented for shadow paging. */
-	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
-		cpuid_entry_clear(entry, X86_FEATURE_PKU);
 
 	entry->edx &= kvm_cpuid_7_0_edx_x86_features;
 	cpuid_entry_mask(entry, CPUID_7_EDX);
...
@@ -6093,11 +6093,6 @@ static bool svm_has_wbinvd_exit(void)
 	return true;
 }
 
-static bool svm_pku_supported(void)
-{
-	return false;
-}
-
 #define PRE_EX(exit)  { .exit_code = (exit), \
 			.stage = X86_ICPT_PRE_EXCEPT, }
 #define POST_EX(exit) { .exit_code = (exit), \
@@ -7458,7 +7453,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.xsaves_supported = svm_xsaves_supported,
 	.umip_emulated = svm_umip_emulated,
 	.pt_supported = svm_pt_supported,
-	.pku_supported = svm_pku_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
...
@@ -146,11 +146,6 @@ static inline bool vmx_umip_emulated(void)
 		SECONDARY_EXEC_DESC;
 }
 
-static inline bool vmx_pku_supported(void)
-{
-	return boot_cpu_has(X86_FEATURE_PKU);
-}
-
 static inline bool cpu_has_vmx_rdtscp(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
...
@@ -7137,6 +7137,11 @@ static void vmx_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
 			cpuid_entry_set(entry, X86_FEATURE_INVPCID);
 		if (vmx_umip_emulated())
 			cpuid_entry_set(entry, X86_FEATURE_UMIP);
+
+		/* PKU is not yet implemented for shadow paging. */
+		if (enable_ept && boot_cpu_has(X86_FEATURE_PKU) &&
+		    boot_cpu_has(X86_FEATURE_OSPKE))
+			cpuid_entry_set(entry, X86_FEATURE_PKU);
 		break;
 	default:
 		break;
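
Why the new VMX-side condition matches the old common-code behavior: the old
path set the bit and then cleared it when !tdp_enabled ||
!boot_cpu_has(X86_FEATURE_OSPKE), and on VMX tdp_enabled tracks enable_ept, so
negating that clear condition yields exactly the new set condition.  A
minimal, self-contained sketch of the equivalence (the boolean parameters are
stand-ins for the kernel flags, not real KVM code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for enable_ept/tdp_enabled, host PKU, and host OSPKE. */
    static bool conditions_agree(bool ept, bool pku, bool ospke)
    {
            /* Old flow: set when hardware has PKU, then clear for shadow
             * paging or when the host hasn't enabled OSPKE. */
            bool old_way = pku;
            if (!ept || !ospke)
                    old_way = false;

            /* New flow: set only when everything required holds. */
            bool new_way = ept && pku && ospke;

            return old_way == new_way;
    }

    int main(void)
    {
            for (int i = 0; i < 8; i++)
                    if (!conditions_agree(i & 1, i & 2, i & 4))
                            printf("mismatch for input %d\n", i);
            printf("old and new PKU conditions agree for all inputs\n");
            return 0;
    }

Every combination of the three inputs produces the same PKU bit either way,
which is why the commit can claim no functional change on VMX.
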
@@ -7938,7 +7943,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.xsaves_supported = vmx_xsaves_supported,
 	.umip_emulated = vmx_umip_emulated,
 	.pt_supported = vmx_pt_supported,
-	.pku_supported = vmx_pku_supported,
 
 	.request_immediate_exit = vmx_request_immediate_exit,
...