Commit 674ea351 authored by Paolo Bonzini

KVM: x86: optimize check for valid PAT value

This check will soon be done on every nested vmentry and vmexit,
"parallelize" it using bitwise operations.
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f16cb57b
...@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr) ...@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
return false; return false;
} }
/*
 * Return true if @t is an architecturally valid PAT memory type.
 * Valid encodings are 0, 1, 4, 5, 6 and 7; 2 and 3 are reserved.
 * Mask 0xf3 == 0b11110011 has exactly those bit positions set.
 */
static bool valid_pat_type(unsigned t)
{
	if (t >= 8)
		return false;
	return (0xf3u >> t) & 1;
}
static bool valid_mtrr_type(unsigned t) static bool valid_mtrr_type(unsigned t)
{ {
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
...@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return false; return false;
if (msr == MSR_IA32_CR_PAT) { if (msr == MSR_IA32_CR_PAT) {
for (i = 0; i < 8; i++) return kvm_pat_valid(data);
if (!valid_pat_type((data >> (i * 8)) & 0xff))
return false;
return true;
} else if (msr == MSR_MTRRdefType) { } else if (msr == MSR_MTRRdefType) {
if (data & ~0xcff) if (data & ~0xcff)
return false; return false;
......
...@@ -1891,7 +1891,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -1891,7 +1891,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break; break;
case MSR_IA32_CR_PAT: case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) if (!kvm_pat_valid(data))
return 1; return 1;
vmcs_write64(GUEST_IA32_PAT, data); vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data; vcpu->arch.pat = data;
......
...@@ -347,6 +347,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu) ...@@ -347,6 +347,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
__this_cpu_write(current_vcpu, NULL); __this_cpu_write(current_vcpu, NULL);
} }
/*
 * Validate an IA32_PAT MSR value.  Each of the eight PAT entries occupies
 * one byte; all eight are checked at once with bitwise operations instead
 * of a per-byte loop, since per the commit message this runs on every
 * nested vmentry/vmexit.
 */
static inline bool kvm_pat_valid(u64 data)
{
/* Only bits 2:0 of each byte may be set; any bit in 0xF8 is invalid. */
if (data & 0xF8F8F8F8F8F8F8F8ull)
return false;
/* 0, 1, 4, 5, 6, 7 are valid values. */
/*
 * At this point every byte is in 0..7; the only invalid values left are
 * 2 (0b010) and 3 (0b011), i.e. bit 1 set with bit 2 clear.  Setting
 * bit 2 wherever bit 1 is set leaves @data unchanged iff no byte is a
 * reserved 2 or 3.
 */
return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.