Commit aaecae7b authored by Sean Christopherson's avatar Sean Christopherson

KVM: x86: Rename KVM_MSR_RET_INVALID to KVM_MSR_RET_UNSUPPORTED

Rename the "INVALID" internal MSR error return code to "UNSUPPORTED" to
try and make it more clear that access was denied because the MSR itself
is unsupported/unknown.  "INVALID" is too ambiguous, as it could just as
easily mean that the value written via WRMSR is invalid.

Avoid UNKNOWN and UNIMPLEMENTED, as the error code is used for MSRs that
_are_ actually implemented by KVM, e.g. if the MSR is unsupported because
an associated feature flag is not present in guest CPUID.

Opportunistically beef up the comments for the internal MSR error codes.

Link: https://lore.kernel.org/r/20240802181935.292540-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent b58b808c
...@@ -2835,7 +2835,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) ...@@ -2835,7 +2835,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break; break;
default: default:
return KVM_MSR_RET_INVALID; return KVM_MSR_RET_UNSUPPORTED;
} }
return 0; return 0;
......
...@@ -2006,7 +2006,7 @@ int vmx_get_msr_feature(struct kvm_msr_entry *msr) ...@@ -2006,7 +2006,7 @@ int vmx_get_msr_feature(struct kvm_msr_entry *msr)
return 1; return 1;
return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
default: default:
return KVM_MSR_RET_INVALID; return KVM_MSR_RET_UNSUPPORTED;
} }
} }
......
...@@ -1687,7 +1687,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) ...@@ -1687,7 +1687,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
msr.index = index; msr.index = index;
r = kvm_get_msr_feature(&msr); r = kvm_get_msr_feature(&msr);
if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false)) if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
r = 0; r = 0;
*data = msr.data; *data = msr.data;
...@@ -1884,7 +1884,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, ...@@ -1884,7 +1884,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
{ {
int ret = __kvm_set_msr(vcpu, index, data, host_initiated); int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
if (ret == KVM_MSR_RET_INVALID) if (ret == KVM_MSR_RET_UNSUPPORTED)
if (kvm_msr_ignored_check(index, data, true)) if (kvm_msr_ignored_check(index, data, true))
ret = 0; ret = 0;
...@@ -1929,7 +1929,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, ...@@ -1929,7 +1929,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
{ {
int ret = __kvm_get_msr(vcpu, index, data, host_initiated); int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
if (ret == KVM_MSR_RET_INVALID) { if (ret == KVM_MSR_RET_UNSUPPORTED) {
/* Unconditionally clear *data for simplicity */ /* Unconditionally clear *data for simplicity */
*data = 0; *data = 0;
if (kvm_msr_ignored_check(index, 0, false)) if (kvm_msr_ignored_check(index, 0, false))
...@@ -1998,7 +1998,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) ...@@ -1998,7 +1998,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
static u64 kvm_msr_reason(int r) static u64 kvm_msr_reason(int r)
{ {
switch (r) { switch (r) {
case KVM_MSR_RET_INVALID: case KVM_MSR_RET_UNSUPPORTED:
return KVM_MSR_EXIT_REASON_UNKNOWN; return KVM_MSR_EXIT_REASON_UNKNOWN;
case KVM_MSR_RET_FILTERED: case KVM_MSR_RET_FILTERED:
return KVM_MSR_EXIT_REASON_FILTER; return KVM_MSR_EXIT_REASON_FILTER;
...@@ -4146,7 +4146,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -4146,7 +4146,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
kvm_is_msr_to_save(msr)) kvm_is_msr_to_save(msr))
break; break;
return KVM_MSR_RET_INVALID; return KVM_MSR_RET_UNSUPPORTED;
} }
return 0; return 0;
} }
...@@ -4507,7 +4507,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -4507,7 +4507,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break; break;
} }
return KVM_MSR_RET_INVALID; return KVM_MSR_RET_UNSUPPORTED;
} }
return 0; return 0;
} }
......
...@@ -512,11 +512,18 @@ enum kvm_msr_access { ...@@ -512,11 +512,18 @@ enum kvm_msr_access {
/* /*
* Internal error codes that are used to indicate that MSR emulation encountered * Internal error codes that are used to indicate that MSR emulation encountered
* an error that should result in #GP in the guest, unless userspace * an error that should result in #GP in the guest, unless userspace handles it.
* handles it. * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
* as part of KVM's lightly documented internal KVM_RUN return codes.
*
* UNSUPPORTED - The MSR isn't supported, either because it is completely
* unknown to KVM, or because the MSR should not exist according
* to the vCPU model.
*
* FILTERED - Access to the MSR is denied by a userspace MSR filter.
*/ */
#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */ #define KVM_MSR_RET_UNSUPPORTED 2
#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */ #define KVM_MSR_RET_FILTERED 3
#define __cr4_reserved_bits(__cpu_has, __c) \ #define __cr4_reserved_bits(__cpu_has, __c) \
({ \ ({ \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment