Commit 74c6c98a authored by Sean Christopherson

KVM: x86: Refactor kvm_x86_ops.get_msr_feature() to avoid kvm_msr_entry

Refactor get_msr_feature() to take the index and data pointer as distinct
parameters in anticipation of eliminating "struct kvm_msr_entry" usage
further up the primary callchain.

No functional change intended.

Link: https://lore.kernel.org/r/20240802181935.292540-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent aaecae7b
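The diff below changes only the callback's shape: instead of filling a struct kvm_msr_entry, each vendor implementation now writes through a plain u64 pointer. As a minimal, self-contained sketch of the new shape (ordinary userspace C, not kernel code: the main() harness, the stdint typedefs, and the concrete KVM_MSR_RET_UNSUPPORTED value are stand-ins; the constants mirror the kernel's definitions):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Values mirror msr-index.h; the error code is a stand-in for this sketch. */
#define MSR_AMD64_DE_CFG			0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE	(1ULL << 1)
#define KVM_MSR_RET_UNSUPPORTED			2

/*
 * New-style callback: MSR index in, feature value out via a plain u64
 * pointer, no struct kvm_msr_entry. The cpu_feature_enabled() check from
 * the real svm_get_msr_feature() is assumed true here.
 */
static int svm_get_msr_feature(u32 msr, u64 *data)
{
	*data = 0;

	switch (msr) {
	case MSR_AMD64_DE_CFG:
		*data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
		break;
	default:
		return KVM_MSR_RET_UNSUPPORTED;
	}
	return 0;
}

int main(void)
{
	u64 supported_de_cfg;

	/* Callers pass the index and a local u64, as svm_set_msr() now does. */
	if (!svm_get_msr_feature(MSR_AMD64_DE_CFG, &supported_de_cfg))
		printf("supported DE_CFG bits: %#llx\n",
		       (unsigned long long)supported_de_cfg);
	return 0;
}
```

Passing the index and data separately lets callers such as svm_set_msr() query the supported feature bits with a bare local u64, without staging a kvm_msr_entry on the stack.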
arch/x86/include/asm/kvm_host.h
@@ -1806,7 +1806,7 @@ struct kvm_x86_ops {
 	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
 	void (*guest_memory_reclaimed)(struct kvm *kvm);
 
-	int (*get_msr_feature)(struct kvm_msr_entry *entry);
+	int (*get_msr_feature)(u32 msr, u64 *data);
 
 	int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
 					 void *insn, int insn_len);
arch/x86/kvm/svm/svm.c
@@ -2825,14 +2825,14 @@ static int efer_trap(struct kvm_vcpu *vcpu)
 	return kvm_complete_insn_gp(vcpu, ret);
 }
 
-static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+static int svm_get_msr_feature(u32 msr, u64 *data)
 {
-	msr->data = 0;
+	*data = 0;
 
-	switch (msr->index) {
+	switch (msr) {
 	case MSR_AMD64_DE_CFG:
 		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
-			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+			*data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
 		break;
 	default:
 		return KVM_MSR_RET_UNSUPPORTED;
arch/x86/kvm/svm/svm.c
@@ -3179,14 +3179,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
 		break;
 	case MSR_AMD64_DE_CFG: {
-		struct kvm_msr_entry msr_entry;
+		u64 supported_de_cfg;
 
-		msr_entry.index = msr->index;
-		if (svm_get_msr_feature(&msr_entry))
+		if (svm_get_msr_feature(ecx, &supported_de_cfg))
 			return 1;
 
-		/* Check the supported bits */
-		if (data & ~msr_entry.data)
+		if (data & ~supported_de_cfg)
 			return 1;
 
 		/*
arch/x86/kvm/vmx/vmx.c
@@ -1998,13 +1998,13 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
 	return !(msr->data & ~valid_bits);
 }
 
-int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+int vmx_get_msr_feature(u32 msr, u64 *data)
 {
-	switch (msr->index) {
+	switch (msr) {
 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
 		if (!nested)
 			return 1;
-		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+		return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
 	default:
 		return KVM_MSR_RET_UNSUPPORTED;
 	}
arch/x86/kvm/vmx/x86_ops.h
@@ -56,7 +56,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
-int vmx_get_msr_feature(struct kvm_msr_entry *msr);
+int vmx_get_msr_feature(u32 msr, u64 *data);
 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
arch/x86/kvm/x86.c
@@ -1672,7 +1672,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 		rdmsrl_safe(msr->index, &msr->data);
 		break;
 	default:
-		return kvm_x86_call(get_msr_feature)(msr);
+		return kvm_x86_call(get_msr_feature)(msr->index, &msr->data);
 	}
 	return 0;
 }
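Note that kvm_get_msr_feature() itself still takes a struct kvm_msr_entry and merely unpacks the index and data pointer at the call site; per the commit message, eliminating the struct from the rest of the primary callchain is left to a later patch in the series.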