Commit f20935d8 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Refactor up kvm_{g,s}et_msr() to simplify callers

Refactor the top-level MSR accessors to take/return the index and value
directly instead of requiring the caller to dump them into a msr_data
struct.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b274a290
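
For orientation before the hunks below: a simplified sketch of what the new calling convention buys a typical guest-initiated WRMSR caller. This is illustrative only; the vcpu/ecx/data names are taken from the handlers in this diff, not a literal excerpt.

        /* Old calling convention: the caller packs a struct msr_data by hand. */
        struct msr_data msr;

        msr.data = data;                /* value from guest EDX:EAX */
        msr.index = ecx;                /* MSR number from guest RCX */
        msr.host_initiated = false;     /* guest-initiated access */
        if (kvm_set_msr(vcpu, &msr))
                kvm_inject_gp(vcpu, 0);

        /* New calling convention: index and value are passed directly;
         * host_initiated is implied (false) by the kvm_set_msr() wrapper. */
        if (kvm_set_msr(vcpu, ecx, data))
                kvm_inject_gp(vcpu, 0);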
@@ -1326,8 +1326,8 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
-int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
 struct x86_emulate_ctxt;
...
@@ -4425,15 +4425,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 static int wrmsr_interception(struct vcpu_svm *svm)
 {
-        struct msr_data msr;
         u32 ecx = kvm_rcx_read(&svm->vcpu);
         u64 data = kvm_read_edx_eax(&svm->vcpu);
-        msr.data = data;
-        msr.index = ecx;
-        msr.host_initiated = false;
-        if (kvm_set_msr(&svm->vcpu, &msr)) {
+        if (kvm_set_msr(&svm->vcpu, ecx, data)) {
                 trace_kvm_msr_write_ex(ecx, data);
                 kvm_inject_gp(&svm->vcpu, 0);
                 return 1;
...
@@ -864,9 +864,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
         u32 i;
         struct vmx_msr_entry e;
-        struct msr_data msr;
-        msr.host_initiated = false;
         for (i = 0; i < count; i++) {
                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
                                         &e, sizeof(e))) {
@@ -881,9 +879,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                                 __func__, i, e.index, e.reserved);
                         goto fail;
                 }
-                msr.index = e.index;
-                msr.data = e.value;
-                if (kvm_set_msr(vcpu, &msr)) {
+                if (kvm_set_msr(vcpu, e.index, e.value)) {
                         pr_debug_ratelimited(
                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                 __func__, i, e.index, e.value);
@@ -897,11 +893,11 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
+        u64 data;
         u32 i;
         struct vmx_msr_entry e;
         for (i = 0; i < count; i++) {
-                struct msr_data msr_info;
                 if (kvm_vcpu_read_guest(vcpu,
                                         gpa + i * sizeof(e),
                                         &e, 2 * sizeof(u32))) {
@@ -916,9 +912,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                                 __func__, i, e.index, e.reserved);
                         return -EINVAL;
                 }
-                msr_info.host_initiated = false;
-                msr_info.index = e.index;
-                if (kvm_get_msr(vcpu, &msr_info)) {
+                if (kvm_get_msr(vcpu, e.index, &data)) {
                         pr_debug_ratelimited(
                                 "%s cannot read MSR (%u, 0x%x)\n",
                                 __func__, i, e.index);
@@ -927,10 +921,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                 if (kvm_vcpu_write_guest(vcpu,
                                          gpa + i * sizeof(e) +
                                          offsetof(struct vmx_msr_entry, value),
-                                         &msr_info.data, sizeof(msr_info.data))) {
+                                         &data, sizeof(data))) {
                         pr_debug_ratelimited(
                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-                                __func__, i, e.index, msr_info.data);
+                                __func__, i, e.index, data);
                         return -EINVAL;
                 }
         }
@@ -3889,7 +3883,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         struct vmx_msr_entry g, h;
-        struct msr_data msr;
         gpa_t gpa;
         u32 i, j;
@@ -3949,7 +3942,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
          * from the guest value. The intent is to stuff host state as
          * silently as possible, not to fully process the exit load list.
          */
-        msr.host_initiated = false;
         for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
                 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
                 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
@@ -3979,9 +3971,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
                         goto vmabort;
                 }
-                msr.index = h.index;
-                msr.data = h.value;
-                if (kvm_set_msr(vcpu, &msr)) {
+                if (kvm_set_msr(vcpu, h.index, h.value)) {
                         pr_debug_ratelimited(
                                 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
                                 __func__, j, h.index, h.value);
...
@@ -4886,14 +4886,10 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
-        struct msr_data msr;
         u32 ecx = kvm_rcx_read(vcpu);
         u64 data = kvm_read_edx_eax(vcpu);
-        msr.data = data;
-        msr.index = ecx;
-        msr.host_initiated = false;
-        if (kvm_set_msr(vcpu, &msr) != 0) {
+        if (kvm_set_msr(vcpu, ecx, data) != 0) {
                 trace_kvm_msr_write_ex(ecx, data);
                 kvm_inject_gp(vcpu, 0);
                 return 1;
...
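
The corresponding read paths (rdmsr_interception and handle_rdmsr) are collapsed out of this excerpt; presumably they pick up the new kvm_get_msr() signature in the same way. A hedged sketch of what such a caller would look like (variable names illustrative, not a literal excerpt of the commit):

        u32 ecx = kvm_rcx_read(vcpu);
        u64 data;

        /* Read the MSR directly into a local u64; no msr_data scaffolding. */
        if (kvm_get_msr(vcpu, ecx, &data)) {
                trace_kvm_msr_read_ex(ecx);
                kvm_inject_gp(vcpu, 0);
                return 1;
        }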
@@ -1363,19 +1363,23 @@ void kvm_enable_efer_bits(u64 mask)
 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 /*
- * Writes msr value into into the appropriate "register".
+ * Write @data into the MSR specified by @index. Select MSR specific fault
+ * checks are bypassed if @host_initiated is %true.
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
+                         bool host_initiated)
 {
-        switch (msr->index) {
+        struct msr_data msr;
+
+        switch (index) {
         case MSR_FS_BASE:
         case MSR_GS_BASE:
         case MSR_KERNEL_GS_BASE:
         case MSR_CSTAR:
         case MSR_LSTAR:
-                if (is_noncanonical_address(msr->data, vcpu))
+                if (is_noncanonical_address(data, vcpu))
                         return 1;
                 break;
         case MSR_IA32_SYSENTER_EIP:
@@ -1392,38 +1396,60 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                  * value, and that something deterministic happens if the guest
                  * invokes 64-bit SYSENTER.
                  */
-                msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
+                data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
         }
-        return kvm_x86_ops->set_msr(vcpu, msr);
+
+        msr.data = data;
+        msr.index = index;
+        msr.host_initiated = host_initiated;
+
+        return kvm_x86_ops->set_msr(vcpu, &msr);
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr);
 /*
- * Adapt set_msr() to msr_io()'s calling convention
+ * Read the MSR specified by @index into @data. Select MSR specific fault
+ * checks are bypassed if @host_initiated is %true.
+ * Returns 0 on success, non-0 otherwise.
+ * Assumes vcpu_load() was already called.
  */
-static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+                         bool host_initiated)
 {
         struct msr_data msr;
-        int r;
+        int ret;
         msr.index = index;
-        msr.host_initiated = true;
-        r = kvm_get_msr(vcpu, &msr);
-        if (r)
-                return r;
-        *data = msr.data;
-        return 0;
+        msr.host_initiated = host_initiated;
+        ret = kvm_x86_ops->get_msr(vcpu, &msr);
+        if (!ret)
+                *data = msr.data;
+        return ret;
+}
+
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+        return __kvm_get_msr(vcpu, index, data, false);
 }
+EXPORT_SYMBOL_GPL(kvm_get_msr);
-static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
-{
-        struct msr_data msr;
-
-        msr.data = *data;
-        msr.index = index;
-        msr.host_initiated = true;
-        return kvm_set_msr(vcpu, &msr);
-}
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+        return __kvm_set_msr(vcpu, index, data, false);
+}
+EXPORT_SYMBOL_GPL(kvm_set_msr);
+
+/*
+ * Adapt set_msr() to msr_io()'s calling convention
+ */
+static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+        return __kvm_get_msr(vcpu, index, data, true);
+}
+
+static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+        return __kvm_set_msr(vcpu, index, *data, true);
 }
 #ifdef CONFIG_X86_64
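
Net effect of the x86.c hunk above: the four entry points reduce to thin wrappers around the two new static helpers, differing only in host_initiated. A condensed view of the call mapping implied by the diff (not literal kernel code):

        /* Guest-initiated accesses (WRMSR/RDMSR exits, emulator, nested VMX): */
        kvm_set_msr(vcpu, index, data);    /* -> __kvm_set_msr(vcpu, index, data, false)  */
        kvm_get_msr(vcpu, index, &data);   /* -> __kvm_get_msr(vcpu, index, &data, false) */

        /* Host-initiated accesses (the KVM_{GET,SET}_MSRS ioctl path via msr_io()): */
        do_set_msr(vcpu, index, &data);    /* -> __kvm_set_msr(vcpu, index, *data, true)  */
        do_get_msr(vcpu, index, &data);    /* -> __kvm_get_msr(vcpu, index, &data, true)  */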
@@ -2762,18 +2788,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
-/*
- * Reads an msr value (of 'msr_index') into 'pdata'.
- * Returns 0 on success, non-0 otherwise.
- * Assumes vcpu_load() was already called.
- */
-int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
-{
-        return kvm_x86_ops->get_msr(vcpu, msr);
-}
-EXPORT_SYMBOL_GPL(kvm_get_msr);
 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
         u64 data;
@@ -5977,28 +5991,13 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
                             u32 msr_index, u64 *pdata)
 {
-        struct msr_data msr;
-        int r;
-
-        msr.index = msr_index;
-        msr.host_initiated = false;
-        r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
-        if (r)
-                return r;
-
-        *pdata = msr.data;
-        return 0;
+        return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
 }
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
                             u32 msr_index, u64 data)
 {
-        struct msr_data msr;
-
-        msr.data = data;
-        msr.index = msr_index;
-        msr.host_initiated = false;
-        return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
+        return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
 }
 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
...