Commit 3eb90017 authored by Alexander Graf, committed by Paolo Bonzini

KVM: x86: VMX: Prevent MSR passthrough when MSR access is denied

We will introduce the concept of MSRs that may not be handled in kernel
space soon. Some MSRs are directly passed through to the guest, effectively
making them handled by KVM from user space's point of view.

This patch introduces all logic required to ensure that MSRs that
user space wants trapped are not marked as direct access for guests.
Signed-off-by: Alexander Graf <graf@amazon.com>
Message-Id: <20200925143422.21718-7-graf@amazon.com>
[Replace "_idx" with "_slot". - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fd6fa73d
...@@ -145,6 +145,26 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); ...@@ -145,6 +145,26 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \ RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
RTIT_STATUS_BYTECNT)) RTIT_STATUS_BYTECNT))
/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these x2apic and PT MSRs are handled specially.
 *
 * NOTE: the array index is the MSR's "slot": possible_passthrough_msr_slot()
 * returns an MSR's position in this array, and that slot number is the bit
 * index used in vmx->shadow_msr_intercept.{read,write}.  Reordering entries
 * therefore changes the meaning of existing shadow-bitmap bits; append only.
 * MAX_POSSIBLE_PASSTHROUGH_MSRS must be kept >= the number of entries here
 * (currently 13 for 13 entries).
 */
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_PRED_CMD,
	MSR_IA32_TSC,
	MSR_FS_BASE,
	MSR_GS_BASE,
	MSR_KERNEL_GS_BASE,
	MSR_IA32_SYSENTER_CS,
	MSR_IA32_SYSENTER_ESP,
	MSR_IA32_SYSENTER_EIP,
	MSR_CORE_C1_RES,
	MSR_CORE_C3_RESIDENCY,
	MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};
/* /*
* These 2 parameters are used to config the controls for Pause-Loop Exiting: * These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive * ple_gap: upper bound on the amount of time between two successive
...@@ -611,6 +631,41 @@ static inline bool report_flexpriority(void) ...@@ -611,6 +631,41 @@ static inline bool report_flexpriority(void)
return flexpriority_enabled; return flexpriority_enabled;
} }
/*
 * Return the slot (index) of @msr in vmx_possible_passthrough_msrs[],
 * or -ENOENT if the MSR is not in the possible-passthrough list.
 */
static int possible_passthrough_msr_slot(u32 msr)
{
	u32 slot = 0;

	while (slot < ARRAY_SIZE(vmx_possible_passthrough_msrs)) {
		if (vmx_possible_passthrough_msrs[slot] == msr)
			return slot;
		slot++;
	}

	return -ENOENT;
}
/*
 * Check whether @msr is one whose intercept state may be toggled directly
 * in the MSR bitmap.  x2APIC and PT MSRs are always valid (they are managed
 * by dedicated code paths); everything else must appear in
 * vmx_possible_passthrough_msrs[], otherwise a WARN fires so the list can
 * be extended.
 */
static bool is_valid_passthrough_msr(u32 msr)
{
	bool in_list;

	/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
	if (msr >= 0x800 && msr <= 0x8ff)
		return true;

	switch (msr) {
	case MSR_IA32_RTIT_STATUS:
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
	case MSR_IA32_RTIT_CR3_MATCH:
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
		return true;
	}

	in_list = possible_passthrough_msr_slot(msr) != -ENOENT;

	WARN(!in_list, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);

	return in_list;
}
static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{ {
int i; int i;
...@@ -3583,12 +3638,51 @@ void free_vpid(int vpid) ...@@ -3583,12 +3638,51 @@ void free_vpid(int vpid)
spin_unlock(&vmx_vpid_lock); spin_unlock(&vmx_vpid_lock);
} }
/*
 * Clear the read-intercept bit for @msr in a VMX MSR bitmap.
 * Per the SDM's MSR-bitmap layout, bytes 0x000-0x3ff cover reads of
 * MSRs 0x00000000-0x00001fff and bytes 0x400-0x7ff cover reads of
 * MSRs 0xc0000000-0xc0001fff; other MSRs are not controllable and
 * are silently ignored.
 */
static void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap);
	else if ((msr - 0xc0000000) <= 0x1fff)
		__clear_bit(msr & 0x1fff,
			    msr_bitmap + 0x400 / sizeof(unsigned long));
}
/*
 * Clear the write-intercept bit for @msr in a VMX MSR bitmap.
 * Write bitmaps live in the upper half: bytes 0x800-0xbff for
 * MSRs 0x00000000-0x00001fff, bytes 0xc00-0xfff for MSRs
 * 0xc0000000-0xc0001fff; other MSRs are silently ignored.
 */
static void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x800 / sizeof(unsigned long));
	else if ((msr - 0xc0000000) <= 0x1fff)
		__clear_bit(msr & 0x1fff,
			    msr_bitmap + 0xc00 / sizeof(unsigned long));
}
/*
 * Set the read-intercept bit for @msr in a VMX MSR bitmap (mirror of
 * vmx_clear_msr_bitmap_read()): bytes 0x000-0x3ff for MSRs
 * 0x00000000-0x00001fff, bytes 0x400-0x7ff for MSRs
 * 0xc0000000-0xc0001fff; other MSRs are silently ignored.
 */
static void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
{
	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap);
	else if ((msr - 0xc0000000) <= 0x1fff)
		__set_bit(msr & 0x1fff,
			  msr_bitmap + 0x400 / sizeof(unsigned long));
}
/*
 * Set the write-intercept bit for @msr in a VMX MSR bitmap (mirror of
 * vmx_clear_msr_bitmap_write()): bytes 0x800-0xbff for MSRs
 * 0x00000000-0x00001fff, bytes 0xc00-0xfff for MSRs
 * 0xc0000000-0xc0001fff; other MSRs are silently ignored.
 */
static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
{
	if (msr <= 0x1fff)
		__set_bit(msr, msr_bitmap + 0x800 / sizeof(unsigned long));
	else if ((msr - 0xc0000000) <= 0x1fff)
		__set_bit(msr & 0x1fff,
			  msr_bitmap + 0xc00 / sizeof(unsigned long));
}
static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
u32 msr, int type) u32 msr, int type)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
int f = sizeof(unsigned long);
if (!cpu_has_vmx_msr_bitmap()) if (!cpu_has_vmx_msr_bitmap())
return; return;
...@@ -3597,30 +3691,37 @@ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, ...@@ -3597,30 +3691,37 @@ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
evmcs_touch_msr_bitmap(); evmcs_touch_msr_bitmap();
/* /*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * Mark the desired intercept state in shadow bitmap, this is needed
* have the write-low and read-high bitmap offsets the wrong way round. * for resync when the MSR filters change.
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */
*/ if (is_valid_passthrough_msr(msr)) {
if (msr <= 0x1fff) { int idx = possible_passthrough_msr_slot(msr);
if (type & MSR_TYPE_R)
/* read-low */ if (idx != -ENOENT) {
__clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_R)
clear_bit(idx, vmx->shadow_msr_intercept.read);
if (type & MSR_TYPE_W)
clear_bit(idx, vmx->shadow_msr_intercept.write);
}
}
if (type & MSR_TYPE_W) if ((type & MSR_TYPE_R) &&
/* write-low */ !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
__clear_bit(msr, msr_bitmap + 0x800 / f); vmx_set_msr_bitmap_read(msr_bitmap, msr);
type &= ~MSR_TYPE_R;
}
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { if ((type & MSR_TYPE_W) &&
msr &= 0x1fff; !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
if (type & MSR_TYPE_R) vmx_set_msr_bitmap_write(msr_bitmap, msr);
/* read-high */ type &= ~MSR_TYPE_W;
__clear_bit(msr, msr_bitmap + 0x400 / f); }
if (type & MSR_TYPE_W) if (type & MSR_TYPE_R)
/* write-high */ vmx_clear_msr_bitmap_read(msr_bitmap, msr);
__clear_bit(msr, msr_bitmap + 0xc00 / f);
} if (type & MSR_TYPE_W)
vmx_clear_msr_bitmap_write(msr_bitmap, msr);
} }
static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
...@@ -3628,7 +3729,6 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, ...@@ -3628,7 +3729,6 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
int f = sizeof(unsigned long);
if (!cpu_has_vmx_msr_bitmap()) if (!cpu_has_vmx_msr_bitmap())
return; return;
...@@ -3637,30 +3737,25 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, ...@@ -3637,30 +3737,25 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
evmcs_touch_msr_bitmap(); evmcs_touch_msr_bitmap();
/* /*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * Mark the desired intercept state in shadow bitmap, this is needed
* have the write-low and read-high bitmap offsets the wrong way round. * for resync when the MSR filter changes.
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */
*/ if (is_valid_passthrough_msr(msr)) {
if (msr <= 0x1fff) { int idx = possible_passthrough_msr_slot(msr);
if (type & MSR_TYPE_R)
/* read-low */ if (idx != -ENOENT) {
__set_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_R)
set_bit(idx, vmx->shadow_msr_intercept.read);
if (type & MSR_TYPE_W) if (type & MSR_TYPE_W)
/* write-low */ set_bit(idx, vmx->shadow_msr_intercept.write);
__set_bit(msr, msr_bitmap + 0x800 / f); }
}
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
msr &= 0x1fff;
if (type & MSR_TYPE_R)
/* read-high */
__set_bit(msr, msr_bitmap + 0x400 / f);
if (type & MSR_TYPE_W) if (type & MSR_TYPE_R)
/* write-high */ vmx_set_msr_bitmap_read(msr_bitmap, msr);
__set_bit(msr, msr_bitmap + 0xc00 / f);
} if (type & MSR_TYPE_W)
vmx_set_msr_bitmap_write(msr_bitmap, msr);
} }
static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
...@@ -3687,15 +3782,14 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) ...@@ -3687,15 +3782,14 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
return mode; return mode;
} }
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu, static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu, u8 mode)
unsigned long *msr_bitmap, u8 mode)
{ {
int msr; int msr;
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { for (msr = 0x800; msr <= 0x8ff; msr++) {
unsigned word = msr / BITS_PER_LONG; bool intercepted = !!(mode & MSR_BITMAP_MODE_X2APIC_APICV);
msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
msr_bitmap[word + (0x800 / sizeof(long))] = ~0; vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_RW, intercepted);
} }
if (mode & MSR_BITMAP_MODE_X2APIC) { if (mode & MSR_BITMAP_MODE_X2APIC) {
...@@ -3715,7 +3809,6 @@ static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu, ...@@ -3715,7 +3809,6 @@ static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
u8 mode = vmx_msr_bitmap_mode(vcpu); u8 mode = vmx_msr_bitmap_mode(vcpu);
u8 changed = mode ^ vmx->msr_bitmap_mode; u8 changed = mode ^ vmx->msr_bitmap_mode;
...@@ -3723,7 +3816,7 @@ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) ...@@ -3723,7 +3816,7 @@ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
return; return;
if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
vmx_update_msr_bitmap_x2apic(vcpu, msr_bitmap, mode); vmx_update_msr_bitmap_x2apic(vcpu, mode);
vmx->msr_bitmap_mode = mode; vmx->msr_bitmap_mode = mode;
} }
...@@ -3764,6 +3857,29 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) ...@@ -3764,6 +3857,29 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
return ((rvi & 0xf0) > (vppr & 0xf0)); return ((rvi & 0xf0) > (vppr & 0xf0));
} }
/*
 * kvm_x86_ops callback invoked when user space installs a new MSR filter.
 * Replays the desired (shadow) intercept state for every possibly
 * passed-through MSR; vmx_set_intercept_for_msr() consults the new filter
 * on the way, so the hardware bitmaps end up back in sync with both the
 * shadow state and the filter.
 */
static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 slot;

	/*
	 * Set intercept permissions for all potentially passed through MSRs
	 * again. They will automatically get filtered through the MSR filter,
	 * so we are back in sync after this.
	 */
	for (slot = 0; slot < ARRAY_SIZE(vmx_possible_passthrough_msrs); slot++) {
		u32 msr = vmx_possible_passthrough_msrs[slot];

		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R,
					  test_bit(slot, vmx->shadow_msr_intercept.read));
		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W,
					  test_bit(slot, vmx->shadow_msr_intercept.write));
	}

	/* PT and x2APIC MSRs are managed by their own helpers; resync them too. */
	pt_update_intercept_for_msr(vcpu);
	vmx_update_msr_bitmap_x2apic(vcpu, vmx_msr_bitmap_mode(vcpu));
}
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
bool nested) bool nested)
{ {
...@@ -6749,6 +6865,10 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6749,6 +6865,10 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (err < 0) if (err < 0)
goto free_pml; goto free_pml;
/* The MSR bitmap starts with all ones */
bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
msr_bitmap = vmx->vmcs01.msr_bitmap; msr_bitmap = vmx->vmcs01.msr_bitmap;
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R); vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW); vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
...@@ -7563,6 +7683,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { ...@@ -7563,6 +7683,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.can_emulate_instruction = vmx_can_emulate_instruction, .can_emulate_instruction = vmx_can_emulate_instruction,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked, .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
.migrate_timers = vmx_migrate_timers, .migrate_timers = vmx_migrate_timers,
.msr_filter_changed = vmx_msr_filter_changed,
}; };
static __init int hardware_setup(void) static __init int hardware_setup(void)
......
...@@ -279,6 +279,13 @@ struct vcpu_vmx { ...@@ -279,6 +279,13 @@ struct vcpu_vmx {
u64 ept_pointer; u64 ept_pointer;
struct pt_desc pt_desc; struct pt_desc pt_desc;
	/*
	 * Save desired MSR intercept (read: pass-through) state.
	 * Bit i corresponds to vmx_possible_passthrough_msrs[i]; a set bit
	 * means the access should be intercepted.  Used by
	 * vmx_msr_filter_changed() to resync the hardware MSR bitmaps when
	 * user space's MSR filter changes.
	 */
	/* NOTE(review): must stay >= ARRAY_SIZE(vmx_possible_passthrough_msrs) */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
}; };
enum ept_pointers_status { enum ept_pointers_status {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment