Commit 2ae5510a authored by Juerg Haefliger

UBUNTU: SAUCE: x86/msr: Rename MSR spec control feature bits

CVE-2018-3639 (x86)

The previous commit 50d36d375b89 ("x86/msr: Add definitions for new
speculation control MSRs") introduced the upstream spec control bit
definitions, so rename the existing bits to match upstream:
  FEATURE_ENABLE_IBRS -> SPEC_CTRL_IBRS
  FEATURE_SET_IBPB -> PRED_CMD_IBPB
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
parent dc380358
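
For context only (not part of this patch): the renamed bits correspond to the upstream speculation control MSR definitions, roughly as sketched below. The MSR numbers and bit positions are the architectural ones for IA32_SPEC_CTRL and IA32_PRED_CMD; the exact comments and alignment in arch/x86/include/asm/msr-index.h may differ.

/* Sketch of the upstream speculation control MSR definitions (reference only). */
#define MSR_IA32_SPEC_CTRL	0x00000048	/* Speculation Control */
#define SPEC_CTRL_IBRS		(1 << 0)	/* Indirect Branch Restricted Speculation */

#define MSR_IA32_PRED_CMD	0x00000049	/* Prediction Command */
#define PRED_CMD_IBPB		(1 << 0)	/* Indirect Branch Prediction Barrier */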
@@ -438,8 +438,6 @@
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
 #define FEATURE_CONTROL_LMCE (1<<20)
-#define FEATURE_ENABLE_IBRS (1<<0)
-#define FEATURE_SET_IBPB (1<<0)
 #define MSR_IA32_APICBASE 0x0000001b
 #define MSR_IA32_APICBASE_BSP (1<<8)
@@ -114,7 +114,7 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		__mwait(eax, ecx);
 		if (ibrs_inuse)
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 	}
 	current_clr_polling();
 }
@@ -17,7 +17,7 @@
 	pushq %rdx; \
 	movl $MSR_IA32_SPEC_CTRL, %ecx; \
 	movl $0, %edx; \
-	movl $FEATURE_ENABLE_IBRS, %eax; \
+	movl $SPEC_CTRL_IBRS, %eax; \
 	wrmsr; \
 	popq %rdx; \
 	popq %rcx; \
@@ -25,7 +25,7 @@
 #define __ASM_ENABLE_IBRS_CLOBBER \
 	movl $MSR_IA32_SPEC_CTRL, %ecx; \
 	movl $0, %edx; \
-	movl $FEATURE_ENABLE_IBRS, %eax; \
+	movl $SPEC_CTRL_IBRS, %eax; \
 	wrmsr;
 #define __ASM_DISABLE_IBRS \
 	pushq %rax; \
@@ -431,10 +431,10 @@ static void mwait_idle(void)
 		if (!need_resched()) {
 			__sti_mwait(0, 0);
 			if (ibrs_inuse)
-				native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 		} else {
 			if (ibrs_inuse)
-				native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+				native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 			local_irq_enable();
 		}
 		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
@@ -1656,7 +1656,7 @@ void native_play_dead(void)
 		hlt_play_dead();
 	if (ibrs_inuse)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 }
 #else /* ... !CONFIG_HOTPLUG_CPU */
@@ -1222,7 +1222,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	 * block speculative execution.
 	 */
 	if (ibpb_inuse)
-		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 }
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1257,7 +1257,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (sd->current_vmcb != svm->vmcb) {
 		sd->current_vmcb = svm->vmcb;
 		if (ibpb_inuse)
-			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 	}
 }
@@ -3858,7 +3858,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
-	if (ibrs_inuse && (svm->spec_ctrl != FEATURE_ENABLE_IBRS))
+	if (ibrs_inuse && (svm->spec_ctrl != SPEC_CTRL_IBRS))
 		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 	asm volatile (
@@ -3939,8 +3939,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (ibrs_inuse) {
 		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
-		if (svm->spec_ctrl != FEATURE_ENABLE_IBRS)
-			wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+		if (svm->spec_ctrl != SPEC_CTRL_IBRS)
+			wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 	}
 #ifdef CONFIG_X86_64
@@ -2078,7 +2078,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
 		vmcs_load(vmx->loaded_vmcs->vmcs);
 		if (ibpb_inuse)
-			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+			native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 	}
 	if (vmx->loaded_vmcs->cpu != cpu) {
@@ -8619,7 +8619,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (ibrs_inuse)
 		add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL,
-				      vcpu->arch.spec_ctrl, FEATURE_ENABLE_IBRS);
+				      vcpu->arch.spec_ctrl, SPEC_CTRL_IBRS);
 	debugctlmsr = get_debugctlmsr();
@@ -124,7 +124,7 @@ static void delay_mwaitx(unsigned long __loops)
 		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
 		if (ibrs_inuse && (delay > IBRS_DISABLE_THRESHOLD))
-			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+			native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 		end = rdtsc_ordered();
@@ -161,7 +161,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		if (tsk && tsk->mm &&
 		    get_dumpable(tsk->mm) != SUID_DUMP_USER &&
 		    ibpb_inuse && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
-			native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
+			native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
 		/* Load per-mm CR4 state */
 		load_mm_cr4(next);
@@ -2432,7 +2432,7 @@ int proc_dointvec_ibrs_ctrl(struct ctl_table *table, int write,
 		clear_ibrs_disabled();
 		if (ibrs_supported) {
 			for_each_online_cpu(cpu)
-				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+				wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
 		} else {
 			sysctl_ibrs_enabled = 0;
 		}