Commit 2ae5510a authored by Juerg Haefliger

UBUNTU: SAUCE: x86/msr: Rename MSR spec control feature bits

CVE-2018-3639 (x86)

With the previous commit 50d36d375b89 ("x86/msr: Add definitions for
new speculation control MSRs") the spec control bits from upstream were
introduced, so rename the existing bits to match upstream:
  FEATURE_ENABLE_IBRS -> SPEC_CTRL_IBRS
  FEATURE_SET_IBPB -> PRED_CMD_IBPB
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
parent dc380358
......@@ -438,8 +438,6 @@
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_LMCE (1<<20)
#define FEATURE_ENABLE_IBRS (1<<0)
#define FEATURE_SET_IBPB (1<<0)
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
......
......@@ -114,7 +114,7 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
__mwait(eax, ecx);
if (ibrs_inuse)
native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
}
current_clr_polling();
}
......
......@@ -17,7 +17,7 @@
pushq %rdx; \
movl $MSR_IA32_SPEC_CTRL, %ecx; \
movl $0, %edx; \
movl $FEATURE_ENABLE_IBRS, %eax; \
movl $SPEC_CTRL_IBRS, %eax; \
wrmsr; \
popq %rdx; \
popq %rcx; \
......@@ -25,7 +25,7 @@
#define __ASM_ENABLE_IBRS_CLOBBER \
movl $MSR_IA32_SPEC_CTRL, %ecx; \
movl $0, %edx; \
movl $FEATURE_ENABLE_IBRS, %eax; \
movl $SPEC_CTRL_IBRS, %eax; \
wrmsr;
#define __ASM_DISABLE_IBRS \
pushq %rax; \
......
......@@ -431,10 +431,10 @@ static void mwait_idle(void)
if (!need_resched()) {
__sti_mwait(0, 0);
if (ibrs_inuse)
native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
} else {
if (ibrs_inuse)
native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
local_irq_enable();
}
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
......
......@@ -1656,7 +1656,7 @@ void native_play_dead(void)
hlt_play_dead();
if (ibrs_inuse)
native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
......
......@@ -1222,7 +1222,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
* block speculative execution.
*/
if (ibpb_inuse)
wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
......@@ -1257,7 +1257,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
if (ibpb_inuse)
wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}
}
......@@ -3858,7 +3858,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
local_irq_enable();
if (ibrs_inuse && (svm->spec_ctrl != FEATURE_ENABLE_IBRS))
if (ibrs_inuse && (svm->spec_ctrl != SPEC_CTRL_IBRS))
wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
asm volatile (
......@@ -3939,8 +3939,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (ibrs_inuse) {
rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
if (svm->spec_ctrl != FEATURE_ENABLE_IBRS)
wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
if (svm->spec_ctrl != SPEC_CTRL_IBRS)
wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
}
#ifdef CONFIG_X86_64
......
......@@ -2078,7 +2078,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
if (ibpb_inuse)
native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}
if (vmx->loaded_vmcs->cpu != cpu) {
......@@ -8619,7 +8619,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (ibrs_inuse)
add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL,
vcpu->arch.spec_ctrl, FEATURE_ENABLE_IBRS);
vcpu->arch.spec_ctrl, SPEC_CTRL_IBRS);
debugctlmsr = get_debugctlmsr();
......
......@@ -124,7 +124,7 @@ static void delay_mwaitx(unsigned long __loops)
__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
if (ibrs_inuse && (delay > IBRS_DISABLE_THRESHOLD))
native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
end = rdtsc_ordered();
......
......@@ -161,7 +161,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (tsk && tsk->mm &&
get_dumpable(tsk->mm) != SUID_DUMP_USER &&
ibpb_inuse && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
native_wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
/* Load per-mm CR4 state */
load_mm_cr4(next);
......
......@@ -2432,7 +2432,7 @@ int proc_dointvec_ibrs_ctrl(struct ctl_table *table, int write,
clear_ibrs_disabled();
if (ibrs_supported) {
for_each_online_cpu(cpu)
wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
wrmsrl_on_cpu(cpu, MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
} else {
sysctl_ibrs_enabled = 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment