Commit 07c94a38 authored by Borislav Petkov, committed by Thomas Gleixner

x86/amd: Simplify AMD E400 aware idle routine

Reorganize the E400 detection now that we have everything in place:
switch the CPUs to broadcast mode after the LAPIC has been initialized
and remove the facilities that were used previously on the idle path.
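
The switch itself is the small hook added in this patch; condensed from the
hunks below (it is invoked right after setup_APIC_timer() on both the boot
CPU and the secondary CPUs):

  void amd_e400_c1e_apic_setup(void)
  {
          /* The LAPIC timer is set up at this point; force broadcast mode. */
          if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                  pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
                  local_irq_disable();
                  tick_broadcast_force();
                  local_irq_enable();
          }
  }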

Unfortunately static_cpu_has_bug() cannot be used in the E400 idle routine
because alternatives have already been applied by the time the actual detection happens,
so the static switching does not take effect and the test will stay
false. Use boot_cpu_has_bug() instead which is definitely an improvement
over the RDMSR and the cpumask handling.
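
With the detection gone from the idle path, the routine reduces to a runtime
test of the bug bit plus the broadcast enter/exit bracketing; condensed from
the idle-routine hunk below:

  static void amd_e400_idle(void)
  {
          /*
           * X86_BUG_AMD_APIC_C1E is set only after alternatives have been
           * applied, so the runtime boot_cpu_has_bug() test is used here.
           */
          if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                  default_idle();
                  return;
          }

          tick_broadcast_enter();
          default_idle();

          /* Switching back from broadcast mode must be done with interrupts disabled. */
          local_irq_disable();
          tick_broadcast_exit();
          local_irq_enable();
  }
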
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/20161209182912.2726-5-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e7ff3a47
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
             boot_cpu_data.x86_model <= 0x05 &&
             boot_cpu_data.x86_mask < 0x0A)
                 return 1;
-        else if (amd_e400_c1e_detected)
+        else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                 return 1;
         else
                 return max_cstate;

@@ -621,10 +621,9 @@ static inline void sync_core(void)
 }
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_amd_e400_c1e_mask(void);
+extern void amd_e400_c1e_apic_setup(void);
 
 extern unsigned long boot_option_idle_override;
-extern bool amd_e400_c1e_detected;
 
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                          IDLE_POLL};

@@ -894,11 +894,13 @@ void __init setup_boot_APIC_clock(void)
         /* Setup the lapic or request the broadcast */
         setup_APIC_timer();
+        amd_e400_c1e_apic_setup();
 }
 
 void setup_secondary_APIC_clock(void)
 {
         setup_APIC_timer();
+        amd_e400_c1e_apic_setup();
 }
 
 /*

@@ -1144,7 +1144,6 @@ void enable_sep_cpu(void)
 void __init identify_boot_cpu(void)
 {
         identify_cpu(&boot_cpu_data);
-        init_amd_e400_c1e_mask();
 #ifdef CONFIG_X86_32
         sysenter_setup();
         enable_sep_cpu();

@@ -289,59 +289,33 @@ void stop_this_cpu(void *dummy)
                 halt();
 }
 
-bool amd_e400_c1e_detected;
-EXPORT_SYMBOL(amd_e400_c1e_detected);
-
-static cpumask_var_t amd_e400_c1e_mask;
-
-void amd_e400_remove_cpu(int cpu)
-{
-        if (amd_e400_c1e_mask != NULL)
-                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
-}
-
 /*
- * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
- * pending message MSR. If we detect C1E, then we handle it the same
- * way as C3 power states (local apic timer and TSC stop)
+ * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
+ * states (local apic timer and TSC stop).
  */
 static void amd_e400_idle(void)
 {
-        if (!amd_e400_c1e_detected) {
-                u32 lo, hi;
-
-                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
-
-                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-                        amd_e400_c1e_detected = true;
-                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-                                mark_tsc_unstable("TSC halt in AMD C1E");
-                        pr_info("System has AMD C1E enabled\n");
-                }
+        /*
+         * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
+         * gets set after static_cpu_has() places have been converted via
+         * alternatives.
+         */
+        if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+                default_idle();
+                return;
         }
 
-        if (amd_e400_c1e_detected) {
-                int cpu = smp_processor_id();
-
-                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
-                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
-                        /* Force broadcast so ACPI can not interfere. */
-                        tick_broadcast_force();
-                        pr_info("Switch to broadcast mode on CPU%d\n", cpu);
-                }
-                tick_broadcast_enter();
+        tick_broadcast_enter();
 
-                default_idle();
+        default_idle();
 
-                /*
-                 * The switch back from broadcast mode needs to be
-                 * called with interrupts disabled.
-                 */
-                local_irq_disable();
-                tick_broadcast_exit();
-                local_irq_enable();
-        } else
-                default_idle();
+        /*
+         * The switch back from broadcast mode needs to be called with
+         * interrupts disabled.
+         */
+        local_irq_disable();
+        tick_broadcast_exit();
+        local_irq_enable();
 }
 
 /*

@@ -411,11 +385,14 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
                 x86_idle = default_idle;
 }
 
-void __init init_amd_e400_c1e_mask(void)
+void amd_e400_c1e_apic_setup(void)
 {
-        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
-        if (x86_idle == amd_e400_idle)
-                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
+        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
+                pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
+                local_irq_disable();
+                tick_broadcast_force();
+                local_irq_enable();
+        }
 }
 
 void __init arch_post_acpi_subsys_init(void)

@@ -1575,7 +1575,6 @@ void play_dead_common(void)
 {
         idle_task_exit();
         reset_lazy_tlbstate();
-        amd_e400_remove_cpu(raw_smp_processor_id());
 
         /* Ack it */
         (void)cpu_report_death();

@@ -141,7 +141,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
         if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
                 return;
 
-        if (amd_e400_c1e_detected)
+        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
                 type = ACPI_STATE_C1;
 
         /*