Commit ee1ca48f authored by Pallipadi, Venkatesh's avatar Pallipadi, Venkatesh Committed by Len Brown

ACPI: Disable ARB_DISABLE on platforms where it is not needed

ARB_DISABLE is a NOP on all of the recent Intel platforms.

For such platforms, reduce contention on c3_lock
by skipping the fake ARB_DISABLE.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
parent cd86a536
...@@ -34,12 +34,22 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, ...@@ -34,12 +34,22 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
flags->bm_check = 1; flags->bm_check = 1;
else if (c->x86_vendor == X86_VENDOR_INTEL) { else if (c->x86_vendor == X86_VENDOR_INTEL) {
/* /*
* Today all CPUs that support C3 share cache. * Today all MP CPUs that support C3 share cache.
* TBD: This needs to look at cache shared map, once * And caches should not be flushed by software while
* multi-core detection patch makes to the base. * entering C3 type state.
*/ */
flags->bm_check = 1; flags->bm_check = 1;
} }
/*
* On all recent Intel platforms, ARB_DISABLE is a nop.
* So, set bm_control to zero to indicate that ARB_DISABLE
* is not required while entering C3 type state on
* P4, Core and beyond CPUs
*/
if (c->x86_vendor == X86_VENDOR_INTEL &&
(c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)))
flags->bm_control = 0;
} }
EXPORT_SYMBOL(acpi_processor_power_init_bm_check); EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
......
...@@ -512,7 +512,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) ...@@ -512,7 +512,8 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
static void acpi_processor_power_verify_c3(struct acpi_processor *pr, static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx) struct acpi_processor_cx *cx)
{ {
static int bm_check_flag; static int bm_check_flag = -1;
static int bm_control_flag = -1;
if (!cx->address) if (!cx->address)
...@@ -542,12 +543,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, ...@@ -542,12 +543,14 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
} }
/* All the logic here assumes flags.bm_check is same across all CPUs */ /* All the logic here assumes flags.bm_check is same across all CPUs */
if (!bm_check_flag) { if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */ /* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id); acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check; bm_check_flag = pr->flags.bm_check;
bm_control_flag = pr->flags.bm_control;
} else { } else {
pr->flags.bm_check = bm_check_flag; pr->flags.bm_check = bm_check_flag;
pr->flags.bm_control = bm_control_flag;
} }
if (pr->flags.bm_check) { if (pr->flags.bm_check) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment