Commit 6acc71cc authored by Marc Zyngier

arm64: arch_timer: Allows a CPU-specific erratum to only affect a subset of CPUs

Instead of applying a CPU-specific workaround to all CPUs in the system,
allow it to only affect a subset of them (typical big-little case).

This is done by turning the erratum pointer into a per-CPU variable.
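
For illustration only, a minimal, self-contained sketch of the per-CPU pointer
pattern the patch relies on. The names below (my_erratum_workaround, my_wa,
enable_my_workaround(), my_read_counter()) are hypothetical and do not exist in
the kernel; the real code uses timer_unstable_counter_workaround and the
arch_timer_* helpers shown in the diff.

/*
 * Illustrative sketch only: my_erratum_workaround, my_wa,
 * enable_my_workaround() and my_read_counter() are hypothetical
 * names, not kernel symbols.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/types.h>

struct my_erratum_workaround {
        const char *desc;
        u64 (*read_cntvct)(void);
};

/* One workaround pointer per CPU; NULL means "no workaround on this CPU". */
static DEFINE_PER_CPU(const struct my_erratum_workaround *, my_wa);

/* Install a workaround on the current CPU only, or on every possible CPU. */
static void enable_my_workaround(const struct my_erratum_workaround *wa,
                                 bool local)
{
        int cpu;

        if (local)
                __this_cpu_write(my_wa, wa);
        else
                for_each_possible_cpu(cpu)
                        per_cpu(my_wa, cpu) = wa;
}

/* Read path: use this CPU's accessor if one is installed, else the plain one. */
static u64 my_read_counter(u64 (*plain_read)(void))
{
        const struct my_erratum_workaround *wa;
        u64 val;

        preempt_disable();
        wa = __this_cpu_read(my_wa);
        val = (wa && wa->read_cntvct) ? wa->read_cntvct() : plain_read();
        preempt_enable();

        return val;
}

The preempt_disable()/preempt_enable() pair keeps the per-CPU lookup and the
indirect call on the same CPU, so a task migrating between an affected and an
unaffected core never ends up calling the wrong accessor.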
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 8c64621b
arch/arm64/include/asm/arch_timer.h

@@ -25,6 +25,7 @@
 #include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/smp.h>
 #include <linux/types.h>
 
 #include <clocksource/arm_arch_timer.h>
@@ -55,16 +56,24 @@ struct arch_timer_erratum_workaround {
         int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
 };
 
-extern const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround;
+DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
+                timer_unstable_counter_workaround);
 
 #define arch_timer_reg_read_stable(reg) \
 ({ \
         u64 _val; \
-        if (needs_unstable_timer_counter_workaround() && \
-            timer_unstable_counter_workaround->read_##reg) \
-                _val = timer_unstable_counter_workaround->read_##reg(); \
-        else \
-                _val = read_sysreg(reg); \
+        if (needs_unstable_timer_counter_workaround()) { \
+                const struct arch_timer_erratum_workaround *wa; \
+                preempt_disable(); \
+                wa = __this_cpu_read(timer_unstable_counter_workaround); \
+                if (wa && wa->read_##reg) \
+                        _val = wa->read_##reg(); \
+                else \
+                        _val = read_sysreg(reg); \
+                preempt_enable(); \
+        } else { \
+                _val = read_sysreg(reg); \
+        } \
         _val; \
 })
 
drivers/clocksource/arm_arch_timer.c

@@ -235,7 +235,8 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void)
 #endif
 
 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
+DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
+               timer_unstable_counter_workaround);
 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
@@ -338,9 +339,18 @@ arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
 }
 
 static
-void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa)
+void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
+                                  bool local)
 {
-        timer_unstable_counter_workaround = wa;
+        int i;
+
+        if (local) {
+                __this_cpu_write(timer_unstable_counter_workaround, wa);
+        } else {
+                for_each_possible_cpu(i)
+                        per_cpu(timer_unstable_counter_workaround, i) = wa;
+        }
+
         static_branch_enable(&arch_timer_read_ool_enabled);
 }
 
@@ -369,14 +379,17 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
                 return;
 
         if (needs_unstable_timer_counter_workaround()) {
-                if (wa != timer_unstable_counter_workaround)
+                const struct arch_timer_erratum_workaround *__wa;
+                __wa = __this_cpu_read(timer_unstable_counter_workaround);
+                if (__wa && wa != __wa)
                         pr_warn("Can't enable workaround for %s (clashes with %s\n)",
-                                wa->desc,
-                                timer_unstable_counter_workaround->desc);
-                return;
+                                wa->desc, __wa->desc);
+
+                if (__wa)
+                        return;
         }
 
-        arch_timer_enable_workaround(wa);
+        arch_timer_enable_workaround(wa, local);
         pr_info("Enabling %s workaround for %s\n",
                 local ? "local" : "global", wa->desc);
 }
@@ -384,13 +397,18 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
 #define erratum_handler(fn, r, ...) \
 ({ \
         bool __val; \
-        if (needs_unstable_timer_counter_workaround() && \
-            timer_unstable_counter_workaround->fn) { \
-                r = timer_unstable_counter_workaround->fn(__VA_ARGS__); \
-                __val = true; \
+        if (needs_unstable_timer_counter_workaround()) { \
+                const struct arch_timer_erratum_workaround *__wa; \
+                __wa = __this_cpu_read(timer_unstable_counter_workaround); \
+                if (__wa && __wa->fn) { \
+                        r = __wa->fn(__VA_ARGS__); \
+                        __val = true; \
+                } else { \
+                        __val = false; \
+                } \
         } else { \
                 __val = false; \
         } \
         __val; \
 })
 