Commit b57fc9e8 authored by Catalin Marinas

arm64: Fix !CONFIG_SMP kernel build

Commit fb4a9602 (arm64: kernel: fix per-cpu offset restore on
resume) uses per_cpu_offset() unconditionally during CPU wakeup;
however, it is only defined for the SMP case.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Dave P Martin <Dave.Martin@arm.com>
parent 84fe6826
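
For context, the wakeup path touched by fb4a9602 restores the per-CPU offset with a call of roughly the following shape (a sketch from memory of the cpu_suspend() resume path, not a verbatim quote of the kernel source):

```c
/* Resume path (sketch): after the CPU returns from a low-power
 * state, tpidr_el1 must be reloaded with this CPU's per-cpu
 * offset.  per_cpu_offset() is only defined when CONFIG_SMP is
 * set, which is what broke !CONFIG_SMP builds.
 */
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
```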
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ASM_PERCPU_H */
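
Note why the UP stub is a macro rather than an empty inline function: because the parameter x never appears in the macro body, the entire argument expression is discarded by the preprocessor, so a call like set_my_cpu_offset(per_cpu_offset(...)) compiles on UP even though per_cpu_offset() does not exist there. A minimal standalone sketch of the pattern (a hypothetical demo, not kernel code; build with and without -DCONFIG_SMP):

```c
#include <stdio.h>

#ifdef CONFIG_SMP
/* SMP case: a real implementation would program tpidr_el1 here. */
static inline void set_my_cpu_offset(unsigned long off) { (void)off; }
#define per_cpu_offset(cpu) (0UL)	/* stand-in for the real lookup */
#else
/*
 * UP stub: because 'x' is unused in the body, the argument tokens
 * are dropped during macro expansion, so per_cpu_offset() need not
 * be defined at all for the call below to compile.
 */
#define set_my_cpu_offset(x) do { } while (0)
#endif

int main(void)
{
	set_my_cpu_offset(per_cpu_offset(0));
	puts("per-cpu offset setup compiled for this configuration");
	return 0;
}
```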