Commit 5809f9d4 authored by Eric Dumazet, committed by Andi Kleen

[PATCH] x86-64: get rid of ARCH_HAVE_XTIME_LOCK

ARCH_HAVE_XTIME_LOCK is used by the x86_64 arch.  This arch needs to place a
read-only copy of xtime_lock into the vsyscall page.  This read-only copy is
named __xtime_lock, and xtime_lock is defined in
arch/x86_64/kernel/vmlinux.lds.S as an alias.  So the declaration of
xtime_lock in kernel/timer.c was guarded by the ARCH_HAVE_XTIME_LOCK define,
which is defined to 1 on x86_64.

We can get the same result with __attribute__((weak)) in the declaration; the
linker should do the job.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
parent 26054ed0
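
For illustration, a minimal sketch of the weak-symbol override this patch
relies on.  The symbol the_lock and the three files below are hypothetical
stand-ins: in the kernel, the weak definition is the DEFINE_SEQLOCK(xtime_lock)
in kernel/timer.c, and the overriding strong definition is the xtime_lock
symbol that the x86-64 linker script provides.

/* weak.c: the generic, overridable definition (plays the role of the
 * weak DEFINE_SEQLOCK(xtime_lock) in kernel/timer.c) */
__attribute__((weak)) int the_lock = 1;

/* strong.c: a strong definition of the same symbol; if this object is
 * linked in, the linker chooses it over the weak one, no #ifdef needed */
int the_lock = 2;

/* main.c */
#include <stdio.h>

extern int the_lock;

int main(void)
{
	/* cc weak.c main.c          -> prints the_lock = 1 (weak used)
	 * cc weak.c strong.c main.c -> prints the_lock = 2 (strong wins) */
	printf("the_lock = %d\n", the_lock);
	return 0;
}

Because a strong definition silently wins over a weak one at link time, the
#ifndef ARCH_HAVE_XTIME_LOCK guard becomes unnecessary: architectures without
their own xtime_lock symbol simply use the weak definition.
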
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -56,11 +56,6 @@ extern struct vxtime_data vxtime;
 
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
-extern seqlock_t xtime_lock;
-
-extern int sysctl_vsyscall;
-
-#define ARCH_HAVE_XTIME_LOCK 1
 
 #endif /* __KERNEL__ */
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -90,7 +90,7 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 
 extern struct timespec xtime;
 extern struct timespec wall_to_monotonic;
-extern seqlock_t xtime_lock;
+extern seqlock_t xtime_lock __attribute__((weak));
 
 void timekeeping_init(void);
 
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1162,11 +1162,9 @@ static inline void calc_load(unsigned long ticks)
 /*
  * This read-write spinlock protects us from races in SMP while
  * playing with xtime and avenrun.
  */
-#ifndef ARCH_HAVE_XTIME_LOCK
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 EXPORT_SYMBOL(xtime_lock);
-#endif
 
 /*
  * This function runs timers and the timer-tq in bottom half context.