Commit bcd55074 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer core updates from Thomas Gleixner.

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ia64: vsyscall: Add missing paranthesis
  alarmtimer: Don't call rtc_timer_init() when CONFIG_RTC_CLASS=n
  x86: vdso: Put declaration before code
  x86-64: Inline vdso clock_gettime helpers
  x86-64: Simplify and optimize vdso clock_gettime monotonic variants
  kernel-time: fix s/then/than/ spelling errors
  time: remove no_sync_cmos_clock
  time: Avoid scary backtraces when warning of > 11% adj
  alarmtimer: Make sure we initialize the rtctimer
  ntp: Fix leap-second hrtimer livelock
  x86, tsc: Skip refined tsc calibration on systems with reliable TSC
  rtc: Provide flag for rtc devices that don't support UIE
  ia64: vsyscall: Use seqcount instead of seqlock
  x86: vdso: Use seqcount instead of seqlock
  x86: vdso: Remove bogus locking in update_vsyscall_tz()
  time: Remove bogus comments
  time: Fix change_clocksource locking
  time: x86: Fix race switching from vsyscall to non-vsyscall clock
parents 93f37888 646783a3
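
Editor's note: the recurring theme in the vsyscall/vdso hunks below is replacing a full seqlock_t with a bare seqcount_t. A minimal sketch of that pattern (illustrative names, not code from this merge): the writer is already serialized by an outer lock, so the spinlock half of a seqlock is redundant, and the vdso readers could not take a kernel spinlock anyway — they just retry.

    #include <linux/seqlock.h>

    static seqcount_t demo_seq;             /* hypothetical */
    static struct timespec demo_time;       /* hypothetical shared data */

    /* Writer: caller must already hold whatever lock serializes updates. */
    static void demo_update(const struct timespec *ts)
    {
            write_seqcount_begin(&demo_seq);
            demo_time = *ts;
            write_seqcount_end(&demo_seq);
    }

    /* Reader: lock-free; retries if it raced with a writer. */
    static void demo_read(struct timespec *ts)
    {
            unsigned seq;

            do {
                    seq = read_seqcount_begin(&demo_seq);
                    *ts = demo_time;
            } while (read_seqcount_retry(&demo_seq, seq));
    }
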
@@ -269,8 +269,8 @@ void foo(void)
 	BLANK();
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
-	DEFINE(IA64_GTOD_LOCK_OFFSET,
-		offsetof (struct fsyscall_gtod_data_t, lock));
+	DEFINE(IA64_GTOD_SEQ_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, seq));
 	DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
 		offsetof (struct fsyscall_gtod_data_t, wall_time));
 	DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
...
@@ -173,7 +173,7 @@ ENTRY(fsys_set_tid_address)
 	FSYS_RETURN
 END(fsys_set_tid_address)
-#if IA64_GTOD_LOCK_OFFSET !=0
+#if IA64_GTOD_SEQ_OFFSET !=0
 #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
 #endif
 #if IA64_ITC_JITTER_OFFSET !=0
...
@@ -6,7 +6,7 @@
  */
 struct fsyscall_gtod_data_t {
-	seqlock_t	lock;
+	seqcount_t	seq;
 	struct timespec	wall_time;
 	struct timespec	monotonic_time;
 	cycle_t		clk_mask;
...
@@ -34,9 +34,7 @@
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-	.lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
 struct itc_jitter_data_t itc_jitter_data;
@@ -459,9 +457,7 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timespec *wall, struct timespec *wtm,
 			struct clocksource *c, u32 mult)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
 	/* copy fsyscall clock data */
 	fsyscall_gtod_data.clk_mask = c->mask;
@@ -484,6 +480,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
 		fsyscall_gtod_data.monotonic_time.tv_sec++;
 	}
 
-	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&fsyscall_gtod_data.seq);
 }
...
@@ -5,13 +5,8 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-	seqlock_t	lock;
+	seqcount_t	seq;
 
-	/* open coded 'struct timespec' */
-	time_t		wall_time_sec;
-	u32		wall_time_nsec;
-
-	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
 		cycle_t	cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
 		u32	mult;
 		u32	shift;
 	} clock;
-	struct timespec wall_to_monotonic;
+
+	/* open coded 'struct timespec' */
+	time_t		wall_time_sec;
+	u32		wall_time_nsec;
+	u32		monotonic_time_nsec;
+	time_t		monotonic_time_sec;
+
+	struct timezone sys_tz;
 	struct timespec wall_time_coarse;
+	struct timespec monotonic_time_coarse;
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
...
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
+
+	/*
+	 * Trust the results of the earlier calibration on systems
+	 * exporting a reliable TSC.
+	 */
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		clocksource_register_khz(&clocksource_tsc, tsc_khz);
+		return 0;
+	}
+
 	schedule_delayed_work(&tsc_irqwork, 0);
 	return 0;
 }
...
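
Editor's note: with the hunk above, systems that export a reliable TSC register the clocksource immediately instead of waiting for the refined-calibration worker. A quick standalone way to confirm which clocksource was actually chosen (ordinary userspace C; the sysfs path is standard on Linux, not part of this patch):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/devices/system/clocksource/"
                            "clocksource0/current_clocksource", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("current clocksource: %s", buf);
            fclose(f);
            return 0;
    }
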
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
 void update_vsyscall_tz(void)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
-	/* sys_tz has changed */
 	vsyscall_gtod_data.sys_tz = sys_tz;
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
 
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
-	unsigned long flags;
+	struct timespec monotonic;
 
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&vsyscall_gtod_data.seq);
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vsyscall_gtod_data.clock.mask		= clock->mask;
 	vsyscall_gtod_data.clock.mult		= mult;
 	vsyscall_gtod_data.clock.shift		= clock->shift;
+
 	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
 	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
-	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
+
+	monotonic = timespec_add(*wall_time, *wtm);
+	vsyscall_gtod_data.monotonic_time_sec	= monotonic.tv_sec;
+	vsyscall_gtod_data.monotonic_time_nsec	= monotonic.tv_nsec;
+
 	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
+	vsyscall_gtod_data.monotonic_time_coarse =
+		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
...
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+	long ret;
+
+	asm("syscall" : "=a" (ret) :
+	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	return ret;
+}
+
 notrace static inline long vgetns(void)
 {
 	long v;
 	cycles_t cycles;
 	if (gtod->clock.vclock_mode == VCLOCK_TSC)
 		cycles = vread_tsc();
-	else
+	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
 		cycles = vread_hpet();
+	else
+		return 0;
 	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
 	return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
-notrace static noinline int do_realtime(struct timespec *ts)
+/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
+notrace static int __always_inline do_realtime(struct timespec *ts)
 {
 	unsigned long seq, ns;
+	int mode;
+
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
+		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ts->tv_nsec = gtod->wall_time_nsec;
 		ns = vgetns();
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
 	timespec_add_ns(ts, ns);
-	return 0;
+	return mode;
 }
 
-notrace static noinline int do_monotonic(struct timespec *ts)
+notrace static int do_monotonic(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq, ns;
+	int mode;
+
 	do {
-		seq = read_seqbegin(&gtod->lock);
-		secs = gtod->wall_time_sec;
-		ns = gtod->wall_time_nsec + vgetns();
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
-	 * are all guaranteed to be nonnegative.
-	 */
-	while (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
+		seq = read_seqcount_begin(&gtod->seq);
+		mode = gtod->clock.vclock_mode;
+		ts->tv_sec = gtod->monotonic_time_sec;
+		ts->tv_nsec = gtod->monotonic_time_nsec;
+		ns = vgetns();
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+	timespec_add_ns(ts, ns);
 
-	return 0;
+	return mode;
 }
 
-notrace static noinline int do_realtime_coarse(struct timespec *ts)
+notrace static int do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	return 0;
 }
 
-notrace static noinline int do_monotonic_coarse(struct timespec *ts)
+notrace static int do_monotonic_coarse(struct timespec *ts)
 {
-	unsigned long seq, ns, secs;
+	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
-		secs = gtod->wall_time_coarse.tv_sec;
-		ns = gtod->wall_time_coarse.tv_nsec;
-		secs += gtod->wall_to_monotonic.tv_sec;
-		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
-
-	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
-	 * guaranteed to be between 0 and NSEC_PER_SEC.
-	 */
-	if (ns >= NSEC_PER_SEC) {
-		ns -= NSEC_PER_SEC;
-		++secs;
-	}
-	ts->tv_sec = secs;
-	ts->tv_nsec = ns;
+		seq = read_seqcount_begin(&gtod->seq);
+		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
+		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	return 0;
 }
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
+	int ret = VCLOCK_NONE;
+
 	switch (clock) {
 	case CLOCK_REALTIME:
-		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-			return do_realtime(ts);
+		ret = do_realtime(ts);
 		break;
 	case CLOCK_MONOTONIC:
-		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
-			return do_monotonic(ts);
+		ret = do_monotonic(ts);
 		break;
 	case CLOCK_REALTIME_COARSE:
 		return do_realtime_coarse(ts);
@@ -171,20 +169,22 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 		return do_monotonic_coarse(ts);
 	}
 
-	return vdso_fallback_gettime(clock, ts);
+	if (ret == VCLOCK_NONE)
+		return vdso_fallback_gettime(clock, ts);
+	return 0;
 }
 int clock_gettime(clockid_t, struct timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
-	long ret;
-	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
+	long ret = VCLOCK_NONE;
+
 	if (likely(tv != NULL)) {
 		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 			     offsetof(struct timespec, tv_nsec) ||
 			     sizeof(*tv) != sizeof(struct timespec));
-		do_realtime((struct timespec *)tv);
+		ret = do_realtime((struct timespec *)tv);
 		tv->tv_usec /= 1000;
 	}
 	if (unlikely(tz != NULL)) {
@@ -192,11 +192,10 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
 		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 	}
-		return 0;
-	}
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-	return ret;
+
+	if (ret == VCLOCK_NONE)
+		return vdso_fallback_gtod(tv, tz);
+	return 0;
 }
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
...
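
Editor's note: the functions above are the fast path behind ordinary clock_gettime() calls on x86-64 — glibc resolves the call to __vdso_clock_gettime(), so no syscall happens unless the clocksource has no usable vclock mode. Nothing below is from the patch, just a plain userspace sketch of what exercises it (link with -lrt on older glibc):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec wall, mono;

            clock_gettime(CLOCK_REALTIME, &wall);   /* do_realtime() in the vdso */
            clock_gettime(CLOCK_MONOTONIC, &mono);  /* do_monotonic() in the vdso */

            printf("wall: %ld.%09ld\n", (long)wall.tv_sec, wall.tv_nsec);
            printf("mono: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
            return 0;
    }
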
@@ -458,6 +458,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 	if (rtc->uie_rtctimer.enabled == enabled)
 		goto out;
 
+	if (rtc->uie_unsupported) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	if (enabled) {
 		struct rtc_time tm;
 		ktime_t now, onesec;
...
@@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
 						&mpc5200_rtc_ops, THIS_MODULE);
 	}
 
+	rtc->rtc->uie_unsupported = 1;
+
 	if (IS_ERR(rtc->rtc)) {
 		err = PTR_ERR(rtc->rtc);
 		goto out_free_irq;
...
@@ -202,7 +202,8 @@ struct rtc_device
 	struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
 	int pie_enabled;
 	struct work_struct irqwork;
+	/* Some hardware can't support UIE mode */
+	int uie_unsupported;
 
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 	struct work_struct uie_task;
...
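
Editor's note: taken together, the three RTC hunks above let a driver declare update IRQs (UIE) broken so the core rejects them up front. A hypothetical userspace probe of the new behavior (assumes /dev/rtc0 exists and CONFIG_RTC_INTF_DEV_UIE_EMUL emulation is not compiled in, which would otherwise mask the -EINVAL):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, RTC_UIE_ON, 0) < 0)
                    perror("RTC_UIE_ON");   /* EINVAL if uie_unsupported is set */
            else
                    ioctl(fd, RTC_UIE_OFF, 0);
            close(fd);
            return 0;
    }
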
@@ -116,7 +116,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
 extern int update_persistent_clock(struct timespec now);
-extern int no_sync_cmos_clock __read_mostly;
 void timekeeping_init(void);
 extern int timekeeping_suspended;
...
@@ -252,7 +252,7 @@ extern void ntp_clear(void);
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
 extern u64 ntp_tick_length(void);
 
-extern void second_overflow(void);
+extern int second_overflow(unsigned long secs);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
...
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
 		return error;
 
 	if (tz) {
-		/* SMP safe, global irq locking makes it work. */
 		sys_tz = *tz;
 		update_vsyscall_tz();
 		if (firsttime) {
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
 		}
 	}
 	if (tv)
-	{
-		/* SMP safe, again the code in arch/foo/time.c should
-		 * globally block out interrupts when it runs.
-		 */
 		return do_settimeofday(tv);
-	}
 	return 0;
 }
...
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
 	return 0;
 }
 
+static inline void alarmtimer_rtc_timer_init(void)
+{
+	rtc_timer_init(&rtctimer, NULL, NULL);
+}
+
 static struct class_interface alarmtimer_rtc_interface = {
 	.add_dev = &alarmtimer_rtc_add_device,
 };
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
 #define rtcdev (NULL)
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
+static inline void alarmtimer_rtc_timer_init(void) { }
 #endif
 
 /**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
 		.nsleep		= alarm_timer_nsleep,
 	};
 
+	alarmtimer_rtc_timer_init();
+
 	posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
 	posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
...
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 {
 	u64 ret;
 	/*
-	 * We won't try to correct for more then 11% adjustments (110,000 ppm),
+	 * We won't try to correct for more than 11% adjustments (110,000 ppm),
 	 */
 	ret = (u64)cs->mult * 11;
 	do_div(ret,100);
...
@@ -34,8 +34,6 @@ unsigned long tick_nsec;
 static u64			tick_length;
 static u64			tick_length_base;
 
-static struct hrtimer		leap_timer;
-
 #define MAX_TICKADJ		500LL		/* usecs */
 #define MAX_TICKADJ_SCALED \
 	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -381,70 +379,63 @@ u64 ntp_tick_length(void)
 /*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
 */
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
 {
-	enum hrtimer_restart res = HRTIMER_NORESTART;
-	unsigned long flags;
+	s64 delta;
 	int leap = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&ntp_lock, flags);
 
+	/*
+	 * Leap second processing. If in leap-insert state at the end of the
+	 * day, the system clock is set back one second; if in leap-delete
+	 * state, the system clock is set ahead one second.
+	 */
 	switch (time_state) {
 	case TIME_OK:
+		if (time_status & STA_INS)
+			time_state = TIME_INS;
+		else if (time_status & STA_DEL)
+			time_state = TIME_DEL;
 		break;
 	case TIME_INS:
-		leap = -1;
-		time_state = TIME_OOP;
-		printk(KERN_NOTICE
-			"Clock: inserting leap second 23:59:60 UTC\n");
-		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
-		res = HRTIMER_RESTART;
+		if (secs % 86400 == 0) {
+			leap = -1;
+			time_state = TIME_OOP;
+			printk(KERN_NOTICE
+				"Clock: inserting leap second 23:59:60 UTC\n");
+		}
 		break;
 	case TIME_DEL:
-		leap = 1;
-		time_tai--;
-		time_state = TIME_WAIT;
-		printk(KERN_NOTICE
-			"Clock: deleting leap second 23:59:59 UTC\n");
+		if ((secs + 1) % 86400 == 0) {
+			leap = 1;
+			time_tai--;
+			time_state = TIME_WAIT;
+			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
+		}
 		break;
 	case TIME_OOP:
 		time_tai++;
 		time_state = TIME_WAIT;
-		/* fall through */
+		break;
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
 		break;
 	}
-	spin_unlock_irqrestore(&ntp_lock, flags);
-
-	/*
-	 * We have to call this outside of the ntp_lock to keep
-	 * the proper locking hierarchy
-	 */
-	if (leap)
-		timekeeping_leap_insert(leap);
-
-	return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
-	s64 delta;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ntp_lock, flags);
 
 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -481,15 +472,17 @@ int second_overflow(unsigned long secs)
 	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
 							<< NTP_SCALE_SHIFT;
 	time_adjust = 0;
 out:
 	spin_unlock_irqrestore(&ntp_lock, flags);
+
+	return leap;
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
-/* Disable the cmos update - used by virtualization and embedded */
-int no_sync_cmos_clock __read_mostly;
-
 static void sync_cmos_clock(struct work_struct *work);
 static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -536,7 +529,6 @@ static void sync_cmos_clock(struct work_struct *work)
 
 static void notify_cmos_timer(void)
 {
-	if (!no_sync_cmos_clock)
-		schedule_delayed_work(&sync_cmos_work, 0);
+	schedule_delayed_work(&sync_cmos_work, 0);
 }
 
@@ -544,27 +536,6 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
-	long now = ts->tv_sec;
-
-	if (time_status & STA_INS) {
-		time_state = TIME_INS;
-		now += 86400 - now % 86400;
-		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
-		return;
-	}
-
-	if (time_status & STA_DEL) {
-		time_state = TIME_DEL;
-		now += 86400 - (now + 1) % 86400;
-		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-	}
-}
-
 /*
  * Propagate a new txc->status value into the NTP state:
@@ -589,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
 	time_status &= STA_RONLY;
 	time_status |= txc->status & ~STA_RONLY;
-
-	switch (time_state) {
-	case TIME_OK:
-		ntp_start_leap_timer(ts);
-		break;
-	case TIME_INS:
-	case TIME_DEL:
-		time_state = TIME_OK;
-		ntp_start_leap_timer(ts);
-	case TIME_WAIT:
-		if (!(time_status & (STA_INS | STA_DEL)))
-			time_state = TIME_OK;
-		break;
-	case TIME_OOP:
-		hrtimer_restart(&leap_timer);
-		break;
-	}
 }
 
 /*
  * Called with the xtime lock held, so we can access and modify
@@ -686,9 +641,6 @@ int do_adjtimex(struct timex *txc)
 		    (txc->tick <  900000/USER_HZ ||
 		     txc->tick > 1100000/USER_HZ))
 			return -EINVAL;
-
-		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
-			hrtimer_cancel(&leap_timer);
 	}
 
 	if (txc->modes & ADJ_SETOFFSET) {
@@ -1010,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
 void __init ntp_init(void)
 {
 	ntp_clear();
-	hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	leap_timer.function = ntp_leap_second;
 }
...
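
Editor's note: the livelock fix above drops the leap-second hrtimer entirely and detects day boundaries with plain arithmetic inside second_overflow(). A standalone sanity check of that logic (illustrative userspace C, not kernel code): a UTC day is 86400 s, so "secs % 86400 == 0" marks midnight (the insert point) and "(secs + 1) % 86400 == 0" marks 23:59:59 (the delete point).

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long midnight = 15330 * 86400UL; /* some 00:00:00 UTC */
            unsigned long last_sec = midnight - 1;    /* 23:59:59 the day before */

            assert(midnight % 86400 == 0);            /* TIME_INS would fire */
            assert((last_sec + 1) % 86400 == 0);      /* TIME_DEL would fire */
            printf("day-boundary checks OK\n");
            return 0;
    }
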
@@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp)
 	}
 }
 
-void timekeeping_leap_insert(int leapsecond)
-{
-	unsigned long flags;
-
-	write_seqlock_irqsave(&timekeeper.lock, flags);
-	timekeeper.xtime.tv_sec += leapsecond;
-	timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
-	timekeeping_update(false);
-	write_sequnlock_irqrestore(&timekeeper.lock, flags);
-}
-
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -448,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
 static int change_clocksource(void *data)
 {
 	struct clocksource *new, *old;
+	unsigned long flags;
 
 	new = (struct clocksource *) data;
 
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+
 	timekeeping_forward_now();
 	if (!new->enable || new->enable(new) == 0) {
 		old = timekeeper.clock;
@@ -458,6 +449,10 @@ static int change_clocksource(void *data)
 		if (old->disable)
 			old->disable(old);
 	}
+
+	timekeeping_update(true);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
 	return 0;
 }
@@ -827,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
 	int adj;
 
 	/*
-	 * The point of this is to check if the error is greater then half
+	 * The point of this is to check if the error is greater than half
 	 * an interval.
 	 *
 	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -835,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
 	 * Note we subtract one in the shift, so that error is really error*2.
 	 * This "saves" dividing(shifting) interval twice, but keeps the
 	 * (error > interval) comparison as still measuring if error is
-	 * larger then half an interval.
+	 * larger than half an interval.
 	 *
 	 * Note: It does not "save" on aggravation when reading the code.
 	 */
@@ -843,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
 	if (error > interval) {
 		/*
 		 * We now divide error by 4(via shift), which checks if
-		 * the error is greater then twice the interval.
+		 * the error is greater than twice the interval.
 		 * If it is greater, we need a bigadjust, if its smaller,
 		 * we can adjust by 1.
 		 */
@@ -874,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
 	} else /* No adjustment needed */
 		return;
 
-	WARN_ONCE(timekeeper.clock->maxadj &&
-		(timekeeper.mult + adj > timekeeper.clock->mult +
-					timekeeper.clock->maxadj),
-		"Adjusting %s more then 11%% (%ld vs %ld)\n",
+	if (unlikely(timekeeper.clock->maxadj &&
+			(timekeeper.mult + adj >
+			timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+		printk_once(KERN_WARNING
+			"Adjusting %s more than 11%% (%ld vs %ld)\n",
 			timekeeper.clock->name, (long)timekeeper.mult + adj,
 			(long)timekeeper.clock->mult +
 					timekeeper.clock->maxadj);
+	}
 
 	/*
 	 * So the following can be confusing.
 	 *
@@ -952,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
 	u64 raw_nsecs;
 
-	/* If the offset is smaller then a shifted interval, do nothing */
+	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < timekeeper.cycle_interval<<shift)
 		return offset;
 
@@ -962,9 +959,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
 	while (timekeeper.xtime_nsec >= nsecps) {
+		int leap;
 		timekeeper.xtime_nsec -= nsecps;
 		timekeeper.xtime.tv_sec++;
-		second_overflow();
+		leap = second_overflow(timekeeper.xtime.tv_sec);
+		timekeeper.xtime.tv_sec += leap;
 	}
 
 	/* Accumulate raw time */
@@ -1018,13 +1017,13 @@ static void update_wall_time(void)
 	 * With NO_HZ we may have to accumulate many cycle_intervals
 	 * (think "ticks") worth of time at once. To do this efficiently,
 	 * we calculate the largest doubling multiple of cycle_intervals
-	 * that is smaller then the offset.  We then accumulate that
+	 * that is smaller than the offset.  We then accumulate that
 	 * chunk in one go, and then try to consume the next smaller
 	 * doubled multiple.
 	 */
 	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
 	shift = max(0, shift);
-	/* Bound shift to one less then what overflows tick_length */
+	/* Bound shift to one less than what overflows tick_length */
 	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
@@ -1072,12 +1071,14 @@ static void update_wall_time(void)
 	/*
 	 * Finally, make sure that after the rounding
-	 * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
 	 */
 	if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+		int leap;
 		timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
 		timekeeper.xtime.tv_sec++;
-		second_overflow();
+		leap = second_overflow(timekeeper.xtime.tv_sec);
+		timekeeper.xtime.tv_sec += leap;
 	}
 
 	timekeeping_update(false);
...