Commit d28ede83 authored by Thomas Gleixner, committed by John Stultz

timekeeping: Create struct tk_read_base and use it in struct timekeeper

The members of the new struct are the required ones for the new NMI
safe accessor to clock monotonic. In order to reuse the existing
timekeeping code and to make the update of the fast NMI safe
timekeepers a simple memcpy, use the struct for the timekeeper as well
and convert all users.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent 6d3aadf3
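
Context for the memcpy remark above: the NMI safe accessor this struct is being carved out for keeps two copies of the readout state and uses the parity of a sequence count to steer readers to whichever copy is not currently being written, so a reader firing in NMI context never waits on the updater. That only stays simple if everything a reader needs lives in one contiguous, copyable struct. The accessor itself lands in a later patch of this series; the stand-alone C sketch below uses made-up names (tkr, fast_update, fast_read_ns), simplified memory ordering, and omits the masking of non-64bit counters. It illustrates the idea rather than the kernel implementation.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the tk_read_base fields a reader needs. */
struct tkr {
	uint64_t cycle_last;	/* counter value at last update */
	uint32_t mult;		/* cycles -> shifted-ns multiplier */
	uint32_t shift;		/* scaling shift */
	uint64_t xtime_nsec;	/* shifted ns accumulated at last update */
	int64_t  base_mono;	/* monotonic base (ns) at last update */
};

static _Atomic unsigned int seq;	/* parity selects the stable copy */
static struct tkr base[2];		/* ping-pong copies */

/* Updater: each copy is refreshed with a plain memcpy while the
 * sequence parity steers readers to the other copy. */
static void fast_update(const struct tkr *src)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	memcpy(&base[0], src, sizeof(*src));
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	memcpy(&base[1], src, sizeof(*src));
}

/* Reader: never blocks, so it is safe even when it interrupts the
 * updater between the two increments; it retries only if the
 * sequence moved underneath it. */
static int64_t fast_read_ns(uint64_t (*read_cycles)(void))
{
	const struct tkr *t;
	unsigned int s;
	int64_t ns;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		t = &base[s & 1];
		ns = t->base_mono +
		     (int64_t)(((read_cycles() - t->cycle_last) * t->mult +
				t->xtime_nsec) >> t->shift);
	} while (atomic_load_explicit(&seq, memory_order_acquire) != s);

	return ns;
}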
@@ -211,7 +211,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec xtime_coarse;
-	u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
+	u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");

 	++vdso_data->tb_seq_count;
 	smp_wmb();
@@ -224,11 +224,11 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;

 	if (!use_syscall) {
-		vdso_data->cs_cycle_last = tk->cycle_last;
+		vdso_data->cs_cycle_last = tk->tkr.cycle_last;
 		vdso_data->xtime_clock_sec = tk->xtime_sec;
-		vdso_data->xtime_clock_nsec = tk->xtime_nsec;
-		vdso_data->cs_mult = tk->mult;
-		vdso_data->cs_shift = tk->shift;
+		vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+		vdso_data->cs_mult = tk->tkr.mult;
+		vdso_data->cs_shift = tk->tkr.shift;
 	}

 	smp_wmb();
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
 {
 	u64 nsecps;

-	if (tk->clock != &clocksource_tod)
+	if (tk->tkr.clock != &clocksource_tod)
 		return;

 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec =
 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
-	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
 	while (vdso_data->wtom_clock_nsec >= nsecps) {
 		vdso_data->wtom_clock_nsec -= nsecps;
 		vdso_data->wtom_clock_sec++;
 	}
-	vdso_data->tk_mult = tk->mult;
-	vdso_data->tk_shift = tk->shift;
+	vdso_data->tk_mult = tk->tkr.mult;
+	vdso_data->tk_shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
@@ -261,7 +261,7 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec *wtm = &tk->wall_to_monotonic;
-	struct clocksource *clock = tk->clock;
+	struct clocksource *clock = tk->tkr.clock;

 	if (clock != &cycle_counter_cs)
 		return;
@@ -269,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
 	/* Userspace gettimeofday will spin while this value is odd. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->mult = tk->mult;
-	vdso_data->shift = tk->shift;
+	vdso_data->mult = tk->tkr.mult;
+	vdso_data->shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
 	gtod_write_begin(vdata);

 	/* copy vsyscall data */
-	vdata->vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->cycle_last = tk->cycle_last;
-	vdata->mask = tk->clock->mask;
-	vdata->mult = tk->mult;
-	vdata->shift = tk->shift;
+	vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+	vdata->cycle_last = tk->tkr.cycle_last;
+	vdata->mask = tk->tkr.mask;
+	vdata->mult = tk->tkr.mult;
+	vdata->shift = tk->tkr.shift;

 	vdata->wall_time_sec = tk->xtime_sec;
-	vdata->wall_time_snsec = tk->xtime_nsec;
+	vdata->wall_time_snsec = tk->tkr.xtime_nsec;

 	vdata->monotonic_time_sec = tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec = tk->xtime_nsec
+	vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
 					+ ((u64)tk->wall_to_monotonic.tv_nsec
-						<< tk->shift);
+						<< tk->tkr.shift);
 	while (vdata->monotonic_time_snsec >=
-					(((u64)NSEC_PER_SEC) << tk->shift)) {
+					(((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
 		vdata->monotonic_time_snsec -=
-					((u64)NSEC_PER_SEC) << tk->shift;
+					((u64)NSEC_PER_SEC) << tk->tkr.shift;
 		vdata->monotonic_time_sec++;
 	}

 	vdata->wall_time_coarse_sec = tk->xtime_sec;
-	vdata->wall_time_coarse_nsec = (long)(tk->xtime_nsec >> tk->shift);
+	vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+						 tk->tkr.shift);

 	vdata->monotonic_time_coarse_sec =
 		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
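An aside on the arithmetic this hunk (and the s390 one above) keeps touching: sub-second state such as xtime_nsec and wall_time_snsec is kept in "shifted nanoseconds" (nanoseconds << shift), so a raw counter delta converts with a single multiply, ns = (delta * mult + xtime_nsec) >> shift, and one whole second on that scale is NSEC_PER_SEC << shift. That is exactly what the while loop above carries out of monotonic_time_snsec. A minimal sketch of the carry, with a hypothetical helper name:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Move whole seconds out of a shifted-nanoseconds value, exactly
 * like the normalization loop in update_vsyscall() above. */
static void snsec_normalize(uint64_t *sec, uint64_t *snsec, uint32_t shift)
{
	const uint64_t one_sec = NSEC_PER_SEC << shift;	/* 1 s, shifted */

	while (*snsec >= one_sec) {
		*snsec -= one_sec;
		(*sec)++;
	}
}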
@@ -995,19 +995,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
 	u64 boot_ns;

-	boot_ns = ktime_to_ns(ktime_add(tk->base_mono, tk->offs_boot));
+	boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));

 	write_seqcount_begin(&vdata->seq);

 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->clock.cycle_last = tk->cycle_last;
-	vdata->clock.mask = tk->clock->mask;
-	vdata->clock.mult = tk->mult;
-	vdata->clock.shift = tk->shift;
+	vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+	vdata->clock.cycle_last = tk->tkr.cycle_last;
+	vdata->clock.mask = tk->tkr.mask;
+	vdata->clock.mult = tk->tkr.mult;
+	vdata->clock.shift = tk->tkr.shift;

 	vdata->boot_ns = boot_ns;
-	vdata->nsec_base = tk->xtime_nsec;
+	vdata->nsec_base = tk->tkr.xtime_nsec;

 	write_seqcount_end(&vdata->seq);
 }
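On the boot_ns line above: offs_boot is the offset from clock monotonic to clock boottime, so CLOCK_BOOTTIME = CLOCK_MONOTONIC + offs_boot, and update_pvclock_gtod() snapshots that sum at the instant tkr.base_mono was last updated. A toy illustration (hypothetical helper, plain nanosecond arithmetic):

#include <stdint.h>

/* Illustrative only: how a boot-based timestamp falls out of the
 * snapshot taken above (all values in nanoseconds). */
static int64_t boot_ns_now(int64_t base_mono, int64_t offs_boot,
			   int64_t mono_delta_ns)
{
	/* Boot-based base is base_mono + offs_boot; add the time
	 * elapsed since the snapshot for "now". */
	return (base_mono + offs_boot) + mono_delta_ns;
}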
@@ -10,80 +10,87 @@
 #include <linux/jiffies.h>
 #include <linux/time.h>

-/*
- * Structure holding internal timekeeping values.
- *
- * Note: wall_to_monotonic is what we need to add to xtime (or xtime
- * corrected for sub jiffie times) to get to monotonic time.
- * Monotonic is pegged at zero at system boot time, so
- * wall_to_monotonic will be negative, however, we will ALWAYS keep
- * the tv_nsec part positive so we can use the usual normalization.
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock:	Current clocksource used for timekeeping.
+ * @read:	Read function of @clock
+ * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
+ * @cycle_last:	@clock cycle value at last update
+ * @mult:	NTP adjusted multiplier for scaled math conversion
+ * @shift:	Shift value for scaled math conversion
+ * @xtime_nsec:	Shifted (fractional) nano seconds offset for readout
+ * @base_mono:	ktime_t (nanoseconds) base time for readout
  *
- * wall_to_monotonic is moved after resume from suspend for the
- * monotonic time not to jump. To calculate the real boot time offset
- * we need to do offs_real - offs_boot.
+ * This struct has size 56 byte on 64 bit. Together with a seqcount it
+ * occupies a single 64byte cache line.
  *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- *   used instead.
+ * The struct is separate from struct timekeeper as it is also used
+ * for a fast NMI safe accessor to clock monotonic.
  */
-struct timekeeper {
-	/* Current clocksource used for timekeeping. */
+struct tk_read_base {
 	struct clocksource	*clock;
-	/* Read function of @clock */
 	cycle_t			(*read)(struct clocksource *cs);
-	/* Bitmask for two's complement subtraction of non 64bit counters */
 	cycle_t			mask;
-	/* Last cycle value */
 	cycle_t			cycle_last;
-	/* NTP adjusted clock multiplier */
 	u32			mult;
-	/* The shift value of the current clocksource. */
 	u32			shift;
-	/* Clock shifted nano seconds */
 	u64			xtime_nsec;
-	/* Monotonic base time */
 	ktime_t			base_mono;
+};

-	/* Current CLOCK_REALTIME time in seconds */
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr:		The readout base structure
+ * @xtime_sec:		Current CLOCK_REALTIME time in seconds
+ * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real:		Offset clock monotonic -> clock realtime
+ * @offs_boot:		Offset clock monotonic -> clock boottime
+ * @offs_tai:		Offset clock monotonic -> clock tai
+ * @tai_offset:		The current UTC to TAI offset in seconds
+ * @base_raw:		Monotonic raw base time in ktime_t format
+ * @raw_time:		Monotonic raw base time in timespec64 format
+ * @cycle_interval:	Number of clock cycles in one NTP interval
+ * @xtime_interval:	Number of clock shifted nano seconds in one NTP
+ *			interval.
+ * @xtime_remainder:	Shifted nano seconds left over when rounding
+ *			@cycle_interval
+ * @raw_interval:	Raw nano seconds accumulated per NTP interval.
+ * @ntp_error:		Difference between accumulated time and NTP time in ntp
+ *			shifted nano seconds.
+ * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
+ *			ntp shifted nano seconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * to get to monotonic time. Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative, however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+	struct tk_read_base	tkr;
 	u64			xtime_sec;
-	/* CLOCK_REALTIME to CLOCK_MONOTONIC offset */
 	struct timespec64	wall_to_monotonic;
-	/* Offset clock monotonic -> clock realtime */
 	ktime_t			offs_real;
-	/* Offset clock monotonic -> clock boottime */
 	ktime_t			offs_boot;
-	/* Offset clock monotonic -> clock tai */
 	ktime_t			offs_tai;
-	/* The current UTC to TAI offset in seconds */
 	s32			tai_offset;
-	/* Monotonic raw base time */
 	ktime_t			base_raw;
-	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
 	struct timespec64	raw_time;

-	/* Number of clock cycles in one NTP interval. */
+	/* The following members are for timekeeping internal use */
 	cycle_t			cycle_interval;
-	/* Number of clock shifted nano seconds in one NTP interval. */
 	u64			xtime_interval;
-	/* shifted nano seconds left over when rounding cycle_interval */
 	s64			xtime_remainder;
-	/* Raw nano seconds accumulated per NTP interval. */
 	u32			raw_interval;
-	/*
-	 * Difference between accumulated time and NTP time in ntp
-	 * shifted nano seconds.
-	 */
 	s64			ntp_error;
-	/*
-	 * Shift conversion between clock shifted nano seconds and
-	 * ntp shifted nano seconds.
-	 */
 	u32			ntp_error_shift;
 };
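The "56 byte on 64 bit" claim in the tk_read_base kernel-doc above is easy to check under LP64 assumptions (8-byte pointers, cycle_t as u64, ktime_t as s64): 8 + 8 + 8 + 8 + 4 + 4 + 8 + 8 = 56, with the two u32 members packing into a single 8-byte slot, so the struct plus a 4-byte seqcount still fits one 64-byte cache line. A userspace mock of the layout (illustrative only, not the kernel header):

#include <assert.h>
#include <stdint.h>

struct tk_read_base_mock {
	void	 *clock;		/* struct clocksource *clock */
	uint64_t (*read)(void *cs);	/* cycle_t (*read)(...)      */
	uint64_t mask;			/* cycle_t mask               */
	uint64_t cycle_last;		/* cycle_t cycle_last         */
	uint32_t mult;
	uint32_t shift;			/* mult+shift share one slot  */
	uint64_t xtime_nsec;
	int64_t	 base_mono;		/* ktime_t base_mono          */
};

/* 56 bytes; a 4-byte seqcount plus 4 bytes padding completes one
 * 64-byte cache line. */
static_assert(sizeof(struct tk_read_base_mock) == 56, "LP64 layout");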
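And on the sign convention in the struct timekeeper note: monotonic time is pegged to zero at boot, so if the wall clock read 1000 s when the system booted, wall_to_monotonic is roughly -1000 s, and five seconds later xtime + wall_to_monotonic = 1005 + (-1000) = 5 s of monotonic time. A sketch of the "usual normalization" the note relies on, keeping tv_nsec positive (hypothetical helper):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

struct ts { int64_t tv_sec; int64_t tv_nsec; };

/* Keep tv_nsec in [0, NSEC_PER_SEC) by borrowing from tv_sec; this is
 * why a negative offset like wall_to_monotonic stays well formed. */
static void ts_norm(struct ts *t)
{
	while (t->tv_nsec < 0) {
		t->tv_nsec += NSEC_PER_SEC;
		t->tv_sec--;
	}
	while (t->tv_nsec >= NSEC_PER_SEC) {
		t->tv_nsec -= NSEC_PER_SEC;
		t->tv_sec++;
	}
}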
This diff is collapsed.