Commit 78410af5 authored by Chris Metcalf

tile: add clock_gettime support to vDSO

This change adds support for clock_gettime with CLOCK_REALTIME
and CLOCK_MONOTONIC via the vDSO.  It also updates the vdso
struct nomenclature used for the clocks to match the x86 code,
which makes it easier to keep the two in sync going forward.
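
For reference (not part of the commit itself), the user-visible interface
is unchanged: an ordinary POSIX caller keeps using clock_gettime(), and when
the C library routes the call through the new __vdso_clock_gettime symbol it
completes without entering the kernel.  A minimal sketch of such a caller,
assuming the C library performs that vDSO dispatch:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* With this change these calls can be satisfied in the vDSO on
         * tile, assuming libc dispatches clock_gettime() to the
         * __vdso_clock_gettime symbol exported below. */
        if (clock_gettime(CLOCK_REALTIME, &ts) == 0)
                printf("realtime:  %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}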

We also support the *_COARSE clockid_t variants, for apps that want speed
but aren't concerned about fine-grained timestamps; this saves
about 20 cycles per call (see http://lwn.net/Articles/342018/).
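
As an aside (an illustration, not from the patch): a caller that only needs
tick-granularity timestamps can request the coarse clock explicitly; the vDSO
then just copies the precomputed *_coarse fields instead of reading and
scaling the cycle counter.

#include <time.h>

/* Hypothetical helper: a cheap timestamp for rate limiting or logging,
 * where timer-tick resolution (see clock_getres()) is good enough. */
static inline struct timespec coarse_now(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
        return ts;
}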
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: John Stultz <john.stultz@linaro.org>
parent 94fb1afb
@@ -29,13 +29,18 @@
 struct vdso_data {
         seqcount_t tz_seq;      /* Timezone seqlock */
         seqcount_t tb_seq;      /* Timebase seqlock */
-        __u64 xtime_tod_stamp;  /* TOD clock for xtime */
-        __u64 xtime_clock_sec;  /* Kernel time second */
-        __u64 xtime_clock_nsec; /* Kernel time nanosecond */
-        __u64 wtom_clock_sec;   /* Wall to monotonic clock second */
-        __u64 wtom_clock_nsec;  /* Wall to monotonic clock nanosecond */
+        __u64 cycle_last;       /* TOD clock for xtime */
+        __u64 mask;             /* Cycle mask */
         __u32 mult;             /* Cycle to nanosecond multiplier */
         __u32 shift;            /* Cycle to nanosecond divisor (power of two) */
+        __u64 wall_time_sec;
+        __u64 wall_time_snsec;
+        __u64 monotonic_time_sec;
+        __u64 monotonic_time_snsec;
+        __u64 wall_time_coarse_sec;
+        __u64 wall_time_coarse_nsec;
+        __u64 monotonic_time_coarse_sec;
+        __u64 monotonic_time_coarse_nsec;
         __u32 tz_minuteswest;   /* Minutes west of Greenwich */
         __u32 tz_dsttime;       /* Type of dst correction */
 };
@@ -257,21 +257,44 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timekeeper *tk)
 {
-        struct timespec *wtm = &tk->wall_to_monotonic;
-        struct clocksource *clock = tk->tkr.clock;
-        if (clock != &cycle_counter_cs)
+        if (tk->tkr.clock != &cycle_counter_cs)
                 return;
         write_seqcount_begin(&vdso_data->tb_seq);
-        vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
-        vdso_data->xtime_clock_sec = tk->xtime_sec;
-        vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
-        vdso_data->wtom_clock_sec = wtm->tv_sec;
-        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-        vdso_data->mult = tk->tkr.mult;
-        vdso_data->shift = tk->tkr.shift;
+        vdso_data->cycle_last = tk->tkr.cycle_last;
+        vdso_data->mask = tk->tkr.mask;
+        vdso_data->mult = tk->tkr.mult;
+        vdso_data->shift = tk->tkr.shift;
+        vdso_data->wall_time_sec = tk->xtime_sec;
+        vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+        vdso_data->monotonic_time_sec = tk->xtime_sec
+                + tk->wall_to_monotonic.tv_sec;
+        vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+                + ((u64)tk->wall_to_monotonic.tv_nsec
+                        << tk->tkr.shift);
+        while (vdso_data->monotonic_time_snsec >=
+               (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+                vdso_data->monotonic_time_snsec -=
+                        ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+                vdso_data->monotonic_time_sec++;
+        }
+        vdso_data->wall_time_coarse_sec = tk->xtime_sec;
+        vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+                                                  tk->tkr.shift);
+        vdso_data->monotonic_time_coarse_sec =
+                vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+        vdso_data->monotonic_time_coarse_nsec =
+                vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+        while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+                vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+                vdso_data->monotonic_time_coarse_sec++;
+        }
         write_seqcount_end(&vdso_data->tb_seq);
 }
@@ -82,6 +82,8 @@ VERSION
                 __vdso_rt_sigreturn;
                 __vdso_gettimeofday;
                 gettimeofday;
+                __vdso_clock_gettime;
+                clock_gettime;
         local:*;
         };
 }
@@ -15,6 +15,7 @@
 #define VDSO_BUILD  /* avoid some shift warnings for -m32 in <asm/page.h> */
 #include <linux/time.h>
 #include <asm/timex.h>
+#include <asm/unistd.h>
 #include <asm/vdso.h>
 #if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
 #define get_cycles get_cycles_inline
 #endif
+struct syscall_return_value {
+        long value;
+        long error;
+};
 /*
  * Find out the vDSO data page address in the process address space.
  */
@@ -50,11 +56,82 @@ inline unsigned long get_datapage(void)
         return ret;
 }
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+static inline u64 vgetsns(struct vdso_data *vdso)
+{
+        return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
+}
+static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
+{
+        unsigned count;
+        u64 ns;
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->wall_time_sec;
+                ns = vdso->wall_time_snsec;
+                ns += vgetsns(vdso);
+                ns >>= vdso->shift;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+        ts->tv_nsec = ns;
+        return 0;
+}
+static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
+{
+        unsigned count;
+        u64 ns;
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->monotonic_time_sec;
+                ns = vdso->monotonic_time_snsec;
+                ns += vgetsns(vdso);
+                ns >>= vdso->shift;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+        ts->tv_nsec = ns;
+        return 0;
+}
+static inline int do_realtime_coarse(struct vdso_data *vdso,
+                                     struct timespec *ts)
+{
+        unsigned count;
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->wall_time_coarse_sec;
+                ts->tv_nsec = vdso->wall_time_coarse_nsec;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        return 0;
+}
+static inline int do_monotonic_coarse(struct vdso_data *vdso,
+                                      struct timespec *ts)
 {
-        cycles_t cycles;
         unsigned count;
-        unsigned long sec, ns;
+        do {
+                count = read_seqcount_begin(&vdso->tb_seq);
+                ts->tv_sec = vdso->monotonic_time_coarse_sec;
+                ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
+        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        return 0;
+}
+struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
+                                                struct timezone *tz)
+{
+        struct syscall_return_value ret = { 0, 0 };
+        unsigned count;
         struct vdso_data *vdso = (struct vdso_data *)get_datapage();
         /* The use of the timezone is obsolete, normally tz is NULL. */
@@ -67,25 +144,55 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
         }
         if (unlikely(tv == NULL))
-                return 0;
+                return ret;
-        do {
-                count = read_seqcount_begin(&vdso->tb_seq);
-                sec = vdso->xtime_clock_sec;
-                cycles = get_cycles() - vdso->xtime_tod_stamp;
-                ns = (cycles * vdso->mult) + vdso->xtime_clock_nsec;
-                ns >>= vdso->shift;
-                if (ns >= NSEC_PER_SEC) {
-                        ns -= NSEC_PER_SEC;
-                        sec += 1;
-                }
-        } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+        do_realtime(vdso, (struct timespec *)tv);
+        tv->tv_usec /= 1000;
-        tv->tv_sec = sec;
-        tv->tv_usec = ns / 1000;
-        return 0;
+        return ret;
 }
 int gettimeofday(struct timeval *tv, struct timezone *tz)
         __attribute__((weak, alias("__vdso_gettimeofday")));
+static struct syscall_return_value vdso_fallback_gettime(long clock,
+                                                         struct timespec *ts)
+{
+        struct syscall_return_value ret;
+        __asm__ __volatile__ (
+                "swint1"
+                : "=R00" (ret.value), "=R01" (ret.error)
+                : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
+                : "r2", "r3", "r4", "r5", "r6", "r7",
+                  "r8", "r9", "r11", "r12", "r13", "r14", "r15",
+                  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+                  "r24", "r25", "r26", "r27", "r28", "r29", "memory");
+        return ret;
+}
+struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
+                                                 struct timespec *ts)
+{
+        struct vdso_data *vdso = (struct vdso_data *)get_datapage();
+        struct syscall_return_value ret = { 0, 0 };
+        switch (clock) {
+        case CLOCK_REALTIME:
+                do_realtime(vdso, ts);
+                return ret;
+        case CLOCK_MONOTONIC:
+                do_monotonic(vdso, ts);
+                return ret;
+        case CLOCK_REALTIME_COARSE:
+                do_realtime_coarse(vdso, ts);
+                return ret;
+        case CLOCK_MONOTONIC_COARSE:
+                do_monotonic_coarse(vdso, ts);
+                return ret;
+        default:
+                return vdso_fallback_gettime(clock, ts);
+        }
+}
+int clock_gettime(clockid_t clock, struct timespec *ts)
+        __attribute__((weak, alias("__vdso_clock_gettime")));