Commit 8a284c06 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "The timer department delivers this time:

   - Support for cross clock domain timestamps in the core code plus a
     first user.  That allows more precise timestamping for PTP and
     later for audio and other peripherals.

     The ptp/e1000e patches have been acked by the relevant maintainers
     and are carried in the timer tree to avoid merge ordering issues.

   - Support for unregistering the current clocksource watchdog.  That
     lifts a limitation for switching clocksources which has been there
     from day 1

   - The usual pile of fixes and updates to the core and the drivers.
     Nothing outstanding and exciting"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  time/timekeeping: Work around false positive GCC warning
  e1000e: Adds hardware supported cross timestamp on e1000e nic
  ptp: Add PTP_SYS_OFFSET_PRECISE for driver crosstimestamping
  x86/tsc: Always Running Timer (ART) correlated clocksource
  hrtimer: Revert CLOCK_MONOTONIC_RAW support
  time: Add history to cross timestamp interface supporting slower devices
  time: Add driver cross timestamp interface for higher precision time synchronization
  time: Remove duplicated code in ktime_get_raw_and_real()
  time: Add timekeeping snapshot code capturing system time and counter
  time: Add cycles to nanoseconds translation
  jiffies: Use CLOCKSOURCE_MASK instead of constant
  clocksource: Introduce clocksource_freq2mult()
  clockevents/drivers/exynos_mct: Implement ->set_state_oneshot_stopped()
  clockevents/drivers/arm_global_timer: Implement ->set_state_oneshot_stopped()
  clockevents/drivers/arm_arch_timer: Implement ->set_state_oneshot_stopped()
  clocksource/drivers/arm_global_timer: Register delay timer
  clocksource/drivers/lpc32xx: Support timer-based ARM delay
  clocksource/drivers/lpc32xx: Support periodic mode
  clocksource/drivers/lpc32xx: Don't use the prescaler counter for clockevents
  clocksource/drivers/rockchip: Add err handle for rk_timer_init
  ...
parents 208de214 6436257b
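The headline addition, PTP_SYS_OFFSET_PRECISE, is consumed from user space through the PTP character device. A minimal sketch of how a program would query it (not part of this merge; it assumes the hardware clock is exposed as /dev/ptp0 and that its driver implements ->getcrosststamp, otherwise the ioctl fails with EOPNOTSUPP):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
    struct ptp_sys_offset_precise off = {0};
    int fd = open("/dev/ptp0", O_RDWR);    /* device path is an assumption */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off)) {
        perror("PTP_SYS_OFFSET_PRECISE");
        close(fd);
        return 1;
    }
    /* device time plus the correlated system clocks, captured in one shot */
    printf("device:   %lld.%09u\n", (long long)off.device.sec, off.device.nsec);
    printf("realtime: %lld.%09u\n", (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
    printf("monoraw:  %lld.%09u\n", (long long)off.sys_monoraw.sec, off.sys_monoraw.nsec);
    close(fd);
    return 0;
}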
@@ -277,13 +277,15 @@ int main(int argc, char *argv[])
 			       " %d external time stamp channels\n"
 			       " %d programmable periodic signals\n"
 			       " %d pulse per second\n"
-			       " %d programmable pins\n",
+			       " %d programmable pins\n"
+			       " %d cross timestamping\n",
 			       caps.max_adj,
 			       caps.n_alarm,
 			       caps.n_ext_ts,
 			       caps.n_per_out,
 			       caps.pps,
-			       caps.n_pins);
+			       caps.n_pins,
+			       caps.cross_timestamping);
 		}
 	}
...
@@ -85,7 +85,7 @@
 #define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC	( 3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */
-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ART		( 3*32+10) /* Platform has always running timer (ART) */
 #define X86_FEATURE_ARCH_PERFMON	( 3*32+11) /* Intel Architectural PerfMon */
 #define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */
...
@@ -29,6 +29,8 @@ static inline cycles_t get_cycles(void)
 	return rdtsc();
 }
 
+extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
...
@@ -43,6 +43,11 @@ static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 int tsc_clocksource_reliable;
 
+static u32 art_to_tsc_numerator;
+static u32 art_to_tsc_denominator;
+static u64 art_to_tsc_offset;
+struct clocksource *art_related_clocksource;
+
 /*
  * Use a ring-buffer like data structure, where a writer advances the head by
  * writing a new data entry and a reader advances the tail when it observes a
@@ -964,6 +969,37 @@ core_initcall(cpufreq_tsc);
 #endif /* CONFIG_CPU_FREQ */
 
+#define ART_CPUID_LEAF (0x15)
+#define ART_MIN_DENOMINATOR (1)
+
+/*
+ * If ART is present detect the numerator:denominator to convert to TSC
+ */
+static void detect_art(void)
+{
+	unsigned int unused[2];
+
+	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
+		return;
+
+	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
+	      &art_to_tsc_numerator, unused, unused+1);
+
+	/* Don't enable ART in a VM, non-stop TSC required */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
+	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+		return;
+
+	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
+		return;
+
+	/* Make this sticky over multiple CPU init calls */
+	setup_force_cpu_cap(X86_FEATURE_ART);
+}
+
 /* clocksource code */
 
 static struct clocksource clocksource_tsc;
@@ -1071,6 +1107,25 @@ int unsynchronized_tsc(void)
 	return 0;
 }
 
+/*
+ * Convert ART to TSC given numerator/denominator found in detect_art()
+ */
+struct system_counterval_t convert_art_to_tsc(cycle_t art)
+{
+	u64 tmp, res, rem;
+
+	rem = do_div(art, art_to_tsc_denominator);
+
+	res = art * art_to_tsc_numerator;
+	tmp = rem * art_to_tsc_numerator;
+
+	do_div(tmp, art_to_tsc_denominator);
+	res += tmp + art_to_tsc_offset;
+
+	return (struct system_counterval_t) {.cs = art_related_clocksource,
+					     .cycles = res};
+}
+EXPORT_SYMBOL(convert_art_to_tsc);
+
 static void tsc_refine_calibration_work(struct work_struct *work);
 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
@@ -1142,6 +1197,8 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 		(unsigned long)tsc_khz % 1000);
 
 out:
+	if (boot_cpu_has(X86_FEATURE_ART))
+		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
@@ -1235,6 +1292,8 @@ void __init tsc_init(void)
 		mark_tsc_unstable("TSCs unsynchronized");
 
 	check_system_tsc_reliable();
+
+	detect_art();
 }
 
 #ifdef CONFIG_SMP
...
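For reference, the conversion done by convert_art_to_tsc() above is plain ratio scaling, TSC = ART * numerator / denominator + offset, split into a whole part and a remainder part so the intermediate products cannot overflow 64 bits. A standalone user-space sketch of the same arithmetic (illustrative values, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t art_to_tsc(uint64_t art, uint32_t num, uint32_t den, uint64_t offset)
{
    uint64_t rem = art % den;
    uint64_t res = (art / den) * num;   /* whole part */

    res += rem * num / den;             /* remainder part */
    return res + offset;
}

int main(void)
{
    /* numerator/denominator as CPUID leaf 0x15 would report them; made up here */
    printf("%llu\n", (unsigned long long)art_to_tsc(1000000, 168, 2, 0));
    return 0;
}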
@@ -160,6 +160,7 @@ config CLKSRC_EFM32
 config CLKSRC_LPC32XX
 	bool "Clocksource for LPC32XX" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+	depends on ARM
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 	help
...
@@ -32,6 +32,14 @@
 #define CNTTIDR		0x08
 #define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))
 
+#define CNTACR(n)	(0x40 + ((n) * 4))
+#define CNTACR_RPCT	BIT(0)
+#define CNTACR_RVCT	BIT(1)
+#define CNTACR_RFRQ	BIT(2)
+#define CNTACR_RVOFF	BIT(3)
+#define CNTACR_RWVT	BIT(4)
+#define CNTACR_RWPT	BIT(5)
+
 #define CNTVCT_LO	0x08
 #define CNTVCT_HI	0x0c
 #define CNTFRQ		0x10
@@ -266,10 +274,12 @@ static void __arch_timer_setup(unsigned type,
 		if (arch_timer_use_virtual) {
 			clk->irq = arch_timer_ppi[VIRT_PPI];
 			clk->set_state_shutdown = arch_timer_shutdown_virt;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
 			clk->set_next_event = arch_timer_set_next_event_virt;
 		} else {
 			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
 			clk->set_state_shutdown = arch_timer_shutdown_phys;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
 			clk->set_next_event = arch_timer_set_next_event_phys;
 		}
 	} else {
@@ -279,10 +289,12 @@ static void __arch_timer_setup(unsigned type,
 		clk->cpumask = cpu_all_mask;
 		if (arch_timer_mem_use_virtual) {
 			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_virt_mem;
 		} else {
 			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
+			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_phys_mem;
 		}
@@ -757,7 +769,6 @@ static void __init arch_timer_mem_init(struct device_node *np)
 	}
 
 	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-	iounmap(cntctlbase);
 
 	/*
 	 * Try to find a virtual capable frame. Otherwise fall back to a
@@ -765,20 +776,31 @@ static void __init arch_timer_mem_init(struct device_node *np)
 	 */
 	for_each_available_child_of_node(np, frame) {
 		int n;
+		u32 cntacr;
 
 		if (of_property_read_u32(frame, "frame-number", &n)) {
 			pr_err("arch_timer: Missing frame-number\n");
-			of_node_put(best_frame);
 			of_node_put(frame);
-			return;
+			goto out;
 		}
 
-		if (cnttidr & CNTTIDR_VIRT(n)) {
+		/* Try enabling everything, and see what sticks */
+		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
+		cntacr = readl_relaxed(cntctlbase + CNTACR(n));
+
+		if ((cnttidr & CNTTIDR_VIRT(n)) &&
+		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
 			of_node_put(best_frame);
 			best_frame = frame;
 			arch_timer_mem_use_virtual = true;
 			break;
 		}
+
+		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
+			continue;
+
 		of_node_put(best_frame);
 		best_frame = of_node_get(frame);
 	}
@@ -786,24 +808,26 @@ static void __init arch_timer_mem_init(struct device_node *np)
 	base = arch_counter_base = of_iomap(best_frame, 0);
 	if (!base) {
 		pr_err("arch_timer: Can't map frame's registers\n");
-		of_node_put(best_frame);
-		return;
+		goto out;
 	}
 
 	if (arch_timer_mem_use_virtual)
 		irq = irq_of_parse_and_map(best_frame, 1);
 	else
 		irq = irq_of_parse_and_map(best_frame, 0);
-	of_node_put(best_frame);
+
 	if (!irq) {
 		pr_err("arch_timer: Frame missing %s irq",
 		       arch_timer_mem_use_virtual ? "virt" : "phys");
-		return;
+		goto out;
 	}
 
 	arch_timer_detect_rate(base, np);
 	arch_timer_mem_register(base, irq);
 	arch_timer_common_init();
+out:
+	iounmap(cntctlbase);
+	of_node_put(best_frame);
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
 		       arch_timer_mem_init);
...
@@ -16,6 +16,7 @@
 #include <linux/clockchips.h>
 #include <linux/cpu.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -174,6 +175,7 @@ static int gt_clockevents_init(struct clock_event_device *clk)
 	clk->set_state_shutdown = gt_clockevent_shutdown;
 	clk->set_state_periodic = gt_clockevent_set_periodic;
 	clk->set_state_oneshot = gt_clockevent_shutdown;
+	clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
 	clk->set_next_event = gt_clockevent_set_next_event;
 	clk->cpumask = cpumask_of(cpu);
 	clk->rating = 300;
@@ -221,6 +223,21 @@ static u64 notrace gt_sched_clock_read(void)
 }
 #endif
 
+static unsigned long gt_read_long(void)
+{
+	return readl_relaxed(gt_base + GT_COUNTER0);
+}
+
+static struct delay_timer gt_delay_timer = {
+	.read_current_timer = gt_read_long,
+};
+
+static void __init gt_delay_timer_init(void)
+{
+	gt_delay_timer.freq = gt_clk_rate;
+	register_current_timer_delay(&gt_delay_timer);
+}
+
 static void __init gt_clocksource_init(void)
 {
 	writel(0, gt_base + GT_CONTROL);
@@ -317,6 +334,7 @@ static void __init global_timer_of_register(struct device_node *np)
 	/* Immediately configure the timer on the boot CPU */
 	gt_clocksource_init();
 	gt_clockevents_init(this_cpu_ptr(gt_evt));
+	gt_delay_timer_init();
 
 	return;
...
@@ -313,6 +313,7 @@ static struct clock_event_device mct_comp_device = {
 	.set_state_periodic	= mct_set_state_periodic,
 	.set_state_shutdown	= mct_set_state_shutdown,
 	.set_state_oneshot	= mct_set_state_shutdown,
+	.set_state_oneshot_stopped = mct_set_state_shutdown,
 	.tick_resume		= mct_set_state_shutdown,
 };
@@ -452,6 +453,7 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
 	evt->set_state_periodic = set_state_periodic;
 	evt->set_state_shutdown = set_state_shutdown;
 	evt->set_state_oneshot = set_state_shutdown;
+	evt->set_state_oneshot_stopped = set_state_shutdown;
 	evt->tick_resume = set_state_shutdown;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 450;
...
@@ -122,23 +122,23 @@ static void __init rk_timer_init(struct device_node *np)
 	pclk = of_clk_get_by_name(np, "pclk");
 	if (IS_ERR(pclk)) {
 		pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
-		return;
+		goto out_unmap;
 	}
 
 	if (clk_prepare_enable(pclk)) {
 		pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
-		return;
+		goto out_unmap;
 	}
 
 	timer_clk = of_clk_get_by_name(np, "timer");
 	if (IS_ERR(timer_clk)) {
 		pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
-		return;
+		goto out_timer_clk;
 	}
 
 	if (clk_prepare_enable(timer_clk)) {
 		pr_err("Failed to enable timer clock\n");
-		return;
+		goto out_timer_clk;
 	}
 
 	bc_timer.freq = clk_get_rate(timer_clk);
@@ -146,7 +146,7 @@ static void __init rk_timer_init(struct device_node *np)
 	irq = irq_of_parse_and_map(np, 0);
 	if (!irq) {
 		pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
-		return;
+		goto out_irq;
 	}
 
 	ce->name = TIMER_NAME;
@@ -164,10 +164,19 @@ static void __init rk_timer_init(struct device_node *np)
 	ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce);
 	if (ret) {
 		pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret);
-		return;
+		goto out_irq;
 	}
 
 	clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
+
+	return;
+
+out_irq:
+	clk_disable_unprepare(timer_clk);
+out_timer_clk:
+	clk_disable_unprepare(pclk);
+out_unmap:
+	iounmap(bc_timer.base);
 }
 CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
@@ -18,6 +18,7 @@
 #include <linux/clk.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
+#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
@@ -43,6 +44,7 @@
 struct lpc32xx_clock_event_ddata {
 	struct clock_event_device evtdev;
 	void __iomem *base;
+	u32 ticks_per_jiffy;
 };
 
 /* Needed for the sched clock */
@@ -53,6 +55,15 @@ static u64 notrace lpc32xx_read_sched_clock(void)
 	return readl(clocksource_timer_counter);
 }
 
+static unsigned long lpc32xx_delay_timer_read(void)
+{
+	return readl(clocksource_timer_counter);
+}
+
+static struct delay_timer lpc32xx_delay_timer = {
+	.read_current_timer = lpc32xx_delay_timer_read,
+};
+
 static int lpc32xx_clkevt_next_event(unsigned long delta,
 				     struct clock_event_device *evtdev)
 {
@@ -60,14 +71,13 @@ static int lpc32xx_clkevt_next_event(unsigned long delta,
 		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
 
 	/*
-	 * Place timer in reset and program the delta in the prescale
-	 * register (PR). When the prescale counter matches the value
-	 * in PR the counter register is incremented and the compare
-	 * match will trigger. After setup the timer is released from
-	 * reset and enabled.
+	 * Place timer in reset and program the delta in the match
+	 * channel 0 (MR0). When the timer counter matches the value
+	 * in MR0 register the match will trigger an interrupt.
+	 * After setup the timer is released from reset and enabled.
	 */
 	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
-	writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR);
+	writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
 	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
 
 	return 0;
@@ -86,11 +96,39 @@ static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
 static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
 {
+	struct lpc32xx_clock_event_ddata *ddata =
+		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
 	/*
 	 * When using oneshot, we must also disable the timer
 	 * to wait for the first call to set_next_event().
 	 */
-	return lpc32xx_clkevt_shutdown(evtdev);
+	writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
+
+	/* Enable interrupt, reset on match and stop on match (MCR). */
+	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
+		       LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
+	return 0;
+}
+
+static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
+{
+	struct lpc32xx_clock_event_ddata *ddata =
+		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+	/* Enable interrupt and reset on match. */
+	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
+		       ddata->base + LPC32XX_TIMER_MCR);
+
+	/*
+	 * Place timer in reset and program the delta in the match
+	 * channel 0 (MR0).
+	 */
+	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
+	writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
+	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
+
+	return 0;
 }
 
 static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
@@ -108,11 +146,13 @@ static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
 static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
 	.evtdev = {
 		.name           = "lpc3220 clockevent",
-		.features       = CLOCK_EVT_FEAT_ONESHOT,
+		.features       = CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
 		.rating         = 300,
 		.set_next_event = lpc32xx_clkevt_next_event,
 		.set_state_shutdown = lpc32xx_clkevt_shutdown,
 		.set_state_oneshot  = lpc32xx_clkevt_oneshot,
+		.set_state_periodic = lpc32xx_clkevt_periodic,
 	},
 };
@@ -162,6 +202,8 @@ static int __init lpc32xx_clocksource_init(struct device_node *np)
 	}
 
 	clocksource_timer_counter = base + LPC32XX_TIMER_TC;
+	lpc32xx_delay_timer.freq = rate;
+	register_current_timer_delay(&lpc32xx_delay_timer);
 	sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
 
 	return 0;
@@ -210,18 +252,16 @@ static int __init lpc32xx_clockevent_init(struct device_node *np)
 	/*
 	 * Disable timer and clear any pending interrupt (IR) on match
-	 * channel 0 (MR0). Configure a compare match value of 1 on MR0
-	 * and enable interrupt, reset on match and stop on match (MCR).
+	 * channel 0 (MR0). Clear the prescaler as it's not used.
	 */
 	writel_relaxed(0, base + LPC32XX_TIMER_TCR);
+	writel_relaxed(0, base + LPC32XX_TIMER_PR);
 	writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
 	writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
-	writel_relaxed(1, base + LPC32XX_TIMER_MR0);
-	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
-		       LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR);
 
 	rate = clk_get_rate(clk);
 	lpc32xx_clk_event_ddata.base = base;
+	lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
 	clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
 					rate, 1, -1);
...
@@ -83,6 +83,15 @@ config E1000E
 	  To compile this driver as a module, choose M here. The module
 	  will be called e1000e.
 
+config E1000E_HWTS
+	bool "Support HW cross-timestamp on PCH devices"
+	default y
+	depends on E1000E && X86
+	---help---
+	  Say Y to enable hardware supported cross-timestamping on PCH
+	  devices. The cross-timestamp is available through the PTP clock
+	  driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE).
+
 config IGB
 	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 	depends on PCI
...
@@ -528,6 +528,11 @@
 #define E1000_RXCW_C          0x20000000        /* Receive config */
 #define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
 
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK	0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP		0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC		0x80000000 /* initiate sync */
+
 #define E1000_TSYNCTXCTL_VALID		0x00000001 /* Tx timestamp valid */
 #define E1000_TSYNCTXCTL_ENABLED	0x00000010 /* enable Tx timestamping */
...
@@ -26,6 +26,12 @@
 #include "e1000.h"
 
+#ifdef CONFIG_E1000E_HWTS
+#include <linux/clocksource.h>
+#include <linux/ktime.h>
+#include <asm/tsc.h>
+#endif
+
 /**
  * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
  * @ptp: ptp clock structure
@@ -98,6 +104,78 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }
 
+#ifdef CONFIG_E1000E_HWTS
+#define MAX_HW_WAIT_COUNT (3)
+
+/**
+ * e1000e_phc_get_syncdevicetime - Callback given to timekeeping code reads system/device registers
+ * @device: current device time
+ * @system: system counter value read synchronously with device time
+ * @ctx: context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ **/
+static int e1000e_phc_get_syncdevicetime(ktime_t *device,
+					 struct system_counterval_t *system,
+					 void *ctx)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)ctx;
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
+	int i;
+	u32 tsync_ctrl;
+	cycle_t dev_cycles;
+	cycle_t sys_cycles;
+
+	tsync_ctrl = er32(TSYNCTXCTL);
+	tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
+		E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK;
+	ew32(TSYNCTXCTL, tsync_ctrl);
+	for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) {
+		udelay(1);
+		tsync_ctrl = er32(TSYNCTXCTL);
+		if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP)
+			break;
+	}
+	if (i == MAX_HW_WAIT_COUNT)
+		return -ETIMEDOUT;
+
+	dev_cycles = er32(SYSSTMPH);
+	dev_cycles <<= 32;
+	dev_cycles |= er32(SYSSTMPL);
+	spin_lock_irqsave(&adapter->systim_lock, flags);
+	*device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles));
+	spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+	sys_cycles = er32(PLTSTMPH);
+	sys_cycles <<= 32;
+	sys_cycles |= er32(PLTSTMPL);
+	*system = convert_art_to_tsc(sys_cycles);
+
+	return 0;
+}
+
+/**
+ * e1000e_phc_getsynctime - Reads the current system/device cross timestamp
+ * @ptp: ptp clock structure
+ * @cts: structure containing timestamp
+ *
+ * Read device and system (ART) clock simultaneously and return the scaled
+ * clock values in ns.
+ **/
+static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
+				     struct system_device_crosststamp *xtstamp)
+{
+	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+						     ptp_clock_info);
+
+	return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime,
+					     adapter, NULL, xtstamp);
+}
+#endif/*CONFIG_E1000E_HWTS*/
+
 /**
  * e1000e_phc_gettime - Reads the current time from the hardware clock
  * @ptp: ptp clock structure
@@ -236,6 +314,13 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 		break;
 	}
 
+#ifdef CONFIG_E1000E_HWTS
+	/* CPU must have ART and GBe must be from Sunrise Point or greater */
+	if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART))
+		adapter->ptp_clock_info.getcrosststamp =
+			e1000e_phc_getcrosststamp;
+#endif/*CONFIG_E1000E_HWTS*/
+
 	INIT_DELAYED_WORK(&adapter->systim_overflow_work,
 			  e1000e_systim_overflow_work);
...
@@ -245,6 +245,10 @@
 #define E1000_SYSTIML	0x0B600	/* System time register Low - RO */
 #define E1000_SYSTIMH	0x0B604	/* System time register High - RO */
 #define E1000_TIMINCA	0x0B608	/* Increment attributes register - RW */
+#define E1000_SYSSTMPL	0x0B648	/* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH	0x0B64C	/* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL	0x0B640	/* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH	0x0B644	/* HH Timesync platform stamp hi register */
 #define E1000_RXMTRL	0x0B634	/* Time sync Rx EtherType and Msg Type - RW */
 #define E1000_RXUDP	0x0B638	/* Time Sync Rx UDP Port - RW */
...
@@ -22,6 +22,7 @@
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 
 #include "ptp_private.h"
@@ -120,11 +121,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 	struct ptp_clock_caps caps;
 	struct ptp_clock_request req;
 	struct ptp_sys_offset *sysoff = NULL;
+	struct ptp_sys_offset_precise precise_offset;
 	struct ptp_pin_desc pd;
 	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 	struct ptp_clock_info *ops = ptp->info;
 	struct ptp_clock_time *pct;
 	struct timespec64 ts;
+	struct system_device_crosststamp xtstamp;
 	int enable, err = 0;
 	unsigned int i, pin_index;
@@ -138,6 +141,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		caps.n_per_out = ptp->info->n_per_out;
 		caps.pps = ptp->info->pps;
 		caps.n_pins = ptp->info->n_pins;
+		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
 		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 			err = -EFAULT;
 		break;
@@ -180,6 +184,29 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		err = ops->enable(ops, &req, enable);
 		break;
 
+	case PTP_SYS_OFFSET_PRECISE:
+		if (!ptp->info->getcrosststamp) {
+			err = -EOPNOTSUPP;
+			break;
+		}
+		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+		if (err)
+			break;
+
+		ts = ktime_to_timespec64(xtstamp.device);
+		precise_offset.device.sec = ts.tv_sec;
+		precise_offset.device.nsec = ts.tv_nsec;
+		ts = ktime_to_timespec64(xtstamp.sys_realtime);
+		precise_offset.sys_realtime.sec = ts.tv_sec;
+		precise_offset.sys_realtime.nsec = ts.tv_nsec;
+		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
+		precise_offset.sys_monoraw.sec = ts.tv_sec;
+		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
+		if (copy_to_user((void __user *)arg, &precise_offset,
+				 sizeof(precise_offset)))
+			err = -EFAULT;
+		break;
+
 	case PTP_SYS_OFFSET:
 		sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
 		if (!sysoff) {
...
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
 static inline void
-clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
 {
-	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
 }
 
 extern void clockevents_suspend(void);
...
@@ -118,6 +118,23 @@ struct clocksource {
 /* simplify initialization of mask field */
 #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
 
+static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
+{
+	/*  freq = cyc/from
+	 *  mult/2^shift  = ns/cyc
+	 *  mult = ns/cyc * 2^shift
+	 *  mult = from/freq * 2^shift
+	 *  mult = from * 2^shift / freq
+	 *  mult = (from<<shift) / freq
+	 */
+	u64 tmp = ((u64)from) << shift_constant;
+
+	tmp += freq/2;	/* round for do_div */
+	do_div(tmp, freq);
+
+	return (u32)tmp;
+}
+
 /**
  * clocksource_khz2mult - calculates mult from khz and shift
  * @khz:		Clocksource frequency in KHz
@@ -128,19 +145,7 @@ struct clocksource {
  */
 static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
 {
-	/*  khz = cyc/(Million ns)
-	 *  mult/2^shift  = ns/cyc
-	 *  mult = ns/cyc * 2^shift
-	 *  mult = 1Million/khz * 2^shift
-	 *  mult = 1000000 * 2^shift / khz
-	 *  mult = (1000000<<shift) / khz
-	 */
-	u64 tmp = ((u64)1000000) << shift_constant;
-
-	tmp += khz/2;	/* round for do_div */
-	do_div(tmp, khz);
-
-	return (u32)tmp;
+	return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
 }
 
 /**
@@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
  */
 static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 {
-	/*  hz = cyc/(Billion ns)
-	 *  mult/2^shift  = ns/cyc
-	 *  mult = ns/cyc * 2^shift
-	 *  mult = 1Billion/hz * 2^shift
-	 *  mult = 1000000000 * 2^shift / hz
-	 *  mult = (1000000000<<shift) / hz
-	 */
-	u64 tmp = ((u64)1000000000) << shift_constant;
-
-	tmp += hz/2;	/* round for do_div */
-	do_div(tmp, hz);
-
-	return (u32)tmp;
+	return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
 }
 
 /**
...
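The new clocksource_freq2mult() helper simply evaluates mult = (from << shift) / freq with rounding, which is why clocksource_khz2mult() and clocksource_hz2mult() collapse into one-line wrappers passing NSEC_PER_MSEC and NSEC_PER_SEC. A standalone user-space sketch of that arithmetic (illustrative 19.2 MHz clock, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t freq2mult(uint32_t freq, uint32_t shift, uint64_t from)
{
    uint64_t tmp = from << shift;

    tmp += freq / 2;    /* round for the division */
    return (uint32_t)(tmp / freq);
}

int main(void)
{
    /* same result clocksource_hz2mult(19200000, 24) would produce */
    printf("mult = %u\n", freq2mult(19200000, 24, 1000000000ULL));
    return 0;
}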
@@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
 	kt->nsec = ts.tv_nsec;
 }
 
-#ifdef CONFIG_NTP_PPS
-
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-	ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
-}
+	struct system_time_snapshot snap;
 
-#else /* CONFIG_NTP_PPS */
-
-static inline void pps_get_ts(struct pps_event_time *ts)
-{
-	ktime_get_real_ts64(&ts->ts_real);
+	ktime_get_snapshot(&snap);
+	ts->ts_real = ktime_to_timespec64(snap.real);
+#ifdef CONFIG_NTP_PPS
+	ts->ts_raw = ktime_to_timespec64(snap.raw);
+#endif
 }
 
-#endif /* CONFIG_NTP_PPS */
-
 /* Subtract known time delay from PPS event time(s) */
 static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
 {
...
@@ -38,6 +38,7 @@ struct ptp_clock_request {
 	};
 };
 
+struct system_device_crosststamp;
 /**
  * struct ptp_clock_info - decribes a PTP hardware clock
  *
@@ -67,6 +68,11 @@ struct ptp_clock_request {
  * @gettime64:  Reads the current time from the hardware clock.
  *              parameter ts: Holds the result.
  *
+ * @getcrosststamp:  Reads the current time from the hardware clock and
+ *                   system clock simultaneously.
+ *                   parameter cts: Contains timestamp (device,system) pair,
+ *                   where system time is realtime and monotonic.
+ *
  * @settime64:  Set the current time on the hardware clock.
  *              parameter ts: Time value to set.
  *
@@ -105,6 +111,8 @@ struct ptp_clock_info {
 	int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
 	int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
 	int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+	int (*getcrosststamp)(struct ptp_clock_info *ptp,
+			      struct system_device_crosststamp *cts);
 	int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
 	int (*enable)(struct ptp_clock_info *ptp,
 		      struct ptp_clock_request *request, int on);
...
@@ -50,6 +50,7 @@ struct tk_read_base {
  * @offs_tai:		Offset clock monotonic -> clock tai
  * @tai_offset:		The current UTC to TAI offset in seconds
  * @clock_was_set_seq:	The sequence number of clock was set events
+ * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_time:		Monotonic raw base time in timespec64 format
  * @cycle_interval:	Number of clock cycles in one NTP interval
@@ -91,6 +92,7 @@ struct timekeeper {
 	ktime_t			offs_tai;
 	s32			tai_offset;
 	unsigned int		clock_was_set_seq;
+	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
 	struct timespec64	raw_time;
...
@@ -266,6 +266,64 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
 					struct timespec64 *ts_real);
 
+/*
+ * struct system_time_snapshot - simultaneous raw/real time capture with
+ *	counter value
+ * @cycles:	Clocksource counter value to produce the system times
+ * @real:	Realtime system time
+ * @raw:	Monotonic raw system time
+ * @clock_was_set_seq:	The sequence number of clock was set events
+ * @cs_was_changed_seq:	The sequence number of clocksource change events
+ */
+struct system_time_snapshot {
+	cycle_t		cycles;
+	ktime_t		real;
+	ktime_t		raw;
+	unsigned int	clock_was_set_seq;
+	u8		cs_was_changed_seq;
+};
+
+/*
+ * struct system_device_crosststamp - system/device cross-timestamp
+ *	(syncronized capture)
+ * @device:		Device time
+ * @sys_realtime:	Realtime simultaneous with device time
+ * @sys_monoraw:	Monotonic raw simultaneous with device time
+ */
+struct system_device_crosststamp {
+	ktime_t	device;
+	ktime_t	sys_realtime;
+	ktime_t	sys_monoraw;
+};
+
+/*
+ * struct system_counterval_t - system counter value with the pointer to the
+ *	corresponding clocksource
+ * @cycles:	System counter value
+ * @cs:		Clocksource corresponding to system counter value. Used by
+ *	timekeeping code to verify comparibility of two cycle values
+ */
+struct system_counterval_t {
+	cycle_t			cycles;
+	struct clocksource	*cs;
+};
+
+/*
+ * Get cross timestamp between system clock and device clock
+ */
+extern int get_device_system_crosststamp(
+			int (*get_time_fn)(ktime_t *device_time,
+				struct system_counterval_t *system_counterval,
+				void *ctx),
+			void *ctx,
+			struct system_time_snapshot *history,
+			struct system_device_crosststamp *xtstamp);
+
+/*
+ * Simultaneously snapshot realtime and monotonic raw clocks
+ */
+extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
+
 /*
  * Persistent clock related interfaces
  */
...
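The e1000e changes earlier in this merge are the first real user of this interface. Stripped to a skeleton, a driver wires it up roughly as below; this is a hypothetical sketch: the foo_* names and helpers are illustrative, and only get_device_system_crosststamp(), convert_art_to_tsc() and the structures above come from this series.

static int foo_get_syncdevicetime(ktime_t *device,
                                  struct system_counterval_t *system,
                                  void *ctx)
{
    struct foo_adapter *foo = ctx;    /* hypothetical driver state */

    /*
     * Ask the hardware to latch its own clock and the platform counter
     * (ART) at the same instant, then report both values.
     */
    *device = foo_read_latched_device_time(foo);              /* hypothetical helper */
    *system = convert_art_to_tsc(foo_read_latched_art(foo));  /* hypothetical helper */
    return 0;
}

static int foo_getcrosststamp(struct ptp_clock_info *ptp,
                              struct system_device_crosststamp *cts)
{
    struct foo_adapter *foo = container_of(ptp, struct foo_adapter, ptp_info);

    /* NULL history: no interpolation before the current timekeeping interval */
    return get_device_system_crosststamp(foo_get_syncdevicetime,
                                         foo, NULL, cts);
}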
@@ -51,7 +51,9 @@ struct ptp_clock_caps {
 	int n_per_out; /* Number of programmable periodic signals. */
 	int pps;       /* Whether the clock supports a PPS callback. */
 	int n_pins;    /* Number of input/output pins. */
-	int rsv[14];   /* Reserved for future use. */
+	/* Whether the clock supports precise system-device cross timestamps */
+	int cross_timestamping;
+	int rsv[13];   /* Reserved for future use. */
 };
 
 struct ptp_extts_request {
@@ -81,6 +83,13 @@ struct ptp_sys_offset {
 	struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
 };
 
+struct ptp_sys_offset_precise {
+	struct ptp_clock_time device;
+	struct ptp_clock_time sys_realtime;
+	struct ptp_clock_time sys_monoraw;
+	unsigned int rsv[4];    /* Reserved for future use. */
+};
+
 enum ptp_pin_function {
 	PTP_PF_NONE,
 	PTP_PF_EXTTS,
@@ -124,6 +133,8 @@ struct ptp_pin_desc {
 #define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
 #define PTP_PIN_GETFUNC    _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
 #define PTP_PIN_SETFUNC    _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
+#define PTP_SYS_OFFSET_PRECISE \
+	_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
 
 struct ptp_extts_event {
 	struct ptp_clock_time t; /* Time event occured. */
...
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		/* cs is a watchdog. */
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+	}
+	spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+	struct clocksource *cs, *old_wd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&watchdog_lock, flags);
+	/* save current watchdog */
+	old_wd = watchdog;
+	if (fallback)
+		watchdog = NULL;
+
+	list_for_each_entry(cs, &clocksource_list, list) {
+		/* cs is a clocksource to be watched. */
+		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+			continue;
+
+		/* Skip current if we were requested for a fallback. */
+		if (fallback && cs == old_wd)
+			continue;
+
 		/* Pick the best watchdog. */
-		if (!watchdog || cs->rating > watchdog->rating) {
+		if (!watchdog || cs->rating > watchdog->rating)
 			watchdog = cs;
-			/* Reset watchdog cycles */
-			clocksource_reset_watchdog();
-		}
 	}
+	/* If we failed to find a fallback restore the old one. */
+	if (!watchdog)
+		watchdog = old_wd;
+
+	/* If we changed the watchdog we need to reset cycles. */
+	if (watchdog != old_wd)
+		clocksource_reset_watchdog();
+
 	/* Check if the watchdog timer needs to be started. */
 	clocksource_start_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 	mutex_lock(&clocksource_mutex);
 	__clocksource_change_rating(cs, rating);
 	clocksource_select();
+	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
-	/*
-	 * I really can't convince myself to support this on hardware
-	 * designed by lobotomized monkeys.
-	 */
-	if (clocksource_is_watchdog(cs))
-		return -EBUSY;
+	if (clocksource_is_watchdog(cs)) {
+		/* Select and try to install a replacement watchdog. */
+		clocksource_select_watchdog(true);
+		if (clocksource_is_watchdog(cs))
+			return -EBUSY;
+	}
 
 	if (cs == curr_clocksource) {
 		/* Select and try to install a replacement clock source */
...
@@ -68,7 +68,7 @@ static struct clocksource clocksource_jiffies = {
 	.name		= "jiffies",
 	.rating		= 1, /* lowest valid rating*/
 	.read		= jiffies_read,
-	.mask		= 0xffffffff, /*32bits*/
+	.mask		= CLOCKSOURCE_MASK(32),
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
 	.shift		= JIFFIES_SHIFT,
 	.max_cycles	= 10,
...
@@ -233,6 +233,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	u64 tmp, ntpinterval;
 	struct clocksource *old_clock;
 
+	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
 	tk->tkr_mono.read = clock->read;
@@ -298,17 +299,34 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
+static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
+					  cycle_t delta)
+{
+	s64 nsec;
+
+	nsec = delta * tkr->mult + tkr->xtime_nsec;
+	nsec >>= tkr->shift;
+
+	/* If arch requires, add in get_arch_timeoffset() */
+	return nsec + arch_gettimeoffset();
+}
+
 static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 {
 	cycle_t delta;
-	s64 nsec;
 
 	delta = timekeeping_get_delta(tkr);
+	return timekeeping_delta_to_ns(tkr, delta);
+}
 
-	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
+					   cycle_t cycles)
+{
+	cycle_t delta;
 
-	/* If arch requires, add in get_arch_timeoffset() */
-	return nsec + arch_gettimeoffset();
+	/* calculate the delta since the last update_wall_time */
+	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
+	return timekeeping_delta_to_ns(tkr, delta);
 }
 
 /**
@@ -857,44 +875,262 @@ time64_t __ktime_get_real_seconds(void)
 	return tk->xtime_sec;
 }
 
-#ifdef CONFIG_NTP_PPS
-
-/**
- * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format
- * @ts_raw:	pointer to the timespec to be set to raw monotonic time
- * @ts_real:	pointer to the timespec to be set to the time of day
- *
- * This function reads both the time of day and raw monotonic time at the
- * same time atomically and stores the resulting timestamps in timespec
- * format.
- */
-void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real)
-{
-	struct timekeeper *tk = &tk_core.timekeeper;
-	unsigned long seq;
-	s64 nsecs_raw, nsecs_real;
-
-	WARN_ON_ONCE(timekeeping_suspended);
-
-	do {
-		seq = read_seqcount_begin(&tk_core.seq);
-
-		*ts_raw = tk->raw_time;
-		ts_real->tv_sec = tk->xtime_sec;
-		ts_real->tv_nsec = 0;
-
-		nsecs_raw  = timekeeping_get_ns(&tk->tkr_raw);
-		nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
-
-	} while (read_seqcount_retry(&tk_core.seq, seq));
-
-	timespec64_add_ns(ts_raw, nsecs_raw);
-	timespec64_add_ns(ts_real, nsecs_real);
-}
-EXPORT_SYMBOL(ktime_get_raw_and_real_ts64);
-
-#endif /* CONFIG_NTP_PPS */
+/**
+ * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
+ * @systime_snapshot:	pointer to struct receiving the system time snapshot
+ */
+void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+	unsigned long seq;
+	ktime_t base_raw;
+	ktime_t base_real;
+	s64 nsec_raw;
+	s64 nsec_real;
+	cycle_t now;
+
+	WARN_ON_ONCE(timekeeping_suspended);
+
+	do {
+		seq = read_seqcount_begin(&tk_core.seq);
+
+		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
+		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
+		base_real = ktime_add(tk->tkr_mono.base,
+				      tk_core.timekeeper.offs_real);
+		base_raw = tk->tkr_raw.base;
+		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
+		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
+	} while (read_seqcount_retry(&tk_core.seq, seq));
+
+	systime_snapshot->cycles = now;
+	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
+	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
+}
+EXPORT_SYMBOL_GPL(ktime_get_snapshot);
+
+/* Scale base by mult/div checking for overflow */
+static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
+{
+	u64 tmp, rem;
+
+	tmp = div64_u64_rem(*base, div, &rem);
+
+	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
+	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
+		return -EOVERFLOW;
+	tmp *= mult;
+	rem *= mult;
+
+	do_div(rem, div);
+	*base = tmp + rem;
+	return 0;
+}
+
+/**
+ * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
+ * @history:			Snapshot representing start of history
+ * @partial_history_cycles:	Cycle offset into history (fractional part)
+ * @total_history_cycles:	Total history length in cycles
+ * @discontinuity:		True indicates clock was set on history period
+ * @ts:				Cross timestamp that should be adjusted using
+ *				partial/total ratio
+ *
+ * Helper function used by get_device_system_crosststamp() to correct the
+ * crosstimestamp corresponding to the start of the current interval to the
+ * system counter value (timestamp point) provided by the driver. The
+ * total_history_* quantities are the total history starting at the provided
+ * reference point and ending at the start of the current interval. The cycle
+ * count between the driver timestamp point and the start of the current
+ * interval is partial_history_cycles.
+ */
+static int adjust_historical_crosststamp(struct system_time_snapshot *history,
+					 cycle_t partial_history_cycles,
+					 cycle_t total_history_cycles,
+					 bool discontinuity,
+					 struct system_device_crosststamp *ts)
+{
+	struct timekeeper *tk = &tk_core.timekeeper;
+	u64 corr_raw, corr_real;
+	bool interp_forward;
+	int ret;
+
+	if (total_history_cycles == 0 || partial_history_cycles == 0)
+		return 0;
+
+	/* Interpolate shortest distance from beginning or end of history */
+	interp_forward = partial_history_cycles > total_history_cycles/2 ?
+		true : false;
+	partial_history_cycles = interp_forward ?
+		total_history_cycles - partial_history_cycles :
+		partial_history_cycles;
+
+	/*
+	 * Scale the monotonic raw time delta by:
+	 *	partial_history_cycles / total_history_cycles
+	 */
+	corr_raw = (u64)ktime_to_ns(
+		ktime_sub(ts->sys_monoraw, history->raw));
+	ret = scale64_check_overflow(partial_history_cycles,
+				     total_history_cycles, &corr_raw);
+	if (ret)
+		return ret;
+
+	/*
+	 * If there is a discontinuity in the history, scale monotonic raw
+	 *	correction by:
+	 *	mult(real)/mult(raw) yielding the realtime correction
+	 * Otherwise, calculate the realtime correction similar to monotonic
+	 *	raw calculation
+	 */
+	if (discontinuity) {
+		corr_real = mul_u64_u32_div
+			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
+	} else {
+		corr_real = (u64)ktime_to_ns(
+			ktime_sub(ts->sys_realtime, history->real));
+		ret = scale64_check_overflow(partial_history_cycles,
+					     total_history_cycles, &corr_real);
+		if (ret)
+			return ret;
+	}
+
+	/* Fixup monotonic raw and real time time values */
+	if (interp_forward) {
+		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
+		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
+	} else {
+		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
+		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
+	}
+
+	return 0;
+}
+
+/*
+ * cycle_between - true if test occurs chronologically between before and after
+ */
+static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
+{
+	if (test > before && test < after)
+		return true;
+	if (test < before && before > after)
+		return true;
+	return false;
+}
+
+/**
+ * get_device_system_crosststamp - Synchronously capture system/device timestamp
+ * @get_time_fn:	Callback to get simultaneous device time and
+ *	system counter from the device driver
+ * @ctx:		Context passed to get_time_fn()
+ * @history_begin:	Historical reference point used to interpolate system
+ *	time when counter provided by the driver is before the current interval
+ * @xtstamp:		Receives simultaneously captured system and device time
+ *
+ * Reads a timestamp from a device and correlates it to system time
+ */
+int get_device_system_crosststamp(int (*get_time_fn)
+				  (ktime_t *device_time,
+				   struct system_counterval_t *sys_counterval,
+				   void *ctx),
+				  void *ctx,
+				  struct system_time_snapshot *history_begin,
+				  struct system_device_crosststamp *xtstamp)
+{
+	struct system_counterval_t system_counterval;
+	struct timekeeper *tk = &tk_core.timekeeper;
+	cycle_t cycles, now, interval_start;
+	unsigned int clock_was_set_seq = 0;
+	ktime_t base_real, base_raw;
+	s64 nsec_real, nsec_raw;
+	u8 cs_was_changed_seq;
+	unsigned long seq;
+	bool do_interp;
+	int ret;
+
+	do {
+		seq = read_seqcount_begin(&tk_core.seq);
+		/*
+		 * Try to synchronously capture device time and a system
+		 * counter value calling back into the device driver
+		 */
+		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
+		if (ret)
+			return ret;
+
+		/*
+		 * Verify that the clocksource associated with the captured
+		 * system counter value is the same as the currently installed
+		 * timekeeper clocksource
+		 */
+		if (tk->tkr_mono.clock != system_counterval.cs)
+			return -ENODEV;
+		cycles = system_counterval.cycles;
+
+		/*
+		 * Check whether the system counter value provided by the
+		 * device driver is on the current timekeeping interval.
+		 */
+		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		interval_start = tk->tkr_mono.cycle_last;
+		if (!cycle_between(interval_start, cycles, now)) {
+			clock_was_set_seq = tk->clock_was_set_seq;
+			cs_was_changed_seq = tk->cs_was_changed_seq;
+			cycles = interval_start;
+			do_interp = true;
+		} else {
+			do_interp = false;
+		}
+
+		base_real = ktime_add(tk->tkr_mono.base,
+				      tk_core.timekeeper.offs_real);
+		base_raw = tk->tkr_raw.base;
+
+		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
+						     system_counterval.cycles);
+		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
+						    system_counterval.cycles);
+	} while (read_seqcount_retry(&tk_core.seq, seq));
+
+	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
+	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);
+
+	/*
+	 * Interpolate if necessary, adjusting back from the start of the
+	 * current interval
+	 */
+	if (do_interp) {
+		cycle_t partial_history_cycles, total_history_cycles;
+		bool discontinuity;
+
+		/*
+		 * Check that the counter value occurs after the provided
+		 * history reference and that the history doesn't cross a
+		 * clocksource change
+		 */
+		if (!history_begin ||
+		    !cycle_between(history_begin->cycles,
+				   system_counterval.cycles, cycles) ||
+		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
+			return -EINVAL;
+		partial_history_cycles = cycles - system_counterval.cycles;
+		total_history_cycles = cycles - history_begin->cycles;
+		discontinuity =
+			history_begin->clock_was_set_seq != clock_was_set_seq;
+
+		ret = adjust_historical_crosststamp(history_begin,
+						    partial_history_cycles,
+						    total_history_cycles,
+						    discontinuity, xtstamp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
 
 /**
  * do_gettimeofday - Returns the time of day in a timeval
...
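The historical adjustment above reduces to scaling a time delta by the ratio partial_history_cycles / total_history_cycles, using the same split multiply as scale64_check_overflow() to stay within 64 bits. A standalone user-space sketch of that scaling (made-up numbers, overflow check omitted, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t scale(uint64_t base, uint64_t mult, uint64_t div)
{
    uint64_t q = base / div, r = base % div;

    return q * mult + r * mult / div;    /* base * mult / div */
}

int main(void)
{
    uint64_t history_ns = 2000000000ULL;    /* 2 s of accumulated history */
    uint64_t partial = 300, total = 1000;   /* cycle counts */

    printf("correction = %llu ns\n",
           (unsigned long long)scale(history_ns, partial, total));
    return 0;
}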