Commit 5115ec96 authored by David S. Miller

Merge branch 'timecounter'

Richard Cochran says:

====================
Time Counter fixes and improvements

Several PTP Hardware Clock (PHC) drivers implement the clock in
software using the timecounter/cyclecounter code. This series adds one
simple improvement and one more subtle fix to the shared timecounter
facility. Credit for this series goes to Janusz Użycki, who pointed
the issues out to me off list.

Patch #1 simply moves the timecounter code into its own file. When
working on this series, it was really annoying to see half the kernel
recompile after every tweak to the timecounter stuff. There is no
reason to keep this together with the clocksource code.

Patch #2 implements an improved adjtime() method, and patches 3-10
convert all of the drivers over to the new method.

Patch #11 fixes a subtle but important issue with the timecounter WRT
frequency adjustment. As it stands now, a timecounter-based PHC will
exhibit a variable frequency resolution (and variable time error)
depending on how often the clock is read.

In timecounter_read_delta(), the expression

   (delta * cc->mult) >> cc->shift;

can lose resolution from the adjusted value of 'mult'. If the value
of 'delta' is too small, then small changes in 'mult' have no effect
on the result; only when 'delta' is large enough do those small
changes in 'mult' make a difference.
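
To make the effect concrete, here is a small stand-alone illustration
(not kernel code; the names and the ~1 ppm tweak are made up, only the
mult/shift values mirror the CPTS example below):

   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
           uint64_t mult     = 0x80000000ULL;  /* nominal multiplier (2^31)   */
           uint64_t mult_adj = mult + 2048;    /* ~1 ppm frequency adjustment */
           uint32_t shift    = 29;

           uint64_t small_delta = 250;         /* 1 us worth of 250 MHz ticks */
           uint64_t large_delta = 250000000;   /* 1 s  worth of 250 MHz ticks */

           /* Small delta: the 2048-count tweak is shifted away entirely. */
           printf("small: %llu vs %llu ns\n",
                  (unsigned long long)((small_delta * mult) >> shift),
                  (unsigned long long)((small_delta * mult_adj) >> shift));

           /* Large delta: the same tweak is worth ~953 ns over one second. */
           printf("large: %llu vs %llu ns\n",
                  (unsigned long long)((large_delta * mult) >> shift),
                  (unsigned long long)((large_delta * mult_adj) >> shift));

           return 0;
   }

With the small delta both conversions print 1000 ns; only with the
large delta does the adjusted 'mult' show up in the result.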

Reading the clock too often means smaller 'delta' values, which in
turn spoil the fine adjustments made to 'mult'. Up until now, this
effect did not show up in my testing. The following example explains
why.

The CPTS has an input clock of 250 MHz, and the clock source uses
mult=0x80000000 and shift=29, so the ticks-to-nanoseconds
conversion looks like this:

   ticks * 2^31
   ------------
       2^29

Imagine what happens if the clock is read every 10 milliseconds. Ten
milliseconds is about 2500000 ticks, which corresponds to about 21
bits. The product in the numerator then has 52 bits. After the shift
operation, 23 bits are preserved. This results in a frequency
adjustment resolution of about 0.1 ppm (not _too_ bad).
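
Spelling that 0.1 ppm figure out (my arithmetic, just unpacking the
bit counts above): the smallest change in 'mult' that survives the
shift must satisfy delta * d_mult >= 2^29, so

   d_mult >= 2^29 / 2500000 ~= 215,  and  215 / 2^31 ~= 1e-7 = 0.1 ppm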

A frequency resolution of 1 ppm requires 20 bits.
A frequency resolution of 1 ppb requires 30 bits.

For the 250 MHz CPTS clock, reading every 4 seconds yields a 1 ppb
resolution (which is the finest that our API allows).

However, the error can be much higher if the clock is read too often
or if time stamps occur close in time to read operations. In general
it is really not acceptable to allow the rate of clock readings to
influence the clock accuracy.
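
The fix in patch #11 keeps those discarded bits by carrying a
fractional-nanosecond remainder from one conversion to the next. As a
toy sketch of the idea (illustration only, with made-up names; the
real change is the new cyclecounter_cyc2ns() further down):

   #include <stdint.h>

   struct toy_counter {
           uint64_t mult;   /* cycle-to-nanosecond multiplier           */
           uint32_t shift;  /* cycle-to-nanosecond divisor (power of 2) */
           uint64_t frac;   /* accumulated fractional nanoseconds       */
   };

   static uint64_t toy_cyc2ns(struct toy_counter *tc, uint64_t cycles)
   {
           uint64_t ns = cycles * tc->mult + tc->frac;

           /* Keep the remainder instead of throwing it away, so many
            * small deltas accumulate to the same total as one big one.
            */
           tc->frac = ns & ((1ULL << tc->shift) - 1);
           return ns >> tc->shift;
   }

With the remainder carried along, the fine adjustments to 'mult' are
preserved no matter how often the clock is read.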
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2c90331c 2eebdde6
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
                                            struct xgbe_prv_data,
                                            ptp_clock_info);
         unsigned long flags;
-        u64 nsec;
         spin_lock_irqsave(&pdata->tstamp_lock, flags);
-        nsec = timecounter_read(&pdata->tstamp_tc);
-        nsec += delta;
-        timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+        timecounter_adjtime(&pdata->tstamp_tc, delta);
         spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
         return 0;
...
@@ -124,7 +124,7 @@
 #include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/ptp_clock_kernel.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <net/dcbnl.h>
...
@@ -22,7 +22,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 /* compilation time flags */
...
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
-        u64 now;
         DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
-        now = timecounter_read(&bp->timecounter);
-        now += delta;
-        /* Re-init the timecounter */
-        timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+        timecounter_adjtime(&bp->timecounter, delta);
         return 0;
 }
...
@@ -16,6 +16,7 @@
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
     defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
...
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
         struct fec_enet_private *fep =
             container_of(ptp, struct fec_enet_private, ptp_caps);
         unsigned long flags;
-        u64 now;
-        u32 counter;
         spin_lock_irqsave(&fep->tmreg_lock, flags);
-        now = timecounter_read(&fep->tc);
-        now += delta;
-        /* Get the timer value based on adjusted timestamp.
-         * Update the counter with the masked value.
-         */
-        counter = now & fep->cc.mask;
-        writel(counter, fep->hwp + FEC_ATIME);
-        /* reset the timecounter */
-        timecounter_init(&fep->tc, &fep->cc, now);
+        timecounter_adjtime(&fep->tc, delta);
         spin_unlock_irqrestore(&fep->tmreg_lock, flags);
         return 0;
...
@@ -34,7 +34,7 @@
 #include <linux/pci-aspm.h>
 #include <linux/crc32.h>
 #include <linux/if_vlan.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_classify.h>
...
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
         struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
                                                      ptp_clock_info);
         unsigned long flags;
-        s64 now;
         spin_lock_irqsave(&adapter->systim_lock, flags);
-        now = timecounter_read(&adapter->tc);
-        now += delta;
-        timecounter_init(&adapter->tc, &adapter->cc, now);
+        timecounter_adjtime(&adapter->tc, delta);
         spin_unlock_irqrestore(&adapter->systim_lock, flags);
         return 0;
...
@@ -29,7 +29,7 @@
 #include "e1000_mac.h"
 #include "e1000_82575.h"
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/bitops.h>
...
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
         struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
                                                ptp_caps);
         unsigned long flags;
-        s64 now;
         spin_lock_irqsave(&igb->tmreg_lock, flags);
-        now = timecounter_read(&igb->tc);
-        now += delta;
-        timecounter_init(&igb->tc, &igb->cc, now);
+        timecounter_adjtime(&igb->tc, delta);
         spin_unlock_irqrestore(&igb->tmreg_lock, flags);
         return 0;
...
@@ -38,7 +38,7 @@
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
...
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
         struct ixgbe_adapter *adapter =
                 container_of(ptp, struct ixgbe_adapter, ptp_caps);
         unsigned long flags;
-        u64 now;
         spin_lock_irqsave(&adapter->tmreg_lock, flags);
-        now = timecounter_read(&adapter->tc);
-        now += delta;
-        /* reset the timecounter */
-        timecounter_init(&adapter->tc,
-                         &adapter->cc,
-                         now);
+        timecounter_adjtime(&adapter->tc, delta);
         spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
         ixgbe_ptp_setup_sdp(adapter);
...
@@ -147,12 +147,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
         struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
                                                 ptp_clock_info);
         unsigned long flags;
-        s64 now;
         write_lock_irqsave(&mdev->clock_lock, flags);
-        now = timecounter_read(&mdev->clock);
-        now += delta;
-        timecounter_init(&mdev->clock, &mdev->cycles, now);
+        timecounter_adjtime(&mdev->clock, delta);
         write_unlock_irqrestore(&mdev->clock_lock, flags);
         return 0;
@@ -243,7 +240,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
         struct mlx4_dev *dev = mdev->dev;
         unsigned long flags;
-        u64 ns;
+        u64 ns, zero = 0;
         rwlock_init(&mdev->clock_lock);
@@ -268,7 +265,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
         /* Calculate period in seconds to call the overflow watchdog - to make
          * sure counter is checked at least once every wrap around.
          */
-        ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
+        ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
         do_div(ns, NSEC_PER_SEC / 2 / HZ);
         mdev->overflow_period = ns;
...
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
-        s64 now;
         unsigned long flags;
         struct cpts *cpts = container_of(ptp, struct cpts, info);
         spin_lock_irqsave(&cpts->lock, flags);
-        now = timecounter_read(&cpts->tc);
-        now += delta;
-        timecounter_init(&cpts->tc, &cpts->cc, now);
+        timecounter_adjtime(&cpts->tc, delta);
         spin_unlock_irqrestore(&cpts->lock, flags);
         return 0;
...
@@ -27,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/skbuff.h>
+#include <linux/timecounter.h>
 struct cpsw_cpts {
         u32 idver;       /* Identification and version */
...
@@ -16,7 +16,7 @@
 #ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
 #define __CLKSOURCE_ARM_ARCH_TIMER_H
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <linux/types.h>
 #define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
...
@@ -18,8 +18,6 @@
 #include <asm/div64.h>
 #include <asm/io.h>
-/* clocksource cycle base type */
-typedef u64 cycle_t;
 struct clocksource;
 struct module;
@@ -27,106 +25,6 @@ struct module;
 #include <asm/clocksource.h>
 #endif
-/**
- * struct cyclecounter - hardware abstraction for a free running counter
- *      Provides completely state-free accessors to the underlying hardware.
- *      Depending on which hardware it reads, the cycle counter may wrap
- *      around quickly. Locking rules (if necessary) have to be defined
- *      by the implementor and user of specific instances of this API.
- *
- * @read:      returns the current cycle value
- * @mask:      bitmask for two's complement
- *             subtraction of non 64 bit counters,
- *             see CLOCKSOURCE_MASK() helper macro
- * @mult:      cycle to nanosecond multiplier
- * @shift:     cycle to nanosecond divisor (power of two)
- */
-struct cyclecounter {
-        cycle_t (*read)(const struct cyclecounter *cc);
-        cycle_t mask;
-        u32 mult;
-        u32 shift;
-};
-
-/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
- *      Contains the state needed by timecounter_read() to detect
- *      cycle counter wrap around. Initialize with
- *      timecounter_init(). Also used to convert cycle counts into the
- *      corresponding nanosecond counts with timecounter_cyc2time(). Users
- *      of this code are responsible for initializing the underlying
- *      cycle counter hardware, locking issues and reading the time
- *      more often than the cycle counter wraps around. The nanosecond
- *      counter will only wrap around after ~585 years.
- *
- * @cc:         the cycle counter used by this instance
- * @cycle_last: most recent cycle counter value seen by
- *              timecounter_read()
- * @nsec:       continuously increasing count
- */
-struct timecounter {
-        const struct cyclecounter *cc;
-        cycle_t cycle_last;
-        u64 nsec;
-};
-
-/**
- * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @cc:         Pointer to cycle counter.
- * @cycles:     Cycles
- *
- * XXX - This could use some mult_lxl_ll() asm optimization. Same code
- * as in cyc2ns, but with unsigned result.
- */
-static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
-                                      cycle_t cycles)
-{
-        u64 ret = (u64)cycles;
-        ret = (ret * cc->mult) >> cc->shift;
-        return ret;
-}
-
-/**
- * timecounter_init - initialize a time counter
- * @tc:           Pointer to time counter which is to be initialized/reset
- * @cc:           A cycle counter, ready to be used.
- * @start_tstamp: Arbitrary initial time stamp.
- *
- * After this call the current cycle register (roughly) corresponds to
- * the initial time stamp. Every call to timecounter_read() increments
- * the time stamp counter by the number of elapsed nanoseconds.
- */
-extern void timecounter_init(struct timecounter *tc,
-                             const struct cyclecounter *cc,
-                             u64 start_tstamp);
-
-/**
- * timecounter_read - return nanoseconds elapsed since timecounter_init()
- *                    plus the initial time stamp
- * @tc:          Pointer to time counter.
- *
- * In other words, keeps track of time since the same epoch as
- * the function which generated the initial time stamp.
- */
-extern u64 timecounter_read(struct timecounter *tc);
-
-/**
- * timecounter_cyc2time - convert a cycle counter to same
- *                        time base as values returned by
- *                        timecounter_read()
- * @tc:           Pointer to time counter.
- * @cycle_tstamp: a value returned by tc->cc->read()
- *
- * Cycle counts that are converted correctly as long as they
- * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
- * with "max cycle count" == cs->mask+1.
- *
- * This allows conversion of cycle counter values which were generated
- * in the past.
- */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
-                                cycle_t cycle_tstamp);
-
 /**
  * struct clocksource - hardware abstraction for a free running counter
  *      Provides mostly state-free accessors to the underlying hardware.
...
@@ -42,7 +42,7 @@
 #include <linux/atomic.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #define MAX_MSIX_P_PORT         17
 #define MAX_MSIX                64
...
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ *      Provides completely state-free accessors to the underlying hardware.
+ *      Depending on which hardware it reads, the cycle counter may wrap
+ *      around quickly. Locking rules (if necessary) have to be defined
+ *      by the implementor and user of specific instances of this API.
+ *
+ * @read:      returns the current cycle value
+ * @mask:      bitmask for two's complement
+ *             subtraction of non 64 bit counters,
+ *             see CLOCKSOURCE_MASK() helper macro
+ * @mult:      cycle to nanosecond multiplier
+ * @shift:     cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+        cycle_t (*read)(const struct cyclecounter *cc);
+        cycle_t mask;
+        u32 mult;
+        u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ *      Contains the state needed by timecounter_read() to detect
+ *      cycle counter wrap around. Initialize with
+ *      timecounter_init(). Also used to convert cycle counts into the
+ *      corresponding nanosecond counts with timecounter_cyc2time(). Users
+ *      of this code are responsible for initializing the underlying
+ *      cycle counter hardware, locking issues and reading the time
+ *      more often than the cycle counter wraps around. The nanosecond
+ *      counter will only wrap around after ~585 years.
+ *
+ * @cc:         the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ *              timecounter_read()
+ * @nsec:       continuously increasing count
+ * @mask:       bit mask for maintaining the 'frac' field
+ * @frac:       accumulated fractional nanoseconds
+ */
+struct timecounter {
+        const struct cyclecounter *cc;
+        cycle_t cycle_last;
+        u64 nsec;
+        u64 mask;
+        u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc:         Pointer to cycle counter.
+ * @cycles:     Cycles
+ * @mask:       bit mask for maintaining the 'frac' field
+ * @frac:       pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+                                      cycle_t cycles, u64 mask, u64 *frac)
+{
+        u64 ns = (u64) cycles;
+
+        ns = (ns * cc->mult) + *frac;
+        *frac = ns & mask;
+        return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @delta:      Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+        tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc:           Pointer to time counter which is to be initialized/reset
+ * @cc:           A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+                             const struct cyclecounter *cc,
+                             u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ *                    plus the initial time stamp
+ * @tc:          Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ *                        time base as values returned by
+ *                        timecounter_read()
+ * @tc:           Pointer to time counter.
+ * @cycle_tstamp: a value returned by tc->cc->read()
+ *
+ * Cycle counts that are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cs->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+                                cycle_t cycle_tstamp);
+
+#endif
@@ -213,5 +213,8 @@ struct callback_head {
 };
 #define rcu_head callback_head
+
+/* clocksource cycle base type */
+typedef u64 cycle_t;
 #endif /* __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
 obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)         += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)               += tick-common.o
...
@@ -34,82 +34,6 @@
 #include "tick-internal.h"
 #include "timekeeping_internal.h"
-void timecounter_init(struct timecounter *tc,
-                      const struct cyclecounter *cc,
-                      u64 start_tstamp)
-{
-        tc->cc = cc;
-        tc->cycle_last = cc->read(cc);
-        tc->nsec = start_tstamp;
-}
-EXPORT_SYMBOL_GPL(timecounter_init);
-
-/**
- * timecounter_read_delta - get nanoseconds since last call of this function
- * @tc:         Pointer to time counter
- *
- * When the underlying cycle counter runs over, this will be handled
- * correctly as long as it does not run over more than once between
- * calls.
- *
- * The first call to this function for a new time counter initializes
- * the time tracking and returns an undefined result.
- */
-static u64 timecounter_read_delta(struct timecounter *tc)
-{
-        cycle_t cycle_now, cycle_delta;
-        u64 ns_offset;
-
-        /* read cycle counter: */
-        cycle_now = tc->cc->read(tc->cc);
-
-        /* calculate the delta since the last timecounter_read_delta(): */
-        cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
-
-        /* convert to nanoseconds: */
-        ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
-
-        /* update time stamp of timecounter_read_delta() call: */
-        tc->cycle_last = cycle_now;
-
-        return ns_offset;
-}
-
-u64 timecounter_read(struct timecounter *tc)
-{
-        u64 nsec;
-
-        /* increment time by nanoseconds since last call */
-        nsec = timecounter_read_delta(tc);
-        nsec += tc->nsec;
-        tc->nsec = nsec;
-
-        return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_read);
-
-u64 timecounter_cyc2time(struct timecounter *tc,
-                         cycle_t cycle_tstamp)
-{
-        u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
-        u64 nsec;
-
-        /*
-         * Instead of always treating cycle_tstamp as more recent
-         * than tc->cycle_last, detect when it is too far in the
-         * future and treat it as old time stamp instead.
-         */
-        if (cycle_delta > tc->cc->mask / 2) {
-                cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
-                nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
-        } else {
-                nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
-        }
-
-        return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_cyc2time);
-
 /**
  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
  * @mult:       pointer to mult variable
...
+/*
+ * linux/kernel/time/timecounter.c
+ *
+ * based on code that migrated away from
+ * linux/kernel/time/clocksource.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/export.h>
+#include <linux/timecounter.h>
+
+void timecounter_init(struct timecounter *tc,
+                      const struct cyclecounter *cc,
+                      u64 start_tstamp)
+{
+        tc->cc = cc;
+        tc->cycle_last = cc->read(cc);
+        tc->nsec = start_tstamp;
+        tc->mask = (1ULL << cc->shift) - 1;
+        tc->frac = 0;
+}
+EXPORT_SYMBOL_GPL(timecounter_init);
+
+/**
+ * timecounter_read_delta - get nanoseconds since last call of this function
+ * @tc:         Pointer to time counter
+ *
+ * When the underlying cycle counter runs over, this will be handled
+ * correctly as long as it does not run over more than once between
+ * calls.
+ *
+ * The first call to this function for a new time counter initializes
+ * the time tracking and returns an undefined result.
+ */
+static u64 timecounter_read_delta(struct timecounter *tc)
+{
+        cycle_t cycle_now, cycle_delta;
+        u64 ns_offset;
+
+        /* read cycle counter: */
+        cycle_now = tc->cc->read(tc->cc);
+
+        /* calculate the delta since the last timecounter_read_delta(): */
+        cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+        /* convert to nanoseconds: */
+        ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta,
+                                        tc->mask, &tc->frac);
+
+        /* update time stamp of timecounter_read_delta() call: */
+        tc->cycle_last = cycle_now;
+
+        return ns_offset;
+}
+
+u64 timecounter_read(struct timecounter *tc)
+{
+        u64 nsec;
+
+        /* increment time by nanoseconds since last call */
+        nsec = timecounter_read_delta(tc);
+        nsec += tc->nsec;
+        tc->nsec = nsec;
+
+        return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_read);
+
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
+                               cycle_t cycles, u64 mask, u64 frac)
+{
+        u64 ns = (u64) cycles;
+
+        ns = ((ns * cc->mult) - frac) >> cc->shift;
+
+        return ns;
+}
+
+u64 timecounter_cyc2time(struct timecounter *tc,
+                         cycle_t cycle_tstamp)
+{
+        u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
+        u64 nsec = tc->nsec, frac = tc->frac;
+
+        /*
+         * Instead of always treating cycle_tstamp as more recent
+         * than tc->cycle_last, detect when it is too far in the
+         * future and treat it as old time stamp instead.
+         */
+        if (delta > tc->cc->mask / 2) {
+                delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
+                nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
+        } else {
+                nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
+        }
+
+        return nsec;
+}
+EXPORT_SYMBOL_GPL(timecounter_cyc2time);
@@ -15,7 +15,7 @@
 #ifndef __SOUND_HDA_PRIV_H
 #define __SOUND_HDA_PRIV_H
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
...
@@ -152,7 +152,8 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
                 return;
         }
-        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
+                                 &timecounter->frac);
         timer_arm(timer, ns);
 }
...