nexedi / linux · commit 97ac984d

Commit 97ac984d, authored Feb 15, 2012 by Thomas Gleixner

    Merge branch 'fortglx/3.4/time' of
    git://git.linaro.org/people/jstultz/linux into timers/core

Parents: 8682df25, cc06268c

Showing 3 changed files with 248 additions and 190 deletions:

    include/linux/timex.h         +1   -16
    kernel/time/ntp.c             +63  -20
    kernel/time/timekeeping.c     +184 -154
include/linux/timex.h

@@ -234,23 +234,9 @@ struct timex {
 extern unsigned long tick_usec;		/* USER_HZ period (usec) */
 extern unsigned long tick_nsec;		/* ACTHZ period (nsec) */
 
-/*
- * phase-lock loop variables
- */
-extern int time_status;		/* clock synchronization status bits */
-
 extern void ntp_init(void);
 extern void ntp_clear(void);
 
-/**
- * ntp_synced - Returns 1 if the NTP status is not UNSYNC
- *
- */
-static inline int ntp_synced(void)
-{
-	return !(time_status & STA_UNSYNC);
-}
-
 /* Required to safely shift negative values */
 #define shift_right(x, s) ({	\
 	__typeof__(x) __x = (x);	\

@@ -264,10 +250,9 @@ static inline int ntp_synced(void)
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
-extern u64 tick_length;
+extern u64 ntp_tick_length(void);
 
 extern void second_overflow(void);
-extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
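Note on the header change above: the exported tick_length variable is replaced by an ntp_tick_length() accessor, so consumers of the value no longer need to know or take the lock that guards NTP state. A minimal user-space sketch of that pattern follows; the sample_* names and the pthread mutex are stand-ins for illustration, not kernel interfaces.

/*
 * Illustrative sketch only: a file-private value exposed through a locked
 * accessor, mirroring how ntp_tick_length() wraps the now-static tick_length.
 * The sample_* names and the pthread mutex are invented, not kernel code.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sample_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sample_tick_length;     /* private, like the static tick_length */

/* The only way other code reads the value; the lock stays an internal detail. */
static uint64_t sample_tick_length_get(void)
{
        uint64_t ret;

        pthread_mutex_lock(&sample_lock);
        ret = sample_tick_length;
        pthread_mutex_unlock(&sample_lock);
        return ret;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)sample_tick_length_get());
        return 0;
}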
kernel/time/ntp.c

@@ -22,13 +22,16 @@
  * NTP timekeeping variables:
  */
 
+DEFINE_SPINLOCK(ntp_lock);
+
+
 /* USER_HZ period (usecs): */
 unsigned long		tick_usec = TICK_USEC;
 
 /* ACTHZ period (nsecs): */
 unsigned long		tick_nsec;
 
-u64			tick_length;
+static u64		tick_length;
 static u64		tick_length_base;
 
 static struct hrtimer	leap_timer;

@@ -49,7 +52,7 @@ static struct hrtimer leap_timer;
 static int		time_state = TIME_OK;
 
 /* clock status bits: */
-int			time_status = STA_UNSYNC;
+static int		time_status = STA_UNSYNC;
 
 /* TAI offset (secs): */
 static long		time_tai;

@@ -133,7 +136,7 @@ static inline void pps_reset_freq_interval(void)
 /**
  * pps_clear - Clears the PPS state variables
  *
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
  */
 static inline void pps_clear(void)
 {

@@ -149,7 +152,7 @@ static inline void pps_clear(void)
  * the last PPS signal. When it reaches 0, indicate that PPS signal is
  * missing.
  *
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
  */
 static inline void pps_dec_valid(void)
 {

@@ -233,6 +236,17 @@ static inline void pps_fill_timex(struct timex *txc)
 #endif	/* CONFIG_NTP_PPS */
 
+/**
+ * ntp_synced - Returns 1 if the NTP status is not UNSYNC
+ *
+ */
+static inline int ntp_synced(void)
+{
+	return !(time_status & STA_UNSYNC);
+}
+
 /*
  * NTP methods:
  */

@@ -330,11 +344,13 @@ static void ntp_update_offset(long offset)
 /**
  * ntp_clear - Clears the NTP state variables
- *
- * Must be called while holding a write on the xtime_lock
  */
 void ntp_clear(void)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&ntp_lock, flags);
+
 	time_adjust	= 0;		/* stop active adjtime() */
 	time_status	|= STA_UNSYNC;
 	time_maxerror	= NTP_PHASE_LIMIT;

@@ -347,8 +363,23 @@ void ntp_clear(void)
 
 	/* Clear PPS state variables */
 	pps_clear();
+	spin_unlock_irqrestore(&ntp_lock, flags);
 }
 
+u64 ntp_tick_length(void)
+{
+	unsigned long flags;
+	s64 ret;
+
+	spin_lock_irqsave(&ntp_lock, flags);
+	ret = tick_length;
+	spin_unlock_irqrestore(&ntp_lock, flags);
+	return ret;
+}
+
 /*
  * Leap second processing. If in leap-insert state at the end of the
  * day, the system clock is set back one second; if in leap-delete

@@ -357,14 +388,15 @@ void ntp_clear(void)
 static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
 	enum hrtimer_restart res = HRTIMER_NORESTART;
+	unsigned long flags;
+	int leap = 0;
 
-	write_seqlock(&xtime_lock);
-
+	spin_lock_irqsave(&ntp_lock, flags);
 	switch (time_state) {
 	case TIME_OK:
 		break;
 	case TIME_INS:
-		timekeeping_leap_insert(-1);
+		leap = -1;
 		time_state = TIME_OOP;
 		printk(KERN_NOTICE
 			"Clock: inserting leap second 23:59:60 UTC\n");

@@ -372,7 +404,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
-		timekeeping_leap_insert(1);
+		leap = 1;
 		time_tai--;
 		time_state = TIME_WAIT;
 		printk(KERN_NOTICE

@@ -387,8 +419,14 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_state = TIME_OK;
 		break;
 	}
-	write_sequnlock(&xtime_lock);
+	spin_unlock_irqrestore(&ntp_lock, flags);
 
+	/*
+	 * We have to call this outside of the ntp_lock to keep
+	 * the proper locking hierarchy
+	 */
+	if (leap)
+		timekeeping_leap_insert(leap);
+
 	return res;
 }

@@ -404,6 +442,9 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 void second_overflow(void)
 {
 	s64 delta;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ntp_lock, flags);
 
 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;

@@ -423,23 +464,25 @@ void second_overflow(void)
 		pps_dec_valid();
 
 	if (!time_adjust)
-		return;
+		goto out;
 
 	if (time_adjust > MAX_TICKADJ) {
 		time_adjust -= MAX_TICKADJ;
 		tick_length += MAX_TICKADJ_SCALED;
-		return;
+		goto out;
 	}
 
 	if (time_adjust < -MAX_TICKADJ) {
 		time_adjust += MAX_TICKADJ;
 		tick_length -= MAX_TICKADJ_SCALED;
-		return;
+		goto out;
 	}
 
 	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
 							<< NTP_SCALE_SHIFT;
 	time_adjust = 0;
+
+out:
+	spin_unlock_irqrestore(&ntp_lock, flags);
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE

@@ -663,7 +706,7 @@ int do_adjtimex(struct timex *txc)
 		getnstimeofday(&ts);
 
-	write_seqlock_irq(&xtime_lock);
+	spin_lock_irq(&ntp_lock);
 
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;

@@ -705,7 +748,7 @@ int do_adjtimex(struct timex *txc)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	write_sequnlock_irq(&xtime_lock);
+	spin_unlock_irq(&ntp_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
 	txc->time.tv_usec = ts.tv_nsec;

@@ -903,7 +946,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	pts_norm = pps_normalize_ts(*phase_ts);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	spin_lock_irqsave(&ntp_lock, flags);
 
 	/* clear the error bits, they will be set again if needed */
 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

@@ -916,7 +959,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	 * just start the frequency interval */
 	if (unlikely(pps_fbase.tv_sec == 0)) {
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&ntp_lock, flags);
 		return;
 	}

@@ -931,7 +974,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		write_sequnlock_irqrestore(&xtime_lock, flags);
+		spin_unlock_irqrestore(&ntp_lock, flags);
 		pr_err("hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}

@@ -948,7 +991,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	hardpps_update_phase(pts_norm.nsec);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	spin_unlock_irqrestore(&ntp_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
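One detail in the ntp_leap_second() hunks above: the leap decision is only recorded under ntp_lock, and timekeeping_leap_insert() is called after the unlock, because that function takes the timekeeper's own lock and calling it with ntp_lock held would invert the locking hierarchy. Below is a small user-space sketch of the same "decide under the lock, act after dropping it" shape; every name in it is invented for the example.

/*
 * Illustrative sketch only: record a decision under one lock, act on it after
 * dropping that lock, the way ntp_leap_second() defers timekeeping_leap_insert().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_leap;                /* protected by state_lock */

static void apply_leap(int leap)        /* would take other locks of its own */
{
        printf("applying leap %d\n", leap);
}

static void on_timer(void)
{
        int leap = 0;

        pthread_mutex_lock(&state_lock);
        if (pending_leap) {             /* only decide here, do not call out */
                leap = pending_leap;
                pending_leap = 0;
        }
        pthread_mutex_unlock(&state_lock);

        if (leap)                       /* call out with state_lock dropped */
                apply_leap(leap);
}

int main(void)
{
        pending_leap = -1;              /* single-threaded setup for the demo */
        on_timer();
        return 0;
}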
kernel/time/timekeeping.c

@@ -25,6 +25,8 @@
 struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
+	/* NTP adjusted clock multiplier */
+	u32	mult;
 	/* The shift value of the current clocksource. */
 	int	shift;

@@ -45,12 +47,47 @@ struct timekeeper {
 	/* Shift conversion between clock shifted nano seconds and
 	 * ntp shifted nano seconds. */
 	int	ntp_error_shift;
-	/* NTP adjusted clock multiplier */
-	u32	mult;
+
+	/* The current time */
+	struct timespec xtime;
+	/*
+	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
+	 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
+	 * at zero at system boot time, so wall_to_monotonic will be negative,
+	 * however, we will ALWAYS keep the tv_nsec part positive so we can use
+	 * the usual normalization.
+	 *
+	 * wall_to_monotonic is moved after resume from suspend for the
+	 * monotonic time not to jump. We need to add total_sleep_time to
+	 * wall_to_monotonic to get the real boot based time offset.
+	 *
+	 * - wall_to_monotonic is no longer the boot time, getboottime must be
+	 * used instead.
+	 */
+	struct timespec wall_to_monotonic;
+	/* time spent in suspend */
+	struct timespec total_sleep_time;
+	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+	struct timespec raw_time;
+
+	/* Seqlock for all timekeeper values */
+	seqlock_t lock;
 };
 
 static struct timekeeper timekeeper;
 
+/*
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
  *

@@ -135,47 +172,28 @@ static inline s64 timekeeping_get_ns_raw(void)
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-/*
- * The current time
- * wall_to_monotonic is what we need to add to xtime (or xtime corrected
- * for sub jiffie times) to get to monotonic time. Monotonic is pegged
- * at zero at system boot time, so wall_to_monotonic will be negative,
- * however, we will ALWAYS keep the tv_nsec part positive so we can use
- * the usual normalization.
- *
- * wall_to_monotonic is moved after resume from suspend for the monotonic
- * time not to jump. We need to add total_sleep_time to wall_to_monotonic
- * to get the real boot based time offset.
- *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
- */
-static struct timespec xtime __attribute__ ((aligned (16)));
-static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
-static struct timespec total_sleep_time;
-
-/*
- * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
- */
-static struct timespec raw_time;
+/* must hold write on timekeeper.lock */
+static void timekeeping_update(bool clearntp)
+{
+	if (clearntp) {
+		timekeeper.ntp_error = 0;
+		ntp_clear();
+	}
+	update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
+			timekeeper.clock, timekeeper.mult);
+}
 
-/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
-	xtime.tv_sec += leapsecond;
-	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	unsigned long flags;
+
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+	timekeeper.xtime.tv_sec += leapsecond;
+	timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
+	timekeeping_update(false);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 }
 
 /**

@@ -202,10 +220,10 @@ static void timekeeping_forward_now(void)
 	/* If arch requires, add in gettimeoffset() */
 	nsec += arch_gettimeoffset();
 
-	timespec_add_ns(&xtime, nsec);
+	timespec_add_ns(&timekeeper.xtime, nsec);
 
 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-	timespec_add_ns(&raw_time, nsec);
+	timespec_add_ns(&timekeeper.raw_time, nsec);
 }
 
 /**

@@ -222,15 +240,15 @@ void getnstimeofday(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts = xtime;
+		*ts = timekeeper.xtime;
 		nsecs = timekeeping_get_ns();
 
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
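The getnstimeofday() hunk above shows the reader side of the conversion: the retry loop now runs against timekeeper.lock instead of the global xtime_lock, snapshotting every field it needs inside one read_seqbegin()/read_seqretry() pass. The sketch below imitates that loop shape with a toy user-space sequence counter; it is illustrative only, not the kernel's seqlock implementation, and a real one needs memory barriers around the data reads.

/*
 * Illustrative sketch only: the read-retry loop shape used against
 * timekeeper.lock, built on a toy sequence counter (even = stable,
 * odd = writer active). All names here are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_timekeeper {
        atomic_uint seq;                /* even = stable, odd = writer active */
        long xtime_sec;
        long xtime_nsec;
};

static struct toy_timekeeper tk;

static unsigned toy_read_begin(struct toy_timekeeper *t)
{
        unsigned s;

        while ((s = atomic_load(&t->seq)) & 1)
                ;                       /* writer in progress, wait */
        return s;
}

static int toy_read_retry(struct toy_timekeeper *t, unsigned s)
{
        return atomic_load(&t->seq) != s;       /* a writer ran, retry */
}

static void toy_getnstimeofday(long *sec, long *nsec)
{
        unsigned seq;

        do {
                seq = toy_read_begin(&tk);
                *sec = tk.xtime_sec;    /* snapshot every field ...        */
                *nsec = tk.xtime_nsec;  /* ... within one begin/retry pass */
        } while (toy_read_retry(&tk, seq));
}

int main(void)
{
        long sec, nsec;

        toy_getnstimeofday(&sec, &nsec);
        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}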
@@ -245,14 +263,16 @@ ktime_t ktime_get(void)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
-		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+		seq = read_seqbegin(&timekeeper.lock);
+		secs = timekeeper.xtime.tv_sec +
+				timekeeper.wall_to_monotonic.tv_sec;
+		nsecs = timekeeper.xtime.tv_nsec +
+				timekeeper.wall_to_monotonic.tv_nsec;
 		nsecs += timekeeping_get_ns();
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 	/*
 	 * Use ktime_set/ktime_add_ns to create a proper ktime on
 	 * 32-bit architectures without CONFIG_KTIME_SCALAR.

@@ -278,14 +298,14 @@ void ktime_get_ts(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
 		nsecs = timekeeping_get_ns();
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
 				ts->tv_nsec + tomono.tv_nsec + nsecs);

@@ -313,10 +333,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 	do {
 		u32 arch_offset;
 
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts_raw = raw_time;
-		*ts_real = xtime;
+		*ts_raw = timekeeper.raw_time;
+		*ts_real = timekeeper.xtime;
 
 		nsecs_raw = timekeeping_get_ns_raw();
 		nsecs_real = timekeeping_get_ns();

@@ -326,7 +346,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 		nsecs_raw += arch_offset;
 		nsecs_real += arch_offset;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts_raw, nsecs_raw);
 	timespec_add_ns(ts_real, nsecs_real);

@@ -365,23 +385,19 @@ int do_settimeofday(const struct timespec *tv)
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
-	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
-
-	xtime = *tv;
+	ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
 
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-				timekeeper.mult);
+	timekeeper.xtime = *tv;
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();

@@ -405,20 +421,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	xtime = timespec_add(xtime, *ts);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
-
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-				timekeeper.mult);
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, *ts);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	timekeeping_update(true);
+
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();

@@ -490,11 +503,11 @@ void getrawmonotonic(struct timespec *ts)
 	s64 nsecs;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 		nsecs = timekeeping_get_ns_raw();
-		*ts = raw_time;
+		*ts = timekeeper.raw_time;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }

@@ -510,24 +523,30 @@ int timekeeping_valid_for_hres(void)
 	int ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
 		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return ret;
 }
 
 /**
  * timekeeping_max_deferment - Returns max time the clocksource can be deferred
- *
- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
- * ensure that the clocksource does not change!
  */
 u64 timekeeping_max_deferment(void)
 {
-	return timekeeper.clock->max_idle_ns;
+	unsigned long seq;
+	u64 ret;
+
+	do {
+		seq = read_seqbegin(&timekeeper.lock);
+
+		ret = timekeeper.clock->max_idle_ns;
+
+	} while (read_seqretry(&timekeeper.lock, seq));
+
+	return ret;
 }
 
 /**

@@ -572,28 +591,29 @@ void __init timekeeping_init(void)
 	read_persistent_clock(&now);
 	read_boot_clock(&boot);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	seqlock_init(&timekeeper.lock);
 
 	ntp_init();
 
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
 	timekeeper_setup_internals(clock);
 
-	xtime.tv_sec = now.tv_sec;
-	xtime.tv_nsec = now.tv_nsec;
-	raw_time.tv_sec = 0;
-	raw_time.tv_nsec = 0;
+	timekeeper.xtime.tv_sec = now.tv_sec;
+	timekeeper.xtime.tv_nsec = now.tv_nsec;
+	timekeeper.raw_time.tv_sec = 0;
+	timekeeper.raw_time.tv_nsec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
-		boot.tv_sec = xtime.tv_sec;
-		boot.tv_nsec = xtime.tv_nsec;
+		boot.tv_sec = timekeeper.xtime.tv_sec;
+		boot.tv_nsec = timekeeper.xtime.tv_nsec;
 	}
-	set_normalized_timespec(&wall_to_monotonic,
+	set_normalized_timespec(&timekeeper.wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	total_sleep_time.tv_sec = 0;
-	total_sleep_time.tv_nsec = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	timekeeper.total_sleep_time.tv_sec = 0;
+	timekeeper.total_sleep_time.tv_nsec = 0;
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 }
 
 /* time in seconds when suspend began */

@@ -614,9 +634,11 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 		return;
 	}
 
-	xtime = timespec_add(xtime, *delta);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
-	total_sleep_time = timespec_add(total_sleep_time, *delta);
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, *delta);
+	timekeeper.total_sleep_time = timespec_add(
+				timekeeper.total_sleep_time, *delta);
 }

@@ -640,17 +662,15 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
 		return;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+
 	timekeeping_forward_now();
 
 	__timekeeping_inject_sleeptime(delta);
 
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-				timekeeper.mult);
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();

@@ -673,7 +693,7 @@ static void timekeeping_resume(void)
 	clocksource_resume();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);

@@ -683,7 +703,7 @@ static void timekeeping_resume(void)
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	touch_softlockup_watchdog();

@@ -701,7 +721,7 @@ static int timekeeping_suspend(void)
 	read_persistent_clock(&timekeeping_suspend_time);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	timekeeping_forward_now();
 	timekeeping_suspended = 1;

@@ -711,7 +731,7 @@ static int timekeeping_suspend(void)
 	 * try to compensate so the difference in system time
 	 * and persistent_clock time stays close to constant.
 	 */
-	delta = timespec_sub(xtime, timekeeping_suspend_time);
+	delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
 	delta_delta = timespec_sub(delta, old_delta);
 	if (abs(delta_delta.tv_sec) >= 2) {
 		/*

@@ -724,7 +744,7 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();

@@ -775,7 +795,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
+	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
 	tick_error -= timekeeper.xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;

@@ -943,22 +963,22 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
 	while (timekeeper.xtime_nsec >= nsecps) {
 		timekeeper.xtime_nsec -= nsecps;
-		xtime.tv_sec++;
+		timekeeper.xtime.tv_sec++;
 		second_overflow();
 	}
 
 	/* Accumulate raw time */
 	raw_nsecs = timekeeper.raw_interval << shift;
-	raw_nsecs += raw_time.tv_nsec;
+	raw_nsecs += timekeeper.raw_time.tv_nsec;
 	if (raw_nsecs >= NSEC_PER_SEC) {
 		u64 raw_secs = raw_nsecs;
 		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		raw_time.tv_sec += raw_secs;
+		timekeeper.raw_time.tv_sec += raw_secs;
 	}
-	raw_time.tv_nsec = raw_nsecs;
+	timekeeper.raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
-	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error += ntp_tick_length() << shift;
 	timekeeper.ntp_error -= (timekeeper.xtime_interval +
 				timekeeper.xtime_remainder) <<
 				(timekeeper.ntp_error_shift + shift);

@@ -970,17 +990,19 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
- *
- * Called from the timer interrupt, must hold a write on xtime_lock.
  */
 static void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
 	int shift = 0, maxshift;
+	unsigned long flags;
+
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
-		return;
+		goto out;
 
 	clock = timekeeper.clock;

@@ -989,7 +1011,8 @@ static void update_wall_time(void)
 #else
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
-	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals

@@ -1002,7 +1025,7 @@ static void update_wall_time(void)
 	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
 	shift = max(0, shift);
 	/* Bound shift to one less then what overflows tick_length */
-	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
 		offset = logarithmic_accumulation(offset, shift);

@@ -1040,8 +1063,10 @@ static void update_wall_time(void)
 	 * Store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
-	xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >> timekeeper.shift) + 1;
-	timekeeper.xtime_nsec -= (s64)xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
+						timekeeper.shift) + 1;
+	timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;

@@ -1049,15 +1074,17 @@ static void update_wall_time(void)
 	 * Finally, make sure that after the rounding
 	 * xtime.tv_nsec isn't larger then NSEC_PER_SEC
 	 */
-	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
-		xtime.tv_nsec -= NSEC_PER_SEC;
-		xtime.tv_sec++;
+	if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+		timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
+		timekeeper.xtime.tv_sec++;
 		second_overflow();
 	}
 
 	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-				timekeeper.mult);
+	timekeeping_update(false);
+
+out:
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 }
 
 /**

@@ -1074,8 +1101,10 @@ static void update_wall_time(void)
 void getboottime(struct timespec *ts)
 {
 	struct timespec boottime = {
-		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
-		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
+		.tv_sec = timekeeper.wall_to_monotonic.tv_sec +
+				timekeeper.total_sleep_time.tv_sec,
+		.tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
+				timekeeper.total_sleep_time.tv_nsec
 	};
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);

@@ -1101,13 +1130,13 @@ void get_monotonic_boottime(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
-		sleep = total_sleep_time;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
+		sleep = timekeeper.total_sleep_time;
 		nsecs = timekeeping_get_ns();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
 			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);

@@ -1137,19 +1166,19 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
 */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-	*ts = timespec_add(*ts, total_sleep_time);
+	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return xtime.tv_sec;
+	return timekeeper.xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime;
+	return timekeeper.xtime;
 }
 
 struct timespec current_kernel_time(void)

@@ -1158,10 +1187,10 @@ struct timespec current_kernel_time(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return now;
 }

@@ -1173,11 +1202,11 @@ struct timespec get_monotonic_coarse(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-		mono = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+		mono = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
 				now.tv_nsec + mono.tv_nsec);

@@ -1209,11 +1238,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*xtim = xtime;
-		*wtom = wall_to_monotonic;
-		*sleep = total_sleep_time;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		*xtim = timekeeper.xtime;
+		*wtom = timekeeper.wall_to_monotonic;
+		*sleep = timekeeper.total_sleep_time;
+	} while (read_seqretry(&timekeeper.lock, seq));
 }
 
 /**

@@ -1225,9 +1254,10 @@ ktime_t ktime_get_monotonic_offset(void)
 	struct timespec wtom;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		wtom = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		wtom = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
+
 	return timespec_to_ktime(wtom);
 }
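A recurring shape in the timekeeping.c writer paths above is that a function which used to run under the caller's xtime_lock now takes timekeeper.lock itself, so early returns become goto out and there is a single unlock site, as in second_overflow() and update_wall_time(). A compact user-space sketch of that single-exit discipline follows; the demo_* names are invented for the example.

/*
 * Illustrative sketch only: once a function takes its own lock, early returns
 * become 'goto out' so one unlock covers every path.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static long demo_adjust;                /* protected by demo_lock */
static long demo_length;                /* protected by demo_lock */

static void demo_second_overflow(void)
{
        pthread_mutex_lock(&demo_lock);

        if (!demo_adjust)
                goto out;               /* was a bare 'return' before the lock */

        demo_length += demo_adjust;     /* the real work, still under the lock */
        demo_adjust = 0;
out:
        pthread_mutex_unlock(&demo_lock);       /* the single unlock site */
}

int main(void)
{
        demo_adjust = 5;
        demo_second_overflow();
        printf("%ld\n", demo_length);
        return 0;
}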