Commit 7c526e1f (nexedi/linux), authored Mar 26, 2009 by Ingo Molnar
Merge branches 'timers/new-apis', 'timers/ntp' and 'timers/urgent' into timers/core
Parents: e8684605 74019224 a2a5ac86 37bebc70
Showing 8 changed files with 357 additions and 234 deletions.
arch/powerpc/platforms/cell/spufs/sched.c     +1   -1
drivers/infiniband/hw/ipath/ipath_driver.c    +3   -3
include/linux/timer.h                         +2   -20
include/linux/timex.h                         +1   -1
kernel/posix-cpu-timers.c                     +2   -1
kernel/relay.c                                +1   -1
kernel/time/ntp.c                             +274 -170
kernel/timer.c                                +73  -37
arch/powerpc/platforms/cell/spufs/sched.c
@@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 		set_bit(ctx->prio, spu_prio->bitmap);
 		if (!spu_prio->nr_waiting++)
-			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	}
 }
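spufs armed its scheduler tick through __mod_timer(); since this merge makes __mod_timer() static to kernel/timer.c (see the kernel/timer.c hunks below), the call site switches to the public mod_timer(), which schedules or re-schedules the timer for the given expiry just the same. A minimal sketch of the re-arm pattern, with hypothetical names:

	/* Hypothetical periodic tick, re-armed from its own callback: */
	static struct timer_list example_tick;
	#define EXAMPLE_TICK_PERIOD	(HZ / 10)	/* assumed 100 ms period */

	static void example_tick_fn(unsigned long data)
	{
		/* ... do the periodic work, then re-arm with the public API ... */
		mod_timer(&example_tick, jiffies + EXAMPLE_TICK_PERIOD);
	}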
drivers/infiniband/hw/ipath/ipath_driver.c
@@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd)
  * to prevent HoL blocking, then start the HoL timer that
  * periodically continues, then stop procs, so they can detect
  * link down if they want, and do something about it.
- * Timer may already be running, so use __mod_timer, not add_timer.
+ * Timer may already be running, so use mod_timer, not add_timer.
  */
 void ipath_hol_down(struct ipath_devdata *dd)
 {
@@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd)
 	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
 	dd->ipath_hol_timer.expires = jiffies +
 		msecs_to_jiffies(ipath_hol_timeout_ms);
-	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
 }

 /*
@@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque)
 	else {
 		dd->ipath_hol_timer.expires = jiffies +
 			msecs_to_jiffies(ipath_hol_timeout_ms);
-		__mod_timer(&dd->ipath_hol_timer,
+		mod_timer(&dd->ipath_hol_timer,
 			dd->ipath_hol_timer.expires);
 	}
 }
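The corrected comment keeps its original point: the HoL timer may already be running, so the driver must use mod_timer(), which re-schedules a pending timer, rather than add_timer(), which (per the add_timer() kerneldoc elsewhere in this commit) may only be called on a timer that is not pending. A small sketch of the distinction, using a hypothetical, already initialized timer:

	/* my_timer is a hypothetical timer_list, set up earlier with setup_timer(). */
	my_timer.expires = jiffies + HZ;
	if (!timer_pending(&my_timer))
		add_timer(&my_timer);			/* only legal while not pending */
	else
		mod_timer(&my_timer, my_timer.expires);	/* re-schedule in place */

	/* mod_timer() alone covers both cases, which is why the driver calls it: */
	mod_timer(&my_timer, jiffies + HZ);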
include/linux/timer.h
@@ -86,8 +86,8 @@ static inline int timer_pending(const struct timer_list * timer)

 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);

 /*
  * The jiffies value which is added to now, when there is no timer
@@ -146,25 +146,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 }
 #endif

-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
-	BUG_ON(timer_pending(timer));
-	__mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);

 #ifdef CONFIG_SMP
 extern int try_to_del_timer_sync(struct timer_list *timer);
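With this hunk the header declares mod_timer_pending() next to mod_timer(), and add_timer() becomes a plain extern; its old static-inline body (removed above) reappears as an out-of-line function in the kernel/timer.c hunk later in this commit. A minimal caller-side sketch using only these declarations, with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timeout(unsigned long data)
	{
		/* ... handle expiry ... */
	}

	static void demo_start(void)
	{
		setup_timer(&demo_timer, demo_timeout, 0);
		demo_timer.expires = jiffies + HZ;
		add_timer(&demo_timer);		/* timer must not already be pending */
	}

	static void demo_feed(void)
	{
		/* push the deadline out only while the timer is still armed: */
		mod_timer_pending(&demo_timer, jiffies + HZ);
	}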
include/linux/timex.h
@@ -190,7 +190,7 @@ struct timex {
  * offset and maximum frequency tolerance.
  */
 #define SHIFT_USEC 16		/* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
 #define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
 		       PPM_SCALE + 1)
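The (s64) cast is what keeps the later multiplications 64-bit: the kernel/time/ntp.c hunk below now computes time_freq = txc->freq * PPM_SCALE without its own cast, relying on the constant to promote the product. A rough worked example, assuming NTP_SCALE_SHIFT is 32 (its value in this header) and NSEC_PER_USEC is 1000:

	/* PPM_SCALE = 1000 << (32 - 16) = 65,536,000 (scaled nsecs/sec per scaled ppm).
	 * txc->freq carries ppm with 16 fractional bits, so a 10 ppm offset is: */
	long freq = 10 << 16;			/* 655,360 */
	/* 655,360 * 65,536,000 is about 4.3e13, which overflows 32-bit arithmetic
	 * (long is 32 bits on 32-bit builds); with the (s64) inside PPM_SCALE the
	 * whole expression is evaluated in 64 bits: */
	s64 time_freq = freq * PPM_SCALE;	/* == 10 * 1000 * 2^32 */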
kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
-	return 0;
+
+	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }

 /*
kernel/relay.c
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 		 * from the scheduler (trying to re-grab
 		 * rq->lock), so defer it.
 		 */
-		__mod_timer(&buf->timer, jiffies + 1);
+		mod_timer(&buf->timer, jiffies + 1);
 	}

 	old = buf->data;
kernel/time/ntp.c
 /*
- * linux/kernel/time/ntp.c
- *
  * NTP state machine interfaces and logic.
  *
  * This code was mainly moved from kernel/timer.c and kernel/time.c
  * Please see those files for relevant copyright info and historical
  * changelogs.
  */
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <linux/math64.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
-#include <asm/timex.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/mm.h>

 /*
- * Timekeeping variables
+ * NTP timekeeping variables:
  */
-unsigned long tick_usec = TICK_USEC;	/* USER_HZ period (usec) */
-unsigned long tick_nsec;		/* ACTHZ period (nsec) */
+
+/* USER_HZ period (usecs): */
+unsigned long			tick_usec = TICK_USEC;
+
+/* ACTHZ period (nsecs): */
+unsigned long			tick_nsec;
+
 u64 tick_length;
 static u64 tick_length_base;

 static struct hrtimer leap_timer;

-#define MAX_TICKADJ		500	/* microsecs */
-#define MAX_TICKADJ_SCALED	(((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+#define MAX_TICKADJ		500LL		/* usecs */
+#define MAX_TICKADJ_SCALED \
+	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)

 /*
  * phase-lock loop variables
  */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-static int time_state = TIME_OK;	/* clock synchronization status	*/
-int time_status = STA_UNSYNC;		/* clock status bits		*/
-static long time_tai;			/* TAI offset (s)		*/
-static s64 time_offset;			/* time adjustment (ns)		*/
-static long time_constant = 2;		/* pll time constant		*/
-long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
-long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-static s64 time_freq;			/* frequency offset (scaled ns/s)*/
-static long time_reftime;		/* time at last adjustment (s)	*/
+
+/*
+ * clock synchronization status
+ *
+ * (TIME_ERROR prevents overwriting the CMOS clock)
+ */
+static int			time_state = TIME_OK;
+
+/* clock status bits: */
+int				time_status = STA_UNSYNC;
+
+/* TAI offset (secs): */
+static long			time_tai;
+
+/* time adjustment (nsecs): */
+static s64			time_offset;
+
+/* pll time constant: */
+static long			time_constant = 2;
+
+/* maximum error (usecs): */
+long				time_maxerror = NTP_PHASE_LIMIT;
+
+/* estimated error (usecs): */
+long				time_esterror = NTP_PHASE_LIMIT;
+
+/* frequency offset (scaled nsecs/secs): */
+static s64			time_freq;
+
+/* time at last adjustment (secs): */
+static long			time_reftime;
+
 long time_adjust;
-static long ntp_tick_adj;

+/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
+static s64			ntp_tick_adj;
+
+/*
+ * NTP methods:
+ */
+
+/*
+ * Update (tick_length, tick_length_base, tick_nsec), based
+ * on (tick_usec, ntp_tick_adj, time_freq):
+ */
 static void ntp_update_frequency(void)
 {
-	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< NTP_SCALE_SHIFT;
-	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
-	second_length += time_freq;
+	u64 second_length;
+	u64 new_base;

-	tick_length_base = second_length;
+	second_length		 = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
+						<< NTP_SCALE_SHIFT;
+
+	second_length		+= ntp_tick_adj;
+	second_length		+= time_freq;

 	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
-	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+	new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
+
+	/*
+	 * Don't wait for the next second_overflow, apply
+	 * the change to the tick length immediately:
+	 */
+	tick_length		+= new_base - tick_length_base;
+	tick_length_base	 = new_base;
+}
+
+static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+{
+	time_status &= ~STA_MODE;
+
+	if (secs < MINSEC)
+		return 0;
+
+	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
+		return 0;
+
+	time_status |= STA_MODE;
+
+	return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }

 static void ntp_update_offset(long offset)
 {
-	long mtemp;
 	s64 freq_adj;
+	s64 offset64;
+	long secs;

 	if (!(time_status & STA_PLL))
 		return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
 	 * Select how the frequency is to be controlled
 	 * and in which mode (PLL or FLL).
 	 */
-	if (time_status & STA_FREQHOLD || time_reftime == 0)
-		time_reftime = xtime.tv_sec;
-	mtemp = xtime.tv_sec - time_reftime;
+	secs = xtime.tv_sec - time_reftime;
+	if (unlikely(time_status & STA_FREQHOLD))
+		secs = 0;
+
 	time_reftime = xtime.tv_sec;

-	freq_adj = (s64)offset * mtemp;
-	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
-	time_status &= ~STA_MODE;
-	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
-				    mtemp);
-		time_status |= STA_MODE;
-	}
-	freq_adj += time_freq;
-	freq_adj = min(freq_adj, MAXFREQ_SCALED);
+	offset64    = offset;
+	freq_adj    = (offset64 * secs) <<
+			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
+
+	freq_adj    += ntp_update_offset_fll(offset64, secs);
+
+	freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);
+
 	time_freq   = max(freq_adj, -MAXFREQ_SCALED);
-	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
+
+	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }

 /**
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		xtime.tv_sec--;
 		wall_to_monotonic.tv_sec++;
 		time_state = TIME_OOP;
-		printk(KERN_NOTICE "Clock: "
-		       "inserting leap second 23:59:60 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: inserting leap second 23:59:60 UTC\n");
 		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_tai--;
 		wall_to_monotonic.tv_sec--;
 		time_state = TIME_WAIT;
-		printk(KERN_NOTICE "Clock: "
-		       "deleting leap second 23:59:59 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
 		time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
  */
 void second_overflow(void)
 {
-	s64 time_adj;
+	s64 delta;

 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -193,23 +250,29 @@ void second_overflow(void)
 	 * reduced by a fixed factor times the time constant.
 	 */
 	tick_length = tick_length_base;
-	time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
-	time_offset -= time_adj;
-	tick_length += time_adj;
-
-	if (unlikely(time_adjust)) {
-		if (time_adjust > MAX_TICKADJ) {
-			time_adjust -= MAX_TICKADJ;
-			tick_length += MAX_TICKADJ_SCALED;
-		} else if (time_adjust < -MAX_TICKADJ) {
-			time_adjust += MAX_TICKADJ;
-			tick_length -= MAX_TICKADJ_SCALED;
-		} else {
-			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
-			time_adjust = 0;
-		}
+
+	delta		 = shift_right(time_offset, SHIFT_PLL + time_constant);
+	time_offset	-= delta;
+	tick_length	+= delta;
+
+	if (!time_adjust)
+		return;
+
+	if (time_adjust > MAX_TICKADJ) {
+		time_adjust	-= MAX_TICKADJ;
+		tick_length	+= MAX_TICKADJ_SCALED;
+		return;
 	}
+
+	if (time_adjust < -MAX_TICKADJ) {
+		time_adjust	+= MAX_TICKADJ;
+		tick_length	-= MAX_TICKADJ_SCALED;
+		return;
+	}
+
+	tick_length	+= (s64)(time_adjust * NSEC_PER_USEC /
+				NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
+	time_adjust	 = 0;
 }

 #ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
 	 * This code is run on a timer. If the clock is set, that timer
 	 * may not expire at the correct time. Thus, we adjust...
 	 */
-	if (!ntp_synced())
+	if (!ntp_synced()) {
 		/*
 		 * Not synced, exit, do not restart a timer (if one is
 		 * running, let it run out).
 		 */
 		return;
+	}

 	getnstimeofday(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,85 +334,57 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif

-/* adjtimex mainly allows reading (and writing, if superuser) of
- * kernel time-keeping variables. used by xntpd.
+/*
+ * Start the leap seconds timer:
  */
-int do_adjtimex(struct timex *txc)
+static inline void ntp_start_leap_timer(struct timespec *ts)
 {
-	struct timespec ts;
-	int result;
+	long now = ts->tv_sec;

-	/* Validate the data before disabling interrupts */
-	if (txc->modes & ADJ_ADJTIME) {
-		/* singleshot must not be used with any other mode bits */
-		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
-			return -EINVAL;
-		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
-		    !capable(CAP_SYS_TIME))
-			return -EPERM;
-	} else {
-		/* In order to modify anything, you gotta be super-user! */
-		if (txc->modes && !capable(CAP_SYS_TIME))
-			return -EPERM;
-		/* if the quartz is off by more than 10% something is VERY wrong! */
-		if (txc->modes & ADJ_TICK &&
-		    (txc->tick <  900000/USER_HZ ||
-		     txc->tick > 1100000/USER_HZ))
-			return -EINVAL;
-		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
-			hrtimer_cancel(&leap_timer);
+	if (time_status & STA_INS) {
+		time_state = TIME_INS;
+		now += 86400 - now % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+
+		return;
 	}

-	getnstimeofday(&ts);
-
-	write_seqlock_irq(&xtime_lock);
-
-	/* If there are input parameters, then process them */
-	if (txc->modes & ADJ_ADJTIME) {
-		long save_adjust = time_adjust;
-
-		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
-			/* adjtime() is independent from ntp_adjtime() */
-			time_adjust = txc->offset;
-			ntp_update_frequency();
-		}
-		txc->offset = save_adjust;
-		goto adj_done;
+	if (time_status & STA_DEL) {
+		time_state = TIME_DEL;
+		now += 86400 - (now + 1) % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
 	}
+}

-	if (txc->modes) {
-		long sec;
-
-		if (txc->modes & ADJ_STATUS) {
-			if ((time_status & STA_PLL) &&
-			    !(txc->status & STA_PLL)) {
+/*
+ * Propagate a new txc->status value into the NTP state:
+ */
+static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+{
+	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
 		time_status = STA_UNSYNC;
 	}
+
+	/*
+	 * If we turn on PLL adjustments then reset the
+	 * reference time to current time.
+	 */
+	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
+		time_reftime = xtime.tv_sec;
+
 	/* only set allowed bits */
 	time_status &= STA_RONLY;
 	time_status |= txc->status & ~STA_RONLY;

 	switch (time_state) {
 	case TIME_OK:
-	start_timer:
-		sec = ts.tv_sec;
-		if (time_status & STA_INS) {
-			time_state = TIME_INS;
-			sec += 86400 - sec % 86400;
-			hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-		} else if (time_status & STA_DEL) {
-			time_state = TIME_DEL;
-			sec += 86400 - (sec + 1) % 86400;
-			hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-		}
+		ntp_start_leap_timer(ts);
 		break;
 	case TIME_INS:
 	case TIME_DEL:
 		time_state = TIME_OK;
-		goto start_timer;
+		ntp_start_leap_timer(ts);
+		break;
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
@@ -357,21 +393,31 @@ int do_adjtimex(struct timex *txc)
 		hrtimer_restart(&leap_timer);
 		break;
 	}
 }

+/*
+ * Called with the xtime lock held, so we can access and modify
+ * all the global NTP state:
+ */
+static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
+{
+	if (txc->modes & ADJ_STATUS)
+		process_adj_status(txc, ts);
+
 	if (txc->modes & ADJ_NANO)
 		time_status |= STA_NANO;
 	if (txc->modes & ADJ_MICRO)
 		time_status &= ~STA_NANO;
 	if (txc->modes & ADJ_FREQUENCY) {
-		time_freq = (s64)txc->freq * PPM_SCALE;
+		time_freq = txc->freq * PPM_SCALE;
 		time_freq = min(time_freq, MAXFREQ_SCALED);
 		time_freq = max(time_freq, -MAXFREQ_SCALED);
 	}
 	if (txc->modes & ADJ_MAXERROR)
 		time_maxerror = txc->maxerror;
 	if (txc->modes & ADJ_ESTERROR)
 		time_esterror = txc->esterror;
@@ -388,25 +434,80 @@ int do_adjtimex(struct timex *txc)
 	if (txc->modes & ADJ_OFFSET)
 		ntp_update_offset(txc->offset);
 	if (txc->modes & ADJ_TICK)
 		tick_usec = txc->tick;

 	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
 		ntp_update_frequency();
-	}
+}
+
+/*
+ * adjtimex mainly allows reading (and writing, if superuser) of
+ * kernel time-keeping variables. used by xntpd.
+ */
+int do_adjtimex(struct timex *txc)
+{
+	struct timespec ts;
+	int result;
+
+	/* Validate the data before disabling interrupts */
+	if (txc->modes & ADJ_ADJTIME) {
+		/* singleshot must not be used with any other mode bits */
+		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
+			return -EINVAL;
+		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+		    !capable(CAP_SYS_TIME))
+			return -EPERM;
+	} else {
+		/* In order to modify anything, you gotta be super-user! */
+		if (txc->modes && !capable(CAP_SYS_TIME))
+			return -EPERM;
+		/*
+		 * if the quartz is off by more than 10% then
+		 * something is VERY wrong!
+		 */
+		if (txc->modes & ADJ_TICK &&
+		    (txc->tick <  900000/USER_HZ ||
+		     txc->tick > 1100000/USER_HZ))
+			return -EINVAL;
+
+		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+			hrtimer_cancel(&leap_timer);
+	}
+
+	getnstimeofday(&ts);
+
+	write_seqlock_irq(&xtime_lock);
+
+	if (txc->modes & ADJ_ADJTIME) {
+		long save_adjust = time_adjust;
+
+		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+			/* adjtime() is independent from ntp_adjtime() */
+			time_adjust = txc->offset;
+			ntp_update_frequency();
+		}
+		txc->offset = save_adjust;
+	} else {
+
+		/* If there are input parameters, then process them: */
+		if (txc->modes)
+			process_adjtimex_modes(txc, &ts);

 	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
 				  NTP_SCALE_SHIFT);
 	if (!(time_status & STA_NANO))
 		txc->offset /= NSEC_PER_USEC;
+	}

-adj_done:
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;

 	txc->freq	= shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
 					(s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror	= time_maxerror;
 	txc->esterror	= time_esterror;
 	txc->status	= time_status;
@@ -425,6 +526,7 @@ int do_adjtimex(struct timex *txc)
 	txc->calcnt	= 0;
 	txc->errcnt	= 0;
 	txc->stbcnt	= 0;
+
 	write_sequnlock_irq(&xtime_lock);

 	txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ int do_adjtimex(struct timex *txc)
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
+	ntp_tick_adj <<= NTP_SCALE_SHIFT;
+
 	return 1;
 }
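Inside the new ntp_start_leap_timer() helper, an insertion is scheduled with now += 86400 - now % 86400 (the next UTC midnight) and a deletion with now += 86400 - (now + 1) % 86400 (the next 23:59:59). A standalone check of that arithmetic, using the 2008-12-31 leap second date as illustrative input:

	#include <stdio.h>

	int main(void)
	{
		long now = 1230767999;	/* 2008-12-31 23:59:59 UTC */

		long ins = now + (86400 - now % 86400);		/* STA_INS case */
		long del = now + (86400 - (now + 1) % 86400);	/* STA_DEL case */

		printf("insert timer fires at %ld\n", ins);	/* 1230768000: next midnight */
		printf("delete timer fires at %ld\n", del);	/* 1230854399: next 23:59:59 */
		return 0;
	}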
kernel/timer.c
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }

-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;

 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}

 	debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);

 	return ret;
 }
-EXPORT_SYMBOL(__mod_timer);

 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);

 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,11 +688,61 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;

-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);

+/**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);

 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);

 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);

 #endif
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;

 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
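kernel/timer.c now funnels both entry points through __mod_timer(timer, expires, pending_only): mod_timer() passes false and always (re)activates the timer, while the new mod_timer_pending() passes true and bails out through out_unlock when the timer is not pending, so a deleted timer stays deleted. A hedged usage sketch of the resulting semantics (the watchdog names are hypothetical, kernel context assumed):

	static struct timer_list watchdog;
	static void watchdog_fn(unsigned long data) { /* ... */ }

	setup_timer(&watchdog, watchdog_fn, 0);

	mod_timer(&watchdog, jiffies + HZ);		/* not pending: arms it, returns 0 */
	mod_timer_pending(&watchdog, jiffies + 2 * HZ);	/* pending: re-schedules, returns 1 */

	del_timer(&watchdog);
	mod_timer_pending(&watchdog, jiffies + HZ);	/* not pending: no-op, returns 0 */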