Commit bb758e96 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  hrtimers: fix warning in kernel/hrtimer.c
  x86: make sure we really have an hpet mapping before using it
  x86: enable HPET on Fujitsu u9200
  linux/timex.h: cleanup for userspace
  posix-timers: simplify de_thread()->exit_itimers() path
  posix-timers: check ->it_signal instead of ->it_pid to validate the timer
  posix-timers: use "struct pid*" instead of "struct task_struct*"
  nohz: suppress needless timer reprogramming
  clocksource, acpi_pm.c: put acpi_pm_read_slow() under CONFIG_PCI
  nohz: no softirq pending warnings for offline cpus
  hrtimer: removing all ur callback modes, fix
  hrtimer: removing all ur callback modes, fix hotplug
  hrtimer: removing all ur callback modes
  x86: correct link to HPET timer specification
  rtc-cmos: export second NVRAM bank

Fixed up conflicts in sound/drivers/pcsp/pcsp.c and sound/core/hrtimer.c
manually.
parents 5f34fe1c 32e8d186
@@ -479,7 +479,7 @@ config HPET_TIMER
 The HPET provides a stable time base on SMP
 systems, unlike the TSC, but it is more expensive to access,
 as it is off-chip. You can find the HPET spec at
-<http://www.intel.com/hardwaredesign/hpetspec.htm>.
+<http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 You can safely choose Y here. However, HPET will only be
 activated if the platform and the BIOS support this feature.
......
@@ -813,7 +813,7 @@ int __init hpet_enable(void)
 out_nohpet:
 hpet_clear_mapping();
-boot_hpet_disable = 1;
+hpet_address = 0;
 return 0;
 }
@@ -836,9 +836,10 @@ static __init int hpet_late_init(void)
 hpet_address = force_hpet_address;
 hpet_enable();
+}
 if (!hpet_virt_address)
 return -ENODEV;
-}
 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
......
@@ -168,6 +168,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
 ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
 ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
+ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
 ich_force_enable_hpet);
......
@@ -46,7 +46,7 @@
 /*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
-* http://www.intel.com/hardwaredesign/hpetspec.htm
+* http://www.intel.com/hardwaredesign/hpetspec_1.pdf
 */
 #define HPET_USER_FREQ (64)
 #define HPET_DRIFT (500)
......
@@ -57,11 +57,6 @@ u32 acpi_pm_read_verified(void)
 return v2;
 }
-static cycle_t acpi_pm_read_slow(void)
-{
-return (cycle_t)acpi_pm_read_verified();
-}
 static cycle_t acpi_pm_read(void)
 {
 return (cycle_t)read_pmtmr();
@@ -88,6 +83,11 @@ static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);
+static cycle_t acpi_pm_read_slow(void)
+{
+return (cycle_t)acpi_pm_read_verified();
+}
 static inline void acpi_pm_need_workaround(void)
 {
 clocksource_acpi_pm.read = acpi_pm_read_slow;
......
@@ -697,7 +697,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
 struct ads7846 *ts = container_of(handle, struct ads7846, timer);
 int status = 0;
-spin_lock_irq(&ts->lock);
+spin_lock(&ts->lock);
 if (unlikely(!get_pendown_state(ts) ||
 device_suspended(&ts->spi->dev))) {
@@ -728,7 +728,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
 dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
 }
-spin_unlock_irq(&ts->lock);
+spin_unlock(&ts->lock);
 return HRTIMER_NORESTART;
 }
......
@@ -773,7 +773,6 @@ static int de_thread(struct task_struct *tsk)
 struct signal_struct *sig = tsk->signal;
 struct sighand_struct *oldsighand = tsk->sighand;
 spinlock_t *lock = &oldsighand->siglock;
-struct task_struct *leader = NULL;
 int count;
 if (thread_group_empty(tsk))
@@ -811,7 +810,7 @@ static int de_thread(struct task_struct *tsk)
 * and to assume its PID:
 */
 if (!thread_group_leader(tsk)) {
-leader = tsk->group_leader;
+struct task_struct *leader = tsk->group_leader;
 sig->notify_count = -1; /* for exit_notify() */
 for (;;) {
@@ -863,8 +862,9 @@ static int de_thread(struct task_struct *tsk)
 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 leader->exit_state = EXIT_DEAD;
 write_unlock_irq(&tasklist_lock);
+release_task(leader);
 }
 sig->group_exit_task = NULL;
@@ -873,8 +873,6 @@ static int de_thread(struct task_struct *tsk)
 no_thread_group:
 exit_itimers(sig);
 flush_itimer_signals();
-if (leader)
-release_task(leader);
 if (atomic_read(&oldsighand->count) != 1) {
 struct sighand_struct *newsighand;
......
@@ -42,26 +42,6 @@ enum hrtimer_restart {
 HRTIMER_RESTART, /* Timer must be restarted */
 };
-/*
-* hrtimer callback modes:
-*
-* HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
-* HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
-* Special mode for tick emulation and
-* scheduler timer. Such timers are per
-* cpu and not allowed to be migrated on
-* cpu unplug.
-* HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
-* with timer->base lock unlocked
-* used for timers which call wakeup to
-* avoid lock order problems with rq->lock
-*/
-enum hrtimer_cb_mode {
-HRTIMER_CB_SOFTIRQ,
-HRTIMER_CB_IRQSAFE_PERCPU,
-HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
 /*
 * Values to track state of the timer
 *
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
 * 0x00 inactive
 * 0x01 enqueued into rbtree
 * 0x02 callback function running
-* 0x04 callback pending (high resolution mode)
 *
 * Special cases:
 * 0x03 callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE 0x00
 #define HRTIMER_STATE_ENQUEUED 0x01
 #define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_PENDING 0x04
-#define HRTIMER_STATE_MIGRATE 0x08
+#define HRTIMER_STATE_MIGRATE 0x04
 /**
 * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
 * @function: timer expiry callback function
 * @base: pointer to the timer base (per cpu and per clock)
 * @state: state information (See bit values above)
-* @cb_mode: high resolution timer feature to select the callback execution
-* mode
 * @cb_entry: list head to enqueue an expired timer into the callback list
 * @start_site: timer statistics field to store the site where the timer
 * was started
@@ -129,7 +105,6 @@ struct hrtimer {
 struct hrtimer_clock_base *base;
 unsigned long state;
 struct list_head cb_entry;
-enum hrtimer_cb_mode cb_mode;
 #ifdef CONFIG_TIMER_STATS
 int start_pid;
 void *start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
 * @check_clocks: Indictator, when set evaluate time source and clock
 * event devices whether high resolution mode can be
 * activated.
-* @cb_pending: Expired timers are moved from the rbtree to this
-* list in the timer interrupt. The list is processed
-* in the softirq.
 * @nr_events: Total number of timer interrupt events
 */
 struct hrtimer_cpu_base {
 spinlock_t lock;
 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-struct list_head cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
 ktime_t expires_next;
 int hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
 */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-return timer->state &
-(HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+return timer->state & HRTIMER_STATE_ENQUEUED;
 }
 /*
......
@@ -251,9 +251,6 @@ enum
 BLOCK_SOFTIRQ,
 TASKLET_SOFTIRQ,
 SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-HRTIMER_SOFTIRQ,
-#endif
 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
 NR_SOFTIRQS
......
@@ -45,7 +45,11 @@ struct k_itimer {
 int it_requeue_pending; /* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
 int it_sigev_notify; /* notify word of sigevent struct */
-struct task_struct *it_process; /* process to send signal to */
+struct signal_struct *it_signal;
+union {
+struct pid *it_pid; /* pid of process to send signal to */
+struct task_struct *it_process; /* for clock_nanosleep */
+};
 struct sigqueue *sigq; /* signal queue entry. */
 union {
 struct {
......
@@ -53,46 +53,10 @@
 #ifndef _LINUX_TIMEX_H
 #define _LINUX_TIMEX_H
-#include <linux/compiler.h>
 #include <linux/time.h>
-#include <asm/param.h>
 #define NTP_API 4 /* NTP API version */
-/*
-* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
-* for a slightly underdamped convergence characteristic. SHIFT_KH
-* establishes the damping of the FLL and is chosen by wisdom and black
-* art.
-*
-* MAXTC establishes the maximum time constant of the PLL. With the
-* SHIFT_KG and SHIFT_KF values given and a time constant range from
-* zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
-* respectively.
-*/
-#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
-#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
-#define MAXTC 10 /* maximum time constant (shift) */
-/*
-* SHIFT_USEC defines the scaling (shift) of the time_freq and
-* time_tolerance variables, which represent the current frequency
-* offset and maximum frequency tolerance.
-*/
-#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 19
-#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
-PPM_SCALE + 1)
-#define MAXPHASE 500000000l /* max phase error (ns) */
-#define MAXFREQ 500000 /* max frequency error (ns/s) */
-#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC 256 /* min interval between updates (s) */
-#define MAXSEC 2048 /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
 /*
 * syscall interface - used (mainly by NTP daemon)
 * to discipline kernel clock oscillator
@@ -199,8 +163,45 @@ struct timex {
 #define TIME_BAD TIME_ERROR /* bw compat */
 #ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
 #include <asm/timex.h>
+/*
+* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+* for a slightly underdamped convergence characteristic. SHIFT_KH
+* establishes the damping of the FLL and is chosen by wisdom and black
+* art.
+*
+* MAXTC establishes the maximum time constant of the PLL. With the
+* SHIFT_KG and SHIFT_KF values given and a time constant range from
+* zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+* respectively.
+*/
+#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
+#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
+#define MAXTC 10 /* maximum time constant (shift) */
+/*
+* SHIFT_USEC defines the scaling (shift) of the time_freq and
+* time_tolerance variables, which represent the current frequency
+* offset and maximum frequency tolerance.
+*/
+#define SHIFT_USEC 16 /* frequency offset scale (shift) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 19
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+PPM_SCALE + 1)
+#define MAXPHASE 500000000l /* max phase error (ns) */
+#define MAXFREQ 500000 /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC 256 /* min interval between updates (s) */
+#define MAXSEC 2048 /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
 /*
 * kernel variables
 * Note: maximum error = NTP synch distance = dispersion + delay / 2;
......
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
-/*
-* Check, whether the timer is on the callback pending list
-*/
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-return timer->state & HRTIMER_STATE_PENDING;
-}
-/*
-* Remove a timer from the callback pending list
-*/
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-list_del_init(&timer->cb_entry);
-}
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
+static void __run_hrtimer(struct hrtimer *timer);
 /*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 struct hrtimer_clock_base *base)
 {
 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-/* Timer is expired, act upon the callback mode */
-switch(timer->cb_mode) {
-case HRTIMER_CB_IRQSAFE_PERCPU:
-case HRTIMER_CB_IRQSAFE_UNLOCKED:
 /*
-* This is solely for the sched tick emulation with
-* dynamic tick support to ensure that we do not
-* restart the tick right on the edge and end up with
-* the tick timer in the softirq ! The calling site
-* takes care of this. Also used for hrtimer sleeper !
+* XXX: recursion check?
+* hrtimer_forward() should round up with timer granularity
+* so that we never get into inf recursion here,
+* it doesn't do that though
 */
-debug_hrtimer_deactivate(timer);
-return 1;
-case HRTIMER_CB_SOFTIRQ:
-/*
-* Move everything else into the softirq pending list !
-*/
-list_add_tail(&timer->cb_entry,
-&base->cpu_base->cb_pending);
-timer->state = HRTIMER_STATE_PENDING;
+__run_hrtimer(timer);
 return 1;
-default:
-BUG();
-}
 }
 return 0;
 }
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
 return 1;
 }
-static inline void hrtimer_raise_softirq(void)
-{
-raise_softirq(HRTIMER_SOFTIRQ);
-}
 #else
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 #endif /* CONFIG_HIGH_RES_TIMERS */
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 struct hrtimer_clock_base *base,
 unsigned long newstate, int reprogram)
 {
-/* High res. callback list. NOP for !HIGHRES */
-if (hrtimer_cb_pending(timer))
-hrtimer_remove_cb_pending(timer);
-else {
+if (timer->state & HRTIMER_STATE_ENQUEUED) {
 /*
 * Remove the timer from the rbtree and replace the
 * first entry pointer if necessary.
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
 struct hrtimer_clock_base *base, *new_base;
 unsigned long flags;
-int ret, raise;
+int ret;
 base = lock_hrtimer_base(timer, &flags);
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 enqueue_hrtimer(timer, new_base,
 new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
-/*
-* The timer may be expired and moved to the cb_pending
-* list. We can not raise the softirq with base lock held due
-* to a possible deadlock with runqueue lock.
-*/
-raise = timer->state == HRTIMER_STATE_PENDING;
-/*
-* We use preempt_disable to prevent this task from migrating after
-* setting up the softirq and raising it. Otherwise, if me migrate
-* we will raise the softirq on the wrong CPU.
-*/
-preempt_disable();
 unlock_hrtimer_base(timer, &flags);
-if (raise)
-hrtimer_raise_softirq();
-preempt_enable();
 return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-spin_lock_irq(&cpu_base->lock);
-while (!list_empty(&cpu_base->cb_pending)) {
-enum hrtimer_restart (*fn)(struct hrtimer *);
-struct hrtimer *timer;
-int restart;
-int emulate_hardirq_ctx = 0;
-timer = list_entry(cpu_base->cb_pending.next,
-struct hrtimer, cb_entry);
-debug_hrtimer_deactivate(timer);
-timer_stats_account_hrtimer(timer);
-fn = timer->function;
-/*
-* A timer might have been added to the cb_pending list
-* when it was migrated during a cpu-offline operation.
-* Emulate hardirq context for such timers.
-*/
-if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-emulate_hardirq_ctx = 1;
-__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-spin_unlock_irq(&cpu_base->lock);
-if (unlikely(emulate_hardirq_ctx)) {
-local_irq_disable();
-restart = fn(timer);
-local_irq_enable();
-} else
-restart = fn(timer);
-spin_lock_irq(&cpu_base->lock);
-timer->state &= ~HRTIMER_STATE_CALLBACK;
-if (restart == HRTIMER_RESTART) {
-BUG_ON(hrtimer_active(timer));
-/*
-* Enqueue the timer, allow reprogramming of the event
-* device
-*/
-enqueue_hrtimer(timer, timer->base, 1);
-} else if (hrtimer_active(timer)) {
-/*
-* If the timer was rearmed on another CPU, reprogram
-* the event device.
-*/
-struct hrtimer_clock_base *base = timer->base;
-if (base->first == &timer->node &&
-hrtimer_reprogram(timer, base)) {
-/*
-* Timer is expired. Thus move it from tree to
-* pending list again.
-*/
-__remove_hrtimer(timer, base,
-HRTIMER_STATE_PENDING, 0);
-list_add_tail(&timer->cb_entry,
-&base->cpu_base->cb_pending);
-}
-}
-}
-spin_unlock_irq(&cpu_base->lock);
-}
 static void __run_hrtimer(struct hrtimer *timer)
 {
 struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
 enum hrtimer_restart (*fn)(struct hrtimer *);
 int restart;
+WARN_ON(!irqs_disabled());
 debug_hrtimer_deactivate(timer);
 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
 timer_stats_account_hrtimer(timer);
 fn = timer->function;
-if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
 /*
-* Used for scheduler timers, avoid lock inversion with
-* rq->lock and tasklist_lock.
-*
-* These timers are required to deal with enqueue expiry
-* themselves and are not allowed to migrate.
+* Because we run timers from hardirq context, there is no chance
+* they get migrated to another cpu, therefore its safe to unlock
+* the timer base.
 */
 spin_unlock(&cpu_base->lock);
 restart = fn(timer);
 spin_lock(&cpu_base->lock);
-} else
-restart = fn(timer);
 /*
 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 struct hrtimer_clock_base *base;
 ktime_t expires_next, now;
-int i, raise = 0;
+int i;
 BUG_ON(!cpu_base->hres_active);
 cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 break;
 }
-/* Move softirq callbacks to the pending list */
-if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-__remove_hrtimer(timer, base,
-HRTIMER_STATE_PENDING, 0);
-list_add_tail(&timer->cb_entry,
-&base->cpu_base->cb_pending);
-raise = 1;
-continue;
-}
 __run_hrtimer(timer);
 }
 spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 if (tick_program_event(expires_next, 0))
 goto retry;
 }
-/* Raise softirq ? */
-if (raise)
-raise_softirq(HRTIMER_SOFTIRQ);
 }
 /**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
 local_irq_restore(flags);
 }
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
 #endif /* CONFIG_HIGH_RES_TIMERS */
 /*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
 */
 void hrtimer_run_pending(void)
 {
-struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 if (hrtimer_hres_active())
 return;
@@ -1444,8 +1292,6 @@ void hrtimer_run_pending(void)
 */
 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
 hrtimer_switch_to_hres();
-run_hrtimer_pending(cpu_base);
 }
 /*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
 hrtimer_get_expires_tv64(timer))
 break;
-if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-__remove_hrtimer(timer, base,
-HRTIMER_STATE_PENDING, 0);
-list_add_tail(&timer->cb_entry,
-&base->cpu_base->cb_pending);
-continue;
-}
 __run_hrtimer(timer);
 }
 spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 sl->timer.function = hrtimer_wakeup;
 sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,36 +1490,22 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 cpu_base->clock_base[i].cpu_base = cpu_base;
-INIT_LIST_HEAD(&cpu_base->cb_pending);
 hrtimer_init_hres(cpu_base);
 }
 #ifdef CONFIG_HOTPLUG_CPU
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+struct hrtimer_clock_base *new_base)
 {
 struct hrtimer *timer;
 struct rb_node *node;
-int raise = 0;
 while ((node = rb_first(&old_base->active))) {
 timer = rb_entry(node, struct hrtimer, node);
 BUG_ON(hrtimer_callback_running(timer));
 debug_hrtimer_deactivate(timer);
-/*
-* Should not happen. Per CPU timers should be
-* canceled _before_ the migration code is called
-*/
-if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-__remove_hrtimer(timer, old_base,
-HRTIMER_STATE_INACTIVE, 0);
-WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-timer, timer->function, dcpu);
-continue;
-}
 /*
 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
 * timer could be seen as !active and just vanish away
@@ -1693,69 +1514,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 timer->base = new_base;
 /*
-* Enqueue the timer. Allow reprogramming of the event device
+* Enqueue the timers on the new cpu, but do not reprogram
+* the timer as that would enable a deadlock between
+* hrtimer_enqueue_reprogramm() running the timer and us still
+* holding a nested base lock.
+*
+* Instead we tickle the hrtimer interrupt after the migration
+* is done, which will run all expired timers and re-programm
+* the timer device.
 */
-enqueue_hrtimer(timer, new_base, 1);
+enqueue_hrtimer(timer, new_base, 0);
-#ifdef CONFIG_HIGH_RES_TIMERS
-/*
-* Happens with high res enabled when the timer was
-* already expired and the callback mode is
-* HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-* enqueue code does not move them to the soft irq
-* pending list for performance/latency reasons, but
-* in the migration state, we need to do that
-* otherwise we end up with a stale timer.
-*/
-if (timer->state == HRTIMER_STATE_MIGRATE) {
-timer->state = HRTIMER_STATE_PENDING;
-list_add_tail(&timer->cb_entry,
-&new_base->cpu_base->cb_pending);
-raise = 1;
-}
-#endif
 /* Clear the migration state bit */
 timer->state &= ~HRTIMER_STATE_MIGRATE;
 }
-return raise;
 }
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-struct hrtimer_cpu_base *new_base)
-{
-struct hrtimer *timer;
-int raise = 0;
-while (!list_empty(&old_base->cb_pending)) {
-timer = list_entry(old_base->cb_pending.next,
-struct hrtimer, cb_entry);
-__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-timer->base = &new_base->clock_base[timer->base->index];
-list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-raise = 1;
-}
-return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-struct hrtimer_cpu_base *new_base)
-{
-return 0;
-}
-#endif
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
 struct hrtimer_cpu_base *old_base, *new_base;
-int i, raise = 0;
-BUG_ON(cpu_online(cpu));
-old_base = &per_cpu(hrtimer_bases, cpu);
+int dcpu, i;
+BUG_ON(cpu_online(scpu));
+old_base = &per_cpu(hrtimer_bases, scpu);
 new_base = &get_cpu_var(hrtimer_bases);
-tick_cancel_sched_timer(cpu);
+dcpu = smp_processor_id();
+tick_cancel_sched_timer(scpu);
 /*
 * The caller is globally serialized and nobody else
 * takes two locks at once, deadlock is not possible.
@@ -1764,41 +1550,47 @@ static void migrate_hrtimers(int cpu)
 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-if (migrate_hrtimer_list(&old_base->clock_base[i],
-&new_base->clock_base[i], cpu))
-raise = 1;
+migrate_hrtimer_list(&old_base->clock_base[i],
+&new_base->clock_base[i]);
 }
-if (migrate_hrtimer_pending(old_base, new_base))
-raise = 1;
 spin_unlock(&old_base->lock);
 spin_unlock_irq(&new_base->lock);
 put_cpu_var(hrtimer_bases);
-if (raise)
-hrtimer_raise_softirq();
+return dcpu;
+}
+static void tickle_timers(void *arg)
+{
+hrtimer_peek_ahead_timers();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 unsigned long action, void *hcpu)
 {
-unsigned int cpu = (long)hcpu;
+int scpu = (long)hcpu;
 switch (action) {
 case CPU_UP_PREPARE:
 case CPU_UP_PREPARE_FROZEN:
-init_hrtimers_cpu(cpu);
+init_hrtimers_cpu(scpu);
 break;
 #ifdef CONFIG_HOTPLUG_CPU
 case CPU_DEAD:
 case CPU_DEAD_FROZEN:
-clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-migrate_hrtimers(cpu);
+{
+int dcpu;
+clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+dcpu = migrate_hrtimers(scpu);
+smp_call_function_single(dcpu, tickle_timers, NULL, 0);
 break;
+}
 #endif
 default:
@@ -1817,9 +1609,6 @@ void __init hrtimers_init(void)
 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 (void *)(long)smp_processor_id());
 register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 /**
......
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
 * must supply functions here, even if the function just returns
 * ENOSYS. The standard POSIX timer management code assumes the
 * following: 1.) The k_itimer struct (sched.h) is used for the
-* timer. 2.) The list, it_lock, it_clock, it_id and it_process
+* timer. 2.) The list, it_lock, it_clock, it_id and it_pid
 * fields are not modified by timer code.
 *
 * At this time all functions EXCEPT clock_nanosleep can be
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-int shared, ret;
+struct task_struct *task;
+int shared, ret = -1;
 /*
 * FIXME: if ->sigq is queued we can race with
 * dequeue_signal()->do_schedule_next_timer().
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 */
 timr->sigq->info.si_sys_private = si_private;
+rcu_read_lock();
+task = pid_task(timr->it_pid, PIDTYPE_PID);
+if (task) {
 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+ret = send_sigqueue(timr->sigq, task, shared);
+}
+rcu_read_unlock();
 /* If we failed to send the signal the timer stops. */
 return ret > 0;
 }
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 return ret;
 }
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
 {
 struct task_struct *rtn = current->group_leader;
@@ -425,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
 return NULL;
-return rtn;
+return task_pid(rtn);
 }
 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -464,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 idr_remove(&posix_timers_id, tmr->it_id);
 spin_unlock_irqrestore(&idr_lock, flags);
 }
+put_pid(tmr->it_pid);
 sigqueue_free(tmr->sigq);
 kmem_cache_free(posix_timers_cache, tmr);
 }
@@ -477,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
 {
 struct k_itimer *new_timer;
 int error, new_timer_id;
-struct task_struct *process;
 sigevent_t event;
 int it_id_set = IT_ID_NOT_SET;
@@ -531,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
 goto out;
 }
 rcu_read_lock();
-process = good_sigevent(&event);
-if (process)
-get_task_struct(process);
+new_timer->it_pid = get_pid(good_sigevent(&event));
 rcu_read_unlock();
-if (!process) {
+if (!new_timer->it_pid) {
 error = -EINVAL;
 goto out;
 }
@@ -543,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
 event.sigev_notify = SIGEV_SIGNAL;
 event.sigev_signo = SIGALRM;
 event.sigev_value.sival_int = new_timer->it_id;
-process = current->group_leader;
-get_task_struct(process);
+new_timer->it_pid = get_pid(task_tgid(current));
 }
 new_timer->it_sigev_notify = event.sigev_notify;
@@ -554,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
 new_timer->sigq->info.si_code = SI_TIMER;
 spin_lock_irq(&current->sighand->siglock);
-new_timer->it_process = process;
+new_timer->it_signal = current->signal;
 list_add(&new_timer->list, &current->signal->posix_timers);
 spin_unlock_irq(&current->sighand->siglock);
@@ -589,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 timr = idr_find(&posix_timers_id, (int)timer_id);
 if (timr) {
 spin_lock(&timr->it_lock);
-if (timr->it_process &&
-same_thread_group(timr->it_process, current)) {
+if (timr->it_signal == current->signal) {
 spin_unlock(&idr_lock);
 return timr;
 }
@@ -837,8 +839,7 @@ sys_timer_delete(timer_t timer_id)
 * This keeps any tasks waiting on the spin lock from thinking
 * they got something (see the lock code above).
 */
-put_task_struct(timer->it_process);
-timer->it_process = NULL;
+timer->it_signal = NULL;
 unlock_timer(timer, flags);
 release_posix_timer(timer, IT_ID_SET);
@@ -864,8 +865,7 @@ static void itimer_delete(struct k_itimer *timer)
 * This keeps any tasks waiting on the spin lock from thinking
 * they got something (see the lock code above).
 */
-put_task_struct(timer->it_process);
-timer->it_process = NULL;
+timer->it_signal = NULL;
 unlock_timer(timer, flags);
 release_posix_timer(timer, IT_ID_SET);
......
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 hrtimer_init(&rt_b->rt_period_timer,
 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 rt_b->rt_period_timer.function = sched_rt_period_timer;
-rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 }
 static inline int rt_bandwidth_enabled(void)
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq)
 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 rq->hrtick_timer.function = hrtick;
-rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 }
 #else /* CONFIG_SCHED_HRTICK */
 static inline void hrtick_clear(struct rq *rq)
......
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
 enum hrtimer_restart res = HRTIMER_NORESTART;
-write_seqlock_irq(&xtime_lock);
+write_seqlock(&xtime_lock);
 switch (time_state) {
 case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 }
 update_vsyscall(&xtime, clock);
-write_sequnlock_irq(&xtime_lock);
+write_sequnlock(&xtime_lock);
 return res;
 }
......
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 if (need_resched())
 goto end;
-if (unlikely(local_softirq_pending())) {
+if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 static int ratelimit;
 if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
 /* Schedule the tick, if we are at least one jiffie off */
 if ((long)delta_jiffies >= 1) {
+/*
+* calculate the expiry time for the next timer wheel
+* timer
+*/
+expires = ktime_add_ns(last_update, tick_period.tv64 *
+delta_jiffies);
+/*
+* If this cpu is the one which updates jiffies, then
+* give up the assignment and let it be taken by the
+* cpu which runs the tick timer next, which might be
+* this cpu as well. If we don't drop this here the
+* jiffies might be stale and do_timer() never
+* invoked.
+*/
+if (cpu == tick_do_timer_cpu)
+tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 if (delta_jiffies > 1)
 cpu_set(cpu, nohz_cpu_mask);
+/* Skip reprogram of event if its not changed */
+if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
+goto out;
 /*
 * nohz_stop_sched_tick can be called several times before
 * the nohz_restart_sched_tick is called. This happens when
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 rcu_enter_nohz();
 }
-/*
-* If this cpu is the one which updates jiffies, then
-* give up the assignment and let it be taken by the
-* cpu which runs the tick timer next, which might be
-* this cpu as well. If we don't drop this here the
-* jiffies might be stale and do_timer() never
-* invoked.
-*/
-if (cpu == tick_do_timer_cpu)
-tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 ts->idle_sleeps++;
 /*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 goto out;
 }
-/*
-* calculate the expiry time for the next timer wheel
-* timer
-*/
-expires = ktime_add_ns(last_update, tick_period.tv64 *
-delta_jiffies);
+/* Mark expiries */
 ts->idle_expires = expires;
 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
 */
 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 ts->sched_timer.function = tick_sched_timer;
-ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 /* Get the next period (per cpu) */
 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
......
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 hrtimer->function = stack_trace_timer_fn;
-hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
 }
......
@@ -57,7 +57,6 @@ static int snd_hrtimer_open(struct snd_timer *t)
 return -ENOMEM;
 hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 stime->timer = t;
-stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 stime->hrt.function = snd_hrtimer_callback;
 t->private_data = stime;
 return 0;
......
@@ -96,7 +96,6 @@ static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
 return -EINVAL;
 hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 pcsp_chip.timer.function = pcsp_do_timer;
 card = snd_card_new(index, id, THIS_MODULE, 0);
......