Commit 99fa0ad9 authored by Linus Torvalds

Merge tag 'suspend-to-idle-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull suspend-to-idle updates from Rafael Wysocki:
 "Suspend-to-idle timer quiescing support for v3.20-rc1

  Until now suspend-to-idle has not been able to save much more energy
  than runtime PM because of timer interrupts that periodically bring
  CPUs out of idle while they are waiting for a wakeup interrupt.  Of
  course, the timer interrupts are not wakeup ones, so the handling of
  them can be deferred until a real wakeup interrupt happens, but at the
  same time we don't want to mass-expire timers at that point.

  The solution is to suspend the entire timekeeping when the last CPU is
  entering an idle state and resume it when the first CPU goes out of
  idle.  That has to be done with care, though, so as to avoid accessing
  suspended clocksources etc., and we need extra support from idle
  drivers for that.

  This series of commits adds support for quiescing timers during
  suspend-to-idle and adds the requisite callbacks to intel_idle and the
  ACPI cpuidle driver"

* tag 'suspend-to-idle-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / idle: Implement ->enter_freeze callback routine
  intel_idle: Add ->enter_freeze callbacks
  PM / sleep: Make it possible to quiesce timers during suspend-to-idle
  timekeeping: Make it safe to use the fast timekeeper while suspended
  timekeeping: Pass readout base to update_fast_timekeeper()
  PM / sleep: Re-implement suspend-to-idle handling
parents 1d9e7140 5f508185
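For orientation before the diff itself, here is a stand-alone user-space sketch (plain C, build with -pthread) of the counting scheme the message describes and that tick_freeze()/tick_unfreeze() implement further down: each CPU entering the frozen idle path bumps a depth counter under a lock, the CPU that makes the counter reach the number of online CPUs suspends timekeeping, and the first CPU back out resumes it. This is illustrative only; every name below is made up for the sketch and none of it is kernel code.

/*
 * User-space simulation of the tick_freeze()/tick_unfreeze() depth counter.
 * "Suspending timekeeping" and "suspending the local tick" are just printouts.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int freeze_depth;

static void timekeeping_suspend_sim(void) { puts("timekeeping suspended (last CPU idle)"); }
static void timekeeping_resume_sim(void)  { puts("timekeeping resumed (first CPU woke)"); }
static void local_tick_suspend_sim(void)  { puts("local tick suspended"); }
static void local_tick_resume_sim(void)   { puts("local tick resumed"); }

static void tick_freeze_sim(void)
{
	pthread_mutex_lock(&freeze_lock);
	if (++freeze_depth == NR_CPUS)
		timekeeping_suspend_sim();	/* last CPU going idle */
	else
		local_tick_suspend_sim();	/* only this CPU's tick */
	pthread_mutex_unlock(&freeze_lock);
}

static void tick_unfreeze_sim(void)
{
	pthread_mutex_lock(&freeze_lock);
	if (freeze_depth == NR_CPUS)
		timekeeping_resume_sim();	/* first CPU coming back out */
	else
		local_tick_resume_sim();
	freeze_depth--;
	pthread_mutex_unlock(&freeze_lock);
}

static void *cpu_thread(void *arg)
{
	(void)arg;
	tick_freeze_sim();
	/* ... the CPU would now sit in its deepest ->enter_freeze state ... */
	tick_unfreeze_sim();
	return NULL;
}

int main(void)
{
	pthread_t cpus[NR_CPUS];

	for (int i = 0; i < NR_CPUS; i++)
		pthread_create(&cpus[i], NULL, cpu_thread, NULL);
	for (int i = 0; i < NR_CPUS; i++)
		pthread_join(cpus[i], NULL);
	return 0;
}

The asymmetry (increment before the comparison on entry, comparison before the decrement on exit) is what makes "last CPU in" and "first CPU out" both see the counter equal to the number of online CPUs.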
@@ -732,9 +732,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
-		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
-		!pr->flags.has_cst;
+	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
+		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 }
 
 static int c3_cpu_count;
@@ -744,9 +743,10 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @pr: Target processor
  * @cx: Target state context
+ * @timer_bc: Whether or not to change timer mode to broadcast
  */
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
-			       struct acpi_processor_cx *cx)
+			       struct acpi_processor_cx *cx, bool timer_bc)
 {
 	acpi_unlazy_tlb(smp_processor_id());
 
@@ -754,6 +754,7 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 1);
 
 	/*
@@ -784,6 +785,7 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 		raw_spin_unlock(&c3_lock);
 	}
 
-	lapic_timer_state_broadcast(pr, cx, 0);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 0);
 }
 
@@ -798,12 +800,12 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 		return -EINVAL;
 
 	if (cx->type != ACPI_STATE_C1) {
-		if (acpi_idle_fallback_to_c1(pr)) {
+		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 			index = CPUIDLE_DRIVER_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
 		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-				acpi_idle_enter_bm(pr, cx);
+				acpi_idle_enter_bm(pr, cx, true);
 				return index;
 			} else if (drv->safe_state_index >= 0) {
 				index = drv->safe_state_index;
@@ -827,6 +829,27 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 	return index;
 }
 
+static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+
+	if (cx->type == ACPI_STATE_C3) {
+		struct acpi_processor *pr = __this_cpu_read(processors);
+
+		if (unlikely(!pr))
+			return;
+
+		if (pr->flags.bm_check) {
+			acpi_idle_enter_bm(pr, cx, false);
+			return;
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+		}
+	}
+	acpi_idle_do_entry(cx);
+}
+
 struct cpuidle_driver acpi_idle_driver = {
 	.name = "acpi_idle",
 	.owner = THIS_MODULE,
@@ -925,6 +948,15 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 		}
+		/*
+		 * Halt-induced C1 is not good for ->enter_freeze, because it
+		 * re-enables interrupts on exit.  Moreover, C1 is generally not
+		 * particularly interesting from the suspend-to-idle angle, so
+		 * avoid C1 and the situations in which we may need to fall back
+		 * to it altogether.
+		 */
+		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
+			state->enter_freeze = acpi_idle_enter_freeze;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
......
@@ -19,6 +19,8 @@
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>
 
 #include "cpuidle.h"
@@ -32,7 +34,6 @@ LIST_HEAD(cpuidle_detected_devices);
 static int enabled_devices;
 static int off __read_mostly;
 static int initialized __read_mostly;
-static bool use_deepest_state __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -66,36 +67,23 @@ int cpuidle_play_dead(void)
 }
 
 /**
- * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
- * @enable: Whether enable or disable the feature.
- *
- * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and
- * always use the state with the greatest exit latency (out of the states that
- * are not disabled).
- *
- * This function can only be called after cpuidle_pause() to avoid races.
- */
-void cpuidle_use_deepest_state(bool enable)
-{
-	use_deepest_state = enable;
-}
-
-/**
- * cpuidle_find_deepest_state - Find the state of the greatest exit latency.
- * @drv: cpuidle driver for a given CPU.
- * @dev: cpuidle device for a given CPU.
+ * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ * @freeze: Whether or not the state should be suitable for suspend-to-idle.
  */
 static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev)
+				      struct cpuidle_device *dev, bool freeze)
 {
 	unsigned int latency_req = 0;
-	int i, ret = CPUIDLE_DRIVER_STATE_START - 1;
+	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
 
 	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-		if (s->disabled || su->disable || s->exit_latency <= latency_req)
+		if (s->disabled || su->disable || s->exit_latency <= latency_req
+		    || (freeze && !s->enter_freeze))
 			continue;
 
 		latency_req = s->exit_latency;
@@ -104,6 +92,63 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }
 
+static void enter_freeze_proper(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev, int index)
+{
+	tick_freeze();
+	/*
+	 * The state used here cannot be a "coupled" one, because the "coupled"
+	 * cpuidle mechanism enables interrupts and doing that with timekeeping
+	 * suspended is generally unsafe.
+	 */
+	drv->states[index].enter_freeze(dev, drv, index);
+	WARN_ON(!irqs_disabled());
+	/*
+	 * timekeeping_resume() that will be called by tick_unfreeze() for the
+	 * last CPU executing it calls functions containing RCU read-side
+	 * critical sections, so tell RCU about that.
+	 */
+	RCU_NONIDLE(tick_unfreeze());
+}
+
+/**
+ * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ *
+ * If there are states with the ->enter_freeze callback, find the deepest of
+ * them and enter it with frozen tick.  Otherwise, find the deepest state
+ * available and enter it normally.
+ */
+void cpuidle_enter_freeze(void)
+{
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	int index;
+
+	/*
+	 * Find the deepest state with ->enter_freeze present, which guarantees
+	 * that interrupts won't be enabled when it exits and allows the tick to
+	 * be frozen safely.
+	 */
+	index = cpuidle_find_deepest_state(drv, dev, true);
+	if (index >= 0) {
+		enter_freeze_proper(drv, dev, index);
+		return;
+	}
+
+	/*
+	 * It is not safe to freeze the tick, find the deepest state available
+	 * at all and try to enter it normally.
+	 */
+	index = cpuidle_find_deepest_state(drv, dev, false);
+	if (index >= 0)
+		cpuidle_enter(drv, dev, index);
+	else
+		arch_cpu_idle();
+
+	/* Interrupts are enabled again here. */
+	local_irq_disable();
+}
+
 /**
  * cpuidle_enter_state - enter the state and update stats
  * @dev: cpuidle device for this cpu
@@ -166,9 +211,6 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	if (!drv || !dev || !dev->enabled)
 		return -EBUSY;
 
-	if (unlikely(use_deepest_state))
-		return cpuidle_find_deepest_state(drv, dev);
-
 	return cpuidle_curr_governor->select(drv, dev);
 }
 
@@ -200,7 +242,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 void cpuidle_reflect(struct cpuidle_device *dev, int index)
 {
-	if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state))
+	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev, index);
 }
......
This diff is collapsed.
@@ -50,6 +50,15 @@ struct cpuidle_state {
 			int index);
 
 	int (*enter_dead) (struct cpuidle_device *dev, int index);
+
+	/*
+	 * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+	 * suspended, so it must not re-enable interrupts at any point (even
+	 * temporarily) or attempt to change states of clock event devices.
+	 */
+	void (*enter_freeze) (struct cpuidle_device *dev,
+			      struct cpuidle_driver *drv,
+			      int index);
 };
 
 /* Idle State Flags */
@@ -141,7 +150,7 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_enter_freeze(void);
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
@@ -174,7 +183,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable) {}
+static inline void cpuidle_enter_freeze(void) { }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 					struct cpuidle_device *dev) {return NULL; }
 #endif
......
@@ -201,6 +201,21 @@ struct platform_freeze_ops {
  */
 extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
+
+/* Suspend-to-idle state machine. */
+enum freeze_state {
+	FREEZE_STATE_NONE,	/* Not suspended/suspending. */
+	FREEZE_STATE_ENTER,	/* Enter suspend-to-idle. */
+	FREEZE_STATE_WAKE,	/* Wake up from suspend-to-idle. */
+};
+
+extern enum freeze_state __read_mostly suspend_freeze_state;
+
+static inline bool idle_should_freeze(void)
+{
+	return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+}
+
 extern void freeze_set_ops(const struct platform_freeze_ops *ops);
 extern void freeze_wake(void);
@@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state);
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool idle_should_freeze(void) { return false; }
 
 static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
 static inline void freeze_wake(void) {}
 #endif /* !CONFIG_SUSPEND */
......
@@ -79,6 +79,9 @@ extern void __init tick_init(void);
 extern int tick_is_oneshot_available(void);
 extern struct tick_device *tick_get_device(int cpu);
 
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+
 # ifdef CONFIG_HIGH_RES_TIMERS
 extern int tick_init_highres(void);
 extern int tick_program_event(ktime_t expires, int force);
@@ -119,6 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
 static inline void tick_init(void) { }
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
@@ -226,5 +231,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk)
 		__tick_nohz_task_switch(tsk);
 }
 
 #endif
@@ -37,7 +37,9 @@ const char *pm_states[PM_SUSPEND_MAX];
 static const struct platform_suspend_ops *suspend_ops;
 static const struct platform_freeze_ops *freeze_ops;
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
-static bool suspend_freeze_wake;
+
+enum freeze_state __read_mostly suspend_freeze_state;
+static DEFINE_SPINLOCK(suspend_freeze_lock);
 
 void freeze_set_ops(const struct platform_freeze_ops *ops)
 {
@@ -48,22 +50,49 @@ void freeze_set_ops(const struct platform_freeze_ops *ops)
 static void freeze_begin(void)
 {
-	suspend_freeze_wake = false;
+	suspend_freeze_state = FREEZE_STATE_NONE;
 }
 
 static void freeze_enter(void)
 {
-	cpuidle_use_deepest_state(true);
+	spin_lock_irq(&suspend_freeze_lock);
+	if (pm_wakeup_pending())
+		goto out;
+
+	suspend_freeze_state = FREEZE_STATE_ENTER;
+	spin_unlock_irq(&suspend_freeze_lock);
+
+	get_online_cpus();
 	cpuidle_resume();
-	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+
+	/* Push all the CPUs into the idle loop. */
+	wake_up_all_idle_cpus();
+	pr_debug("PM: suspend-to-idle\n");
+	/* Make the current CPU wait so it can enter the idle loop too. */
+	wait_event(suspend_freeze_wait_head,
+		   suspend_freeze_state == FREEZE_STATE_WAKE);
+	pr_debug("PM: resume from suspend-to-idle\n");
+
 	cpuidle_pause();
-	cpuidle_use_deepest_state(false);
+	put_online_cpus();
+
+	spin_lock_irq(&suspend_freeze_lock);
+
+ out:
+	suspend_freeze_state = FREEZE_STATE_NONE;
+	spin_unlock_irq(&suspend_freeze_lock);
 }
 
 void freeze_wake(void)
 {
-	suspend_freeze_wake = true;
-	wake_up(&suspend_freeze_wait_head);
+	unsigned long flags;
+
+	spin_lock_irqsave(&suspend_freeze_lock, flags);
+	if (suspend_freeze_state > FREEZE_STATE_NONE) {
+		suspend_freeze_state = FREEZE_STATE_WAKE;
+		wake_up(&suspend_freeze_wait_head);
+	}
+	spin_unlock_irqrestore(&suspend_freeze_lock, flags);
 }
 EXPORT_SYMBOL_GPL(freeze_wake);
......
@@ -7,6 +7,7 @@
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
+#include <linux/suspend.h>
 
 #include <asm/tlb.h>
@@ -104,6 +105,21 @@ static void cpuidle_idle_call(void)
 	 */
 	rcu_idle_enter();
 
+	/*
+	 * Suspend-to-idle ("freeze") is a system state in which all user space
+	 * has been frozen, all I/O devices have been suspended and the only
+	 * activity happens here and in interrupts (if any).  In that case bypass
+	 * the cpuidle governor and go straight for the deepest idle state
+	 * available.  Possibly also suspend the local tick and the entire
+	 * timekeeping to prevent timer interrupts from kicking us out of idle
+	 * until a proper wakeup interrupt happens.
+	 */
+	if (idle_should_freeze()) {
+		cpuidle_enter_freeze();
+		local_irq_enable();
+		goto exit_idle;
+	}
+
 	/*
 	 * Ask the cpuidle framework to choose a convenient idle state.
 	 * Fall back to the default arch idle method on errors.
......
@@ -394,6 +394,56 @@ void tick_resume(void)
 	}
 }
 
+static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
+static unsigned int tick_freeze_depth;
+
+/**
+ * tick_freeze - Suspend the local tick and (possibly) timekeeping.
+ *
+ * Check if this is the last online CPU executing the function and if so,
+ * suspend timekeeping.  Otherwise suspend the local tick.
+ *
+ * Call with interrupts disabled.  Must be balanced with %tick_unfreeze().
+ * Interrupts must not be enabled before the subsequent %tick_unfreeze().
+ */
+void tick_freeze(void)
+{
+	raw_spin_lock(&tick_freeze_lock);
+
+	tick_freeze_depth++;
+	if (tick_freeze_depth == num_online_cpus()) {
+		timekeeping_suspend();
+	} else {
+		tick_suspend();
+		tick_suspend_broadcast();
+	}
+
+	raw_spin_unlock(&tick_freeze_lock);
+}
+
+/**
+ * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
+ *
+ * Check if this is the first CPU executing the function and if so, resume
+ * timekeeping.  Otherwise resume the local tick.
+ *
+ * Call with interrupts disabled.  Must be balanced with %tick_freeze().
+ * Interrupts must not be enabled after the preceding %tick_freeze().
+ */
+void tick_unfreeze(void)
+{
+	raw_spin_lock(&tick_freeze_lock);
+
+	if (tick_freeze_depth == num_online_cpus())
+		timekeeping_resume();
+	else
+		tick_resume();
+
+	tick_freeze_depth--;
+
+	raw_spin_unlock(&tick_freeze_lock);
+}
+
 /**
  * tick_init - initialize the tick control
  */
......
@@ -230,9 +230,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 /**
  * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
- * @tk: The timekeeper from which we take the update
- * @tkf: The fast timekeeper to update
- * @tbase: The time base for the fast timekeeper (mono/raw)
+ * @tkr: Timekeeping readout base from which we take the update
  *
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
@@ -244,11 +242,11 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
  * smp_wmb();	<- Ensure that the last base[1] update is visible
  * tkf->seq++;
  * smp_wmb();	<- Ensure that the seqcount update is visible
- * update(tkf->base[0], tk);
+ * update(tkf->base[0], tkr);
  * smp_wmb();	<- Ensure that the base[0] update is visible
  * tkf->seq++;
  * smp_wmb();	<- Ensure that the seqcount update is visible
- * update(tkf->base[1], tk);
+ * update(tkf->base[1], tkr);
  *
  * The reader side does:
  *
@@ -269,7 +267,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
  * slightly wrong timestamp (a few nanoseconds).  See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct timekeeper *tk)
+static void update_fast_timekeeper(struct tk_read_base *tkr)
 {
 	struct tk_read_base *base = tk_fast_mono.base;
@@ -277,7 +275,7 @@ static void update_fast_timekeeper(struct timekeeper *tk)
 	raw_write_seqcount_latch(&tk_fast_mono.seq);
 
 	/* Update base[0] */
-	memcpy(base, &tk->tkr, sizeof(*base));
+	memcpy(base, tkr, sizeof(*base));
 
 	/* Force readers back to base[0] */
 	raw_write_seqcount_latch(&tk_fast_mono.seq);
@@ -334,6 +332,35 @@ u64 notrace ktime_get_mono_fast_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
 
+/* Suspend-time cycles value for halted fast timekeeper. */
+static cycle_t cycles_at_suspend;
+
+static cycle_t dummy_clock_read(struct clocksource *cs)
+{
+	return cycles_at_suspend;
+}
+
+/**
+ * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
+ * @tk: Timekeeper to snapshot.
+ *
+ * It generally is unsafe to access the clocksource after timekeeping has been
+ * suspended, so take a snapshot of the readout base of @tk and use it as the
+ * fast timekeeper's readout base while suspended.  It will return the same
+ * number of cycles every time until timekeeping is resumed at which time the
+ * proper readout base for the fast timekeeper will be restored automatically.
+ */
+static void halt_fast_timekeeper(struct timekeeper *tk)
+{
+	static struct tk_read_base tkr_dummy;
+	struct tk_read_base *tkr = &tk->tkr;
+
+	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+	cycles_at_suspend = tkr->read(tkr->clock);
+	tkr_dummy.read = dummy_clock_read;
+	update_fast_timekeeper(&tkr_dummy);
+}
+
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
 
 static inline void update_vsyscall(struct timekeeper *tk)
@@ -462,7 +489,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
 		       sizeof(tk_core.timekeeper));
 
-	update_fast_timekeeper(tk);
+	update_fast_timekeeper(&tk->tkr);
 }
 
 /**
@@ -1170,7 +1197,7 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
  * xtime/wall_to_monotonic/jiffies/etc are
  * still managed by arch specific suspend/resume code.
  */
-static void timekeeping_resume(void)
+void timekeeping_resume(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct clocksource *clock = tk->tkr.clock;
@@ -1251,7 +1278,7 @@ static void timekeeping_resume(void)
 	hrtimers_resume();
 }
 
-static int timekeeping_suspend(void)
+int timekeeping_suspend(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
@@ -1296,6 +1323,7 @@ static int timekeeping_suspend(void)
 	}
 
 	timekeeping_update(tk, TK_MIRROR);
+	halt_fast_timekeeper(tk);
 
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
......
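As a side note on the halt_fast_timekeeper() hunk above, the trick is to swap the readout base's read() function pointer for one that returns a snapshot taken at suspend time, so NMI-safe readers keep getting a consistent (frozen) value without ever touching a suspended clocksource. Below is a stand-alone user-space sketch of that pattern; struct readout_base, hw_clock_read() and halt_fast_timekeeper_sim() are made-up names for illustration, not kernel code.

/*
 * User-space illustration of the "dummy clock read" substitution: after the
 * swap, callers go through the same pointer but see a frozen snapshot.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct readout_base {
	uint64_t (*read)(void);		/* analogous to tkr->read(tkr->clock) */
};

static uint64_t hw_clock_read(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t cycles_at_suspend;

static uint64_t dummy_clock_read(void)
{
	return cycles_at_suspend;	/* frozen snapshot, always the same */
}

static struct readout_base fast_tk = { .read = hw_clock_read };

static void halt_fast_timekeeper_sim(void)
{
	cycles_at_suspend = fast_tk.read();	/* snapshot the live clock */
	fast_tk.read = dummy_clock_read;	/* readers now see the snapshot */
}

int main(void)
{
	printf("live:   %llu\n", (unsigned long long)fast_tk.read());
	halt_fast_timekeeper_sim();
	printf("frozen: %llu\n", (unsigned long long)fast_tk.read());
	printf("frozen: %llu\n", (unsigned long long)fast_tk.read());
	return 0;
}

In the kernel version the swap is done on a static copy of the readout base (tkr_dummy) and published through update_fast_timekeeper(), so the normal readout base is restored automatically when timekeeping_resume() runs the next regular update.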
@@ -16,5 +16,7 @@ extern int timekeeping_inject_offset(struct timespec *ts);
 extern s32 timekeeping_get_tai_offset(void);
 extern void timekeeping_set_tai_offset(s32 tai_offset);
 extern void timekeeping_clocktai(struct timespec *ts);
+extern int timekeeping_suspend(void);
+extern void timekeeping_resume(void);
 
 #endif