Commit a46d3f9b authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "The timer department presents:

   - A rather large rework of the hrtimer infrastructure which
     introduces softirq based hrtimers to replace the spread of
     hrtimer/tasklet combos which force the actual callback execution
     into softirq context. The approach is completely different from the
     initial implementation which you cursed at 10 years ago rightfully.

     The softirq based timers have their own queues and there is no
     nasty indirection and list reshuffling in the hard interrupt
     anymore. This comes with conversion of some of the hrtimer/tasklet
     users, the rest and the final removal of that horrible interface
     will come towards the end of the merge window or go through the
     relevant maintainer trees.

     Note: The top commit merged the last minute bugfix for the 10 years
     old CPU hotplug bug as I wanted to make sure that I fatfinger the
     merge conflict resolution myself.

   - The overhaul of the STM32 clocksource/clockevents driver

   - A new driver for the Spreadtrum SC9860 timer

   - A new driver for the Actions Semi S700 timer

   - The usual set of fixes and updates all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits)
  usb/gadget/NCM: Replace tasklet with softirq hrtimer
  ALSA/dummy: Replace tasklet with softirq hrtimer
  hrtimer: Implement SOFT/HARD clock base selection
  hrtimer: Implement support for softirq based hrtimers
  hrtimer: Prepare handling of hard and softirq based hrtimers
  hrtimer: Add clock bases and hrtimer mode for softirq context
  hrtimer: Use irqsave/irqrestore around __run_hrtimer()
  hrtimer: Factor out __hrtimer_next_event_base()
  hrtimer: Factor out __hrtimer_start_range_ns()
  hrtimer: Remove the 'base' parameter from hrtimer_reprogram()
  hrtimer: Make remote enqueue decision less restrictive
  hrtimer: Unify remote enqueue handling
  hrtimer: Unify hrtimer removal handling
  hrtimer: Make hrtimer_force_reprogramm() unconditionally available
  hrtimer: Make hrtimer_reprogramm() unconditional
  hrtimer: Make hrtimer_cpu_base.next_timer handling unconditional
  hrtimer: Make the remote enqueue check unconditional
  hrtimer: Use accesor functions instead of direct access
  hrtimer: Make the hrtimer_cpu_base::hres_active field unconditional, to simplify the code
  hrtimer: Make room in 'struct hrtimer_cpu_base'
  ...
parents 7bcd3425 303c146d
...@@ -2,6 +2,7 @@ Actions Semi Owl Timer ...@@ -2,6 +2,7 @@ Actions Semi Owl Timer
Required properties: Required properties:
- compatible : "actions,s500-timer" for S500 - compatible : "actions,s500-timer" for S500
"actions,s700-timer" for S700
"actions,s900-timer" for S900 "actions,s900-timer" for S900
- reg : Offset and length of the register set for the device. - reg : Offset and length of the register set for the device.
- interrupts : Should contain the interrupts. - interrupts : Should contain the interrupts.
......
Spreadtrum timers
The Spreadtrum SC9860 platform provides 3 general-purpose timers.
These timers support a 32-bit or 64-bit counter, as well as
period mode or one-shot mode, and they can act as a wakeup source
during deep sleep.
Required properties:
- compatible: should be "sprd,sc9860-timer" for SC9860 platform.
- reg: The register address of the timer device.
- interrupts: Should contain the interrupt for the timer device.
- clocks: The phandle to the source clock (usually a 32.768 KHz fixed clock).
Example:
timer@40050000 {
compatible = "sprd,sc9860-timer";
reg = <0 0x40050000 0 0x20>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ext_32k>;
};
...@@ -269,6 +269,7 @@ config CLKSRC_STM32 ...@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32 bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST) depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO select CLKSRC_MMIO
select TIMER_OF
config CLKSRC_MPS2 config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
...@@ -441,6 +442,13 @@ config MTK_TIMER ...@@ -441,6 +442,13 @@ config MTK_TIMER
help help
Support for Mediatek timer driver. Support for Mediatek timer driver.
config SPRD_TIMER
bool "Spreadtrum timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select TIMER_OF
help
Enables support for the Spreadtrum timer driver.
config SYS_SUPPORTS_SH_MTU2 config SYS_SUPPORTS_SH_MTU2
bool bool
......
...@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o ...@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o obj-$(CONFIG_OWL_TIMER) += owl-timer.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
......
...@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node) ...@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0; return 0;
} }
CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init); TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init); TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
...@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void) ...@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK, printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000, divided_rate / 1000000,
((divided_rate + 500000) % 1000000) / 1000); ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) { if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use apropriate function to read 32 bit counter */ /* use apropriate function to read 32 bit counter */
......
...@@ -24,7 +24,13 @@ ...@@ -24,7 +24,13 @@
#include "timer-of.h" #include "timer-of.h"
static __init void timer_irq_exit(struct of_timer_irq *of_irq) /**
* timer_of_irq_exit - Release the interrupt
* @of_irq: an of_timer_irq structure pointer
*
* Free the irq resource
*/
static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{ {
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq); struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
...@@ -34,7 +40,23 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq) ...@@ -34,7 +40,23 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt); free_irq(of_irq->irq, clkevt);
} }
static __init int timer_irq_init(struct device_node *np, /**
* timer_of_irq_init - Request the interrupt
* @np: a device tree node pointer
* @of_irq: an of_timer_irq structure pointer
*
* Get the interrupt number from the DT from its definition and
* request it. The interrupt is gotten by falling back the following way:
*
* - Get interrupt number by name
* - Get interrupt number by index
*
* When the interrupt is per CPU, 'request_percpu_irq()' is called,
* otherwise 'request_irq()' is used.
*
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_irq_init(struct device_node *np,
struct of_timer_irq *of_irq) struct of_timer_irq *of_irq)
{ {
int ret; int ret;
...@@ -72,14 +94,29 @@ static __init int timer_irq_init(struct device_node *np, ...@@ -72,14 +94,29 @@ static __init int timer_irq_init(struct device_node *np,
return 0; return 0;
} }
static __init void timer_clk_exit(struct of_timer_clk *of_clk) /**
* timer_of_clk_exit - Release the clock resources
* @of_clk: a of_timer_clk structure pointer
*
* Disables and releases the refcount on the clk
*/
static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{ {
of_clk->rate = 0; of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk); clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk); clk_put(of_clk->clk);
} }
static __init int timer_clk_init(struct device_node *np, /**
* timer_of_clk_init - Initialize the clock resources
* @np: a device tree node pointer
* @of_clk: a of_timer_clk structure pointer
*
* Get the clock by name or by index, enable it and get the rate
*
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_clk_init(struct device_node *np,
struct of_timer_clk *of_clk) struct of_timer_clk *of_clk)
{ {
int ret; int ret;
...@@ -116,19 +153,19 @@ static __init int timer_clk_init(struct device_node *np, ...@@ -116,19 +153,19 @@ static __init int timer_clk_init(struct device_node *np,
goto out; goto out;
} }
static __init void timer_base_exit(struct of_timer_base *of_base) static __init void timer_of_base_exit(struct of_timer_base *of_base)
{ {
iounmap(of_base->base); iounmap(of_base->base);
} }
static __init int timer_base_init(struct device_node *np, static __init int timer_of_base_init(struct device_node *np,
struct of_timer_base *of_base) struct of_timer_base *of_base)
{ {
const char *name = of_base->name ? of_base->name : np->full_name; of_base->base = of_base->name ?
of_io_request_and_map(np, of_base->index, of_base->name) :
of_base->base = of_io_request_and_map(np, of_base->index, name); of_iomap(np, of_base->index);
if (IS_ERR(of_base->base)) { if (IS_ERR(of_base->base)) {
pr_err("Failed to iomap (%s)\n", name); pr_err("Failed to iomap (%s)\n", of_base->name);
return PTR_ERR(of_base->base); return PTR_ERR(of_base->base);
} }
...@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to) ...@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0; int flags = 0;
if (to->flags & TIMER_OF_BASE) { if (to->flags & TIMER_OF_BASE) {
ret = timer_base_init(np, &to->of_base); ret = timer_of_base_init(np, &to->of_base);
if (ret) if (ret)
goto out_fail; goto out_fail;
flags |= TIMER_OF_BASE; flags |= TIMER_OF_BASE;
} }
if (to->flags & TIMER_OF_CLOCK) { if (to->flags & TIMER_OF_CLOCK) {
ret = timer_clk_init(np, &to->of_clk); ret = timer_of_clk_init(np, &to->of_clk);
if (ret) if (ret)
goto out_fail; goto out_fail;
flags |= TIMER_OF_CLOCK; flags |= TIMER_OF_CLOCK;
} }
if (to->flags & TIMER_OF_IRQ) { if (to->flags & TIMER_OF_IRQ) {
ret = timer_irq_init(np, &to->of_irq); ret = timer_of_irq_init(np, &to->of_irq);
if (ret) if (ret)
goto out_fail; goto out_fail;
flags |= TIMER_OF_IRQ; flags |= TIMER_OF_IRQ;
...@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to) ...@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name) if (!to->clkevt.name)
to->clkevt.name = np->name; to->clkevt.name = np->name;
to->np = np;
return ret; return ret;
out_fail: out_fail:
if (flags & TIMER_OF_IRQ) if (flags & TIMER_OF_IRQ)
timer_irq_exit(&to->of_irq); timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK) if (flags & TIMER_OF_CLOCK)
timer_clk_exit(&to->of_clk); timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE) if (flags & TIMER_OF_BASE)
timer_base_exit(&to->of_base); timer_of_base_exit(&to->of_base);
return ret; return ret;
} }
...@@ -187,11 +227,11 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to) ...@@ -187,11 +227,11 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
void __init timer_of_cleanup(struct timer_of *to) void __init timer_of_cleanup(struct timer_of *to)
{ {
if (to->flags & TIMER_OF_IRQ) if (to->flags & TIMER_OF_IRQ)
timer_irq_exit(&to->of_irq); timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK) if (to->flags & TIMER_OF_CLOCK)
timer_clk_exit(&to->of_clk); timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE) if (to->flags & TIMER_OF_BASE)
timer_base_exit(&to->of_base); timer_of_base_exit(&to->of_base);
} }
...@@ -33,6 +33,7 @@ struct of_timer_clk { ...@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of { struct timer_of {
unsigned int flags; unsigned int flags;
struct device_node *np;
struct clock_event_device clkevt; struct clock_event_device clkevt;
struct of_timer_base of_base; struct of_timer_base of_base;
struct of_timer_irq of_irq; struct of_timer_irq of_irq;
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Spreadtrum Communications Inc.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include "timer-of.h"
#define TIMER_NAME "sprd_timer"
#define TIMER_LOAD_LO 0x0
#define TIMER_LOAD_HI 0x4
#define TIMER_VALUE_LO 0x8
#define TIMER_VALUE_HI 0xc
#define TIMER_CTL 0x10
#define TIMER_CTL_PERIOD_MODE BIT(0)
#define TIMER_CTL_ENABLE BIT(1)
#define TIMER_CTL_64BIT_WIDTH BIT(16)
#define TIMER_INT 0x14
#define TIMER_INT_EN BIT(0)
#define TIMER_INT_RAW_STS BIT(1)
#define TIMER_INT_MASK_STS BIT(2)
#define TIMER_INT_CLR BIT(3)
#define TIMER_VALUE_SHDW_LO 0x18
#define TIMER_VALUE_SHDW_HI 0x1c
#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
/*
 * sprd_timer_enable - start the timer with the requested width and mode
 * @base: timer register base address
 * @flag: any combination of TIMER_CTL_64BIT_WIDTH and TIMER_CTL_PERIOD_MODE;
 *        bits not set in @flag are cleared in the control register
 */
static void sprd_timer_enable(void __iomem *base, u32 flag)
{
	u32 ctl = readl_relaxed(base + TIMER_CTL);

	/*
	 * Rebuild the control word: drop the old width/mode bits, force
	 * the enable bit, then copy the width/mode bits straight from
	 * the caller-supplied flag.
	 */
	ctl &= ~(TIMER_CTL_64BIT_WIDTH | TIMER_CTL_PERIOD_MODE);
	ctl |= TIMER_CTL_ENABLE;
	ctl |= flag & (TIMER_CTL_64BIT_WIDTH | TIMER_CTL_PERIOD_MODE);

	writel_relaxed(ctl, base + TIMER_CTL);
}
/*
 * sprd_timer_disable - stop the timer
 * @base: timer register base address
 *
 * Clears only the enable bit; width and mode settings are preserved.
 */
static void sprd_timer_disable(void __iomem *base)
{
	u32 ctl = readl_relaxed(base + TIMER_CTL);

	writel_relaxed(ctl & ~TIMER_CTL_ENABLE, base + TIMER_CTL);
}
/*
 * sprd_timer_update_counter - load a new count into the timer
 * @base: timer register base address
 * @cycles: clock cycles until expiry (only the low 32 bits are used)
 *
 * The high word of the load register is cleared, so the timer is
 * effectively programmed as a 32-bit down-counter here.
 */
static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
{
writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
writel_relaxed(0, base + TIMER_LOAD_HI);
}
/*
 * sprd_timer_enable_interrupt - unmask the timer expiry interrupt
 * @base: timer register base address
 *
 * Writes TIMER_INT_EN alone; the other bits of TIMER_INT are status/clear
 * bits, so no read-modify-write is needed.
 */
static void sprd_timer_enable_interrupt(void __iomem *base)
{
writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
}
/*
 * sprd_timer_clear_interrupt - acknowledge a pending timer interrupt
 * @base: timer register base address
 *
 * Sets the write-to-clear bit while keeping the rest of the interrupt
 * register (notably the enable bit) unchanged.
 */
static void sprd_timer_clear_interrupt(void __iomem *base)
{
	u32 val = readl_relaxed(base + TIMER_INT) | TIMER_INT_CLR;

	writel_relaxed(val, base + TIMER_INT);
}
/*
 * sprd_timer_set_next_event - clockevent one-shot programming callback
 * @cycles: clock cycles until the event should fire
 * @ce: the clock_event_device being programmed
 *
 * Stops the timer, loads the new count and restarts it in one-shot
 * (non-periodic, 32-bit) mode.  Returns 0 (cannot fail).
 */
static int sprd_timer_set_next_event(unsigned long cycles,
struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
sprd_timer_update_counter(timer_of_base(to), cycles);
/* flag 0: one-shot mode, 32-bit width */
sprd_timer_enable(timer_of_base(to), 0);
return 0;
}
/*
 * sprd_timer_set_periodic - clockevent periodic-mode callback
 * @ce: the clock_event_device being programmed
 *
 * Stops the timer, loads one period worth of cycles and restarts it with
 * auto-reload (TIMER_CTL_PERIOD_MODE).  Returns 0 (cannot fail).
 */
static int sprd_timer_set_periodic(struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
return 0;
}
/*
 * sprd_timer_shutdown - clockevent shutdown callback
 * @ce: the clock_event_device being shut down
 *
 * Simply stops the counter; interrupt state is left as-is.
 * Returns 0 (cannot fail).
 */
static int sprd_timer_shutdown(struct clock_event_device *ce)
{
struct timer_of *to = to_timer_of(ce);
sprd_timer_disable(timer_of_base(to));
return 0;
}
/*
 * sprd_timer_interrupt - timer IRQ handler
 * @irq: interrupt number (unused)
 * @dev_id: the clock_event_device registered with the irq
 *
 * Acknowledges the interrupt, stops the timer when running in one-shot
 * mode (so it does not fire again), and forwards the event to the
 * clockevents core via ce->event_handler().
 */
static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(ce);
sprd_timer_clear_interrupt(timer_of_base(to));
/* One-shot events must not recur: halt the counter after expiry. */
if (clockevent_state_oneshot(ce))
sprd_timer_disable(timer_of_base(to));
ce->event_handler(ce);
return IRQ_HANDLED;
}
/*
 * Static timer_of descriptor: the timer-of helper fills in the iomem
 * base, clock rate and IRQ from the device tree (TIMER_OF_BASE |
 * TIMER_OF_CLOCK | TIMER_OF_IRQ) during timer_of_init().
 */
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = TIMER_NAME,
.rating = 300,
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = sprd_timer_shutdown,
.set_state_periodic = sprd_timer_set_periodic,
.set_next_event = sprd_timer_set_next_event,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.handler = sprd_timer_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
/*
 * sprd_timer_init - probe entry point, invoked via TIMER_OF_DECLARE
 * @np: the matching device tree node
 *
 * Maps registers / clock / IRQ through timer_of_init(), unmasks the
 * timer interrupt and registers the clockevent with a 1..UINT_MAX
 * cycle programming range.  Returns 0 on success, a negative errno
 * from timer_of_init() otherwise.
 */
static int __init sprd_timer_init(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &to);
if (ret)
return ret;
sprd_timer_enable_interrupt(timer_of_base(&to));
clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
1, UINT_MAX);
return 0;
}
TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
This diff is collapsed.
...@@ -73,9 +73,7 @@ struct f_ncm { ...@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp; struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count; u16 ndp_dgram_count;
bool timer_force_tx; bool timer_force_tx;
struct tasklet_struct tx_tasklet;
struct hrtimer task_timer; struct hrtimer task_timer;
bool timer_stopping; bool timer_stopping;
}; };
...@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, ...@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */ /* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS, hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
HRTIMER_MODE_REL); HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */ /* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len); ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
...@@ -1148,17 +1146,15 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, ...@@ -1148,17 +1146,15 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
} }
/* /*
* This transmits the NTB if there are frames waiting. * The transmit should only be run if no skb data has been sent
* for a certain duration.
*/ */
static void ncm_tx_tasklet(unsigned long data) static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{ {
struct f_ncm *ncm = (void *)data; struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
if (ncm->timer_stopping)
return;
/* Only send if data is available. */ /* Only send if data is available. */
if (ncm->skb_tx_data) { if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true; ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit /* XXX This allowance of a NULL skb argument to ndo_start_xmit
...@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data) ...@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false; ncm->timer_force_tx = false;
} }
}
/*
* The transmit should only be run if no skb data has been sent
* for a certain duration.
*/
static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
...@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) ...@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open; ncm->port.open = ncm_open;
ncm->port.close = ncm_close; ncm->port.close = ncm_close;
tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm); hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ncm->task_timer.function = ncm_tx_timeout; ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n", DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
...@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) ...@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n"); DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer); hrtimer_cancel(&ncm->task_timer);
tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0; ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f); usb_free_all_descriptors(f);
......
...@@ -28,13 +28,29 @@ struct hrtimer_cpu_base; ...@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/* /*
* Mode arguments of xxx_hrtimer functions: * Mode arguments of xxx_hrtimer functions:
*
* HRTIMER_MODE_ABS - Time value is absolute
* HRTIMER_MODE_REL - Time value is relative to now
* HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
* when starting the timer)
* HRTIMER_MODE_SOFT - Timer callback function will be executed in
* soft irq context
*/ */
enum hrtimer_mode { enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ HRTIMER_MODE_ABS = 0x00,
HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_ABS_PINNED = 0x02, HRTIMER_MODE_SOFT = 0x04,
HRTIMER_MODE_REL_PINNED = 0x03,
HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
}; };
/* /*
...@@ -87,6 +103,7 @@ enum hrtimer_restart { ...@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock) * @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above) * @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative * @is_rel: Set if the timer was armed relative
* @is_soft: Set if hrtimer will be expired in soft interrupt context.
* *
* The hrtimer structure must be initialized by hrtimer_init() * The hrtimer structure must be initialized by hrtimer_init()
*/ */
...@@ -97,6 +114,7 @@ struct hrtimer { ...@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base; struct hrtimer_clock_base *base;
u8 state; u8 state;
u8 is_rel; u8 is_rel;
u8 is_soft;
}; };
/** /**
...@@ -112,9 +130,9 @@ struct hrtimer_sleeper { ...@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
}; };
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
# define HRTIMER_CLOCK_BASE_ALIGN 64 # define __hrtimer_clock_base_align ____cacheline_aligned
#else #else
# define HRTIMER_CLOCK_BASE_ALIGN 32 # define __hrtimer_clock_base_align
#endif #endif
/** /**
...@@ -123,48 +141,57 @@ struct hrtimer_sleeper { ...@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a * @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu. * timer to a base on another cpu.
* @clockid: clock id for per_cpu support * @clockid: clock id for per_cpu support
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers * @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock * @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base * @offset: offset of this clock to the monotonic base
*/ */
struct hrtimer_clock_base { struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base; struct hrtimer_cpu_base *cpu_base;
int index; unsigned int index;
clockid_t clockid; clockid_t clockid;
seqcount_t seq;
struct hrtimer *running;
struct timerqueue_head active; struct timerqueue_head active;
ktime_t (*get_time)(void); ktime_t (*get_time)(void);
ktime_t offset; ktime_t offset;
} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); } __hrtimer_clock_base_align;
enum hrtimer_base_type { enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC, HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME, HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME, HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI, HRTIMER_BASE_TAI,
HRTIMER_BASE_MONOTONIC_SOFT,
HRTIMER_BASE_REALTIME_SOFT,
HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES, HRTIMER_MAX_CLOCK_BASES,
}; };
/* /**
* struct hrtimer_cpu_base - the per cpu clock bases * struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases * @lock: lock protecting the base and associated clock bases
* and timers * and timers
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @cpu: cpu number * @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers * @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events * @clock_was_set_seq: Sequence counter of clock was set events
* @migration_enabled: The migration of hrtimers to other cpus is enabled
* @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @next_timer: Pointer to the first expiring timer
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode * @hres_active: State of high resolution mode
* @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang * @hang_detected: The last hrtimer interrupt detected a hang
* @softirq_activated: displays, if the softirq is raised - update of softirq
* related settings is not required then.
* @nr_events: Total number of hrtimer interrupt events * @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries * @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs * @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt * @max_hang_time: Maximum time spent in hrtimer_interrupt
* @expires_next: absolute time of the next event, is required for remote
* hrtimer enqueue; it is the total first expiry time (hard
* and soft hrtimer are taken into account)
* @next_timer: Pointer to the first expiring timer
* @softirq_expires_next: Time to check, if soft queues needs also to be expired
* @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu * @clock_base: array of clock bases for this cpu
* *
* Note: next_timer is just an optimization for __remove_hrtimer(). * Note: next_timer is just an optimization for __remove_hrtimer().
...@@ -173,31 +200,28 @@ enum hrtimer_base_type { ...@@ -173,31 +200,28 @@ enum hrtimer_base_type {
*/ */
struct hrtimer_cpu_base { struct hrtimer_cpu_base {
raw_spinlock_t lock; raw_spinlock_t lock;
seqcount_t seq;
struct hrtimer *running;
unsigned int cpu; unsigned int cpu;
unsigned int active_bases; unsigned int active_bases;
unsigned int clock_was_set_seq; unsigned int clock_was_set_seq;
bool migration_enabled; unsigned int hres_active : 1,
bool nohz_active; in_hrtirq : 1,
hang_detected : 1,
softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS #ifdef CONFIG_HIGH_RES_TIMERS
unsigned int in_hrtirq : 1,
hres_active : 1,
hang_detected : 1;
ktime_t expires_next;
struct hrtimer *next_timer;
unsigned int nr_events; unsigned int nr_events;
unsigned int nr_retries; unsigned short nr_retries;
unsigned int nr_hangs; unsigned short nr_hangs;
unsigned int max_hang_time; unsigned int max_hang_time;
#endif #endif
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned; } ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{ {
BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
timer->node.expires = time; timer->node.expires = time;
timer->_softexpires = time; timer->_softexpires = time;
} }
...@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) ...@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time(); return timer->base->get_time();
} }
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
timer->base->cpu_base->hres_active : 0;
}
#ifdef CONFIG_HIGH_RES_TIMERS #ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device; struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev); extern void hrtimer_interrupt(struct clock_event_device *dev);
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
}
/* /*
* The resolution of the clocks. The resolution value is returned in * The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an * the clock_getres() system call to give application programmers an
...@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution; ...@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
}
static inline void clock_was_set_delayed(void) { } static inline void clock_was_set_delayed(void) { }
#endif #endif
...@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, ...@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode); u64 range_ns, const enum hrtimer_mode mode);
/** /**
* hrtimer_start - (re)start an hrtimer on the current CPU * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added * @timer: the timer to be added
* @tim: expiry time * @tim: expiry time
* @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
* relative (HRTIMER_MODE_REL) * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
* softirq based mode is considered for debug purpose only!
*/ */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode) const enum hrtimer_mode mode)
...@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) ...@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/ */
static inline int hrtimer_callback_running(struct hrtimer *timer) static inline int hrtimer_callback_running(struct hrtimer *timer)
{ {
return timer->base->cpu_base->running == timer; return timer->base->running == timer;
} }
/* Forward a hrtimer so it expires after now: */ /* Forward a hrtimer so it expires after now: */
...@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, ...@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires, extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta, u64 delta,
const enum hrtimer_mode mode, const enum hrtimer_mode mode,
int clock); clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */ /* Soft interrupt function to run the hrtimer queues: */
......
...@@ -42,13 +42,26 @@ struct cpu_timer_list { ...@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX #define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
#define MAKE_PROCESS_CPUCLOCK(pid, clock) \ static inline clockid_t make_process_cpuclock(const unsigned int pid,
((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) const clockid_t clock)
#define MAKE_THREAD_CPUCLOCK(tid, clock) \ {
MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) return ((~pid) << 3) | clock;
}
static inline clockid_t make_thread_cpuclock(const unsigned int tid,
const clockid_t clock)
{
return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
}
#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) static inline clockid_t fd_to_clockid(const int fd)
#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) {
return make_process_cpuclock((unsigned int) fd, CLOCKFD);
}
static inline int clockid_to_fd(const clockid_t clk)
{
return ~(clk >> 3);
}
#define REQUEUE_PENDING 1 #define REQUEUE_PENDING 1
......
...@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel, ...@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer) TP_ARGS(timer)
); );
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
{ CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
{ CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
{ CLOCK_TAI, "CLOCK_TAI" })
#define decode_hrtimer_mode(mode) \
__print_symbolic(mode, \
{ HRTIMER_MODE_ABS, "ABS" }, \
{ HRTIMER_MODE_REL, "REL" }, \
{ HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
{ HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
{ HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
{ HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
{ HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
{ HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
/** /**
* hrtimer_init - called when the hrtimer is initialized * hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer * @hrtimer: pointer to struct hrtimer
...@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init, ...@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
), ),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
__entry->clockid == CLOCK_REALTIME ? decode_clockid(__entry->clockid),
"CLOCK_REALTIME" : "CLOCK_MONOTONIC", decode_hrtimer_mode(__entry->mode))
__entry->mode == HRTIMER_MODE_ABS ?
"HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
); );
/** /**
...@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init, ...@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/ */
TRACE_EVENT(hrtimer_start, TRACE_EVENT(hrtimer_start,
TP_PROTO(struct hrtimer *hrtimer), TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
TP_ARGS(hrtimer), TP_ARGS(hrtimer, mode),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( void *, hrtimer ) __field( void *, hrtimer )
__field( void *, function ) __field( void *, function )
__field( s64, expires ) __field( s64, expires )
__field( s64, softexpires ) __field( s64, softexpires )
__field( enum hrtimer_mode, mode )
), ),
TP_fast_assign( TP_fast_assign(
...@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start, ...@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function; __entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer); __entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer); __entry->softexpires = hrtimer_get_softexpires(hrtimer);
__entry->mode = mode;
), ),
TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
__entry->hrtimer, __entry->function, "mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires, (unsigned long long) __entry->expires,
(unsigned long long) __entry->softexpires) (unsigned long long) __entry->softexpires,
decode_hrtimer_mode(__entry->mode))
); );
/** /**
......
This diff is collapsed.
...@@ -216,7 +216,7 @@ struct posix_clock_desc { ...@@ -216,7 +216,7 @@ struct posix_clock_desc {
static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd) static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)
{ {
struct file *fp = fget(CLOCKID_TO_FD(id)); struct file *fp = fget(clockid_to_fd(id));
int err = -EINVAL; int err = -EINVAL;
if (!fp) if (!fp)
......
...@@ -1189,9 +1189,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, ...@@ -1189,9 +1189,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 now; u64 now;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval) { if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
/* /*
* We are setting itimer. The *oldval is absolute and we update * We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update * it to be relative, *newval argument is relative and we update
...@@ -1363,8 +1362,8 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block) ...@@ -1363,8 +1362,8 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t); return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
} }
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED) #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED) #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock, static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp) struct timespec64 *tp)
......
...@@ -150,16 +150,15 @@ static inline void tick_nohz_init(void) { } ...@@ -150,16 +150,15 @@ static inline void tick_nohz_init(void) { }
#ifdef CONFIG_NO_HZ_COMMON #ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active; extern unsigned long tick_nohz_active;
#else extern void timers_update_nohz(void);
# ifdef CONFIG_SMP
extern struct static_key_false timers_migration_enabled;
# endif
#else /* CONFIG_NO_HZ_COMMON */
static inline void timers_update_nohz(void) { }
#define tick_nohz_active (0) #define tick_nohz_active (0)
#endif #endif
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void timers_update_migration(bool update_nohz);
#else
static inline void timers_update_migration(bool update_nohz) { }
#endif
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem); extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
......
...@@ -1107,7 +1107,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode) ...@@ -1107,7 +1107,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
ts->nohz_mode = mode; ts->nohz_mode = mode;
/* One update is enough */ /* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active)) if (!test_and_set_bit(0, &tick_nohz_active))
timers_update_migration(true); timers_update_nohz();
} }
/** /**
......
...@@ -200,8 +200,6 @@ struct timer_base { ...@@ -200,8 +200,6 @@ struct timer_base {
unsigned long clk; unsigned long clk;
unsigned long next_expiry; unsigned long next_expiry;
unsigned int cpu; unsigned int cpu;
bool migration_enabled;
bool nohz_active;
bool is_idle; bool is_idle;
bool must_forward_clk; bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE); DECLARE_BITMAP(pending_map, WHEEL_SIZE);
...@@ -210,45 +208,64 @@ struct timer_base { ...@@ -210,45 +208,64 @@ struct timer_base {
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) #ifdef CONFIG_NO_HZ_COMMON
static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);
static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);
#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1; unsigned int sysctl_timer_migration = 1;
void timers_update_migration(bool update_nohz) DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
static void timers_update_migration(void)
{ {
bool on = sysctl_timer_migration && tick_nohz_active; if (sysctl_timer_migration && tick_nohz_active)
unsigned int cpu; static_branch_enable(&timers_migration_enabled);
else
static_branch_disable(&timers_migration_enabled);
}
#else
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */
/* Avoid the loop, if nothing to update */ static void timer_update_keys(struct work_struct *work)
if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on) {
return; mutex_lock(&timer_keys_mutex);
timers_update_migration();
static_branch_enable(&timers_nohz_active);
mutex_unlock(&timer_keys_mutex);
}
for_each_possible_cpu(cpu) { void timers_update_nohz(void)
per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on; {
per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on; schedule_work(&timer_update_work);
per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
if (!update_nohz)
continue;
per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
per_cpu(hrtimer_bases.nohz_active, cpu) = true;
}
} }
int timer_migration_handler(struct ctl_table *table, int write, int timer_migration_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, void __user *buffer, size_t *lenp,
loff_t *ppos) loff_t *ppos)
{ {
static DEFINE_MUTEX(mutex);
int ret; int ret;
mutex_lock(&mutex); mutex_lock(&timer_keys_mutex);
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write) if (!ret && write)
timers_update_migration(false); timers_update_migration();
mutex_unlock(&mutex); mutex_unlock(&timer_keys_mutex);
return ret; return ret;
} }
#endif
static inline bool is_timers_nohz_active(void)
{
return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */
static unsigned long round_jiffies_common(unsigned long j, int cpu, static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up) bool force_up)
...@@ -534,7 +551,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer) ...@@ -534,7 +551,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
static void static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{ {
if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) if (!is_timers_nohz_active())
return; return;
/* /*
...@@ -849,21 +866,20 @@ static inline struct timer_base *get_timer_base(u32 tflags) ...@@ -849,21 +866,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
} }
#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base * static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags) get_target_base(struct timer_base *base, unsigned tflags)
{ {
#ifdef CONFIG_SMP #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
if ((tflags & TIMER_PINNED) || !base->migration_enabled) if (static_branch_likely(&timers_migration_enabled) &&
return get_timer_this_cpu_base(tflags); !(tflags & TIMER_PINNED))
return get_timer_cpu_base(tflags, get_nohz_timer_target()); return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
return get_timer_this_cpu_base(tflags);
#endif #endif
return get_timer_this_cpu_base(tflags);
} }
static inline void forward_timer_base(struct timer_base *base) static inline void forward_timer_base(struct timer_base *base)
{ {
#ifdef CONFIG_NO_HZ_COMMON
unsigned long jnow; unsigned long jnow;
/* /*
...@@ -887,16 +903,8 @@ static inline void forward_timer_base(struct timer_base *base) ...@@ -887,16 +903,8 @@ static inline void forward_timer_base(struct timer_base *base)
base->clk = jnow; base->clk = jnow;
else else
base->clk = base->next_expiry; base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
return get_timer_this_cpu_base(tflags);
}
static inline void forward_timer_base(struct timer_base *base) { }
#endif #endif
}
/* /*
......
...@@ -375,17 +375,9 @@ struct dummy_hrtimer_pcm { ...@@ -375,17 +375,9 @@ struct dummy_hrtimer_pcm {
ktime_t period_time; ktime_t period_time;
atomic_t running; atomic_t running;
struct hrtimer timer; struct hrtimer timer;
struct tasklet_struct tasklet;
struct snd_pcm_substream *substream; struct snd_pcm_substream *substream;
}; };
static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
{
struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
if (atomic_read(&dpcm->running))
snd_pcm_period_elapsed(dpcm->substream);
}
static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer) static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
{ {
struct dummy_hrtimer_pcm *dpcm; struct dummy_hrtimer_pcm *dpcm;
...@@ -393,7 +385,14 @@ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer) ...@@ -393,7 +385,14 @@ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer); dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
if (!atomic_read(&dpcm->running)) if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
tasklet_schedule(&dpcm->tasklet); /*
* In cases of XRUN and draining, this calls .trigger to stop PCM
* substream.
*/
snd_pcm_period_elapsed(dpcm->substream);
if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART;
hrtimer_forward_now(timer, dpcm->period_time); hrtimer_forward_now(timer, dpcm->period_time);
return HRTIMER_RESTART; return HRTIMER_RESTART;
} }
...@@ -403,7 +402,7 @@ static int dummy_hrtimer_start(struct snd_pcm_substream *substream) ...@@ -403,7 +402,7 @@ static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer); dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL); hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL_SOFT);
atomic_set(&dpcm->running, 1); atomic_set(&dpcm->running, 1);
return 0; return 0;
} }
...@@ -413,6 +412,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) ...@@ -413,6 +412,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
atomic_set(&dpcm->running, 0); atomic_set(&dpcm->running, 0);
if (!hrtimer_callback_running(&dpcm->timer))
hrtimer_cancel(&dpcm->timer); hrtimer_cancel(&dpcm->timer);
return 0; return 0;
} }
...@@ -420,7 +420,6 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) ...@@ -420,7 +420,6 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{ {
hrtimer_cancel(&dpcm->timer); hrtimer_cancel(&dpcm->timer);
tasklet_kill(&dpcm->tasklet);
} }
static snd_pcm_uframes_t static snd_pcm_uframes_t
...@@ -465,12 +464,10 @@ static int dummy_hrtimer_create(struct snd_pcm_substream *substream) ...@@ -465,12 +464,10 @@ static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
if (!dpcm) if (!dpcm)
return -ENOMEM; return -ENOMEM;
substream->runtime->private_data = dpcm; substream->runtime->private_data = dpcm;
hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dpcm->timer.function = dummy_hrtimer_callback; dpcm->timer.function = dummy_hrtimer_callback;
dpcm->substream = substream; dpcm->substream = substream;
atomic_set(&dpcm->running, 0); atomic_set(&dpcm->running, 0);
tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
(unsigned long)dpcm);
return 0; return 0;
} }
......
...@@ -60,9 +60,7 @@ static int clock_adjtime(clockid_t id, struct timex *tx) ...@@ -60,9 +60,7 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
static clockid_t get_clockid(int fd) static clockid_t get_clockid(int fd)
{ {
#define CLOCKFD 3 #define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) return (((unsigned int) ~fd) << 3) | CLOCKFD;
return FD_TO_CLOCKID(fd);
} }
static void handle_alarm(int s) static void handle_alarm(int s)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment