Commit cd336f65 authored by Linus Torvalds

Merge tag 'timers-core-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "Time, timekeeping and related device driver updates:

  Core:

   - A set of fixes, cleanups and enhancements to the posix timer code:

       - Prevent another possible live lock scenario in the exit() path,
         which affects POSIX_CPU_TIMERS_TASK_WORK enabled architectures.

       - Fix a loop termination issue which was reported by
         syzcaller/KSAN in the posix timer ID allocation code.

         That triggered a deeper look into the posix-timer code which
         unearthed more small issues.

       - Add missing READ/WRITE_ONCE() annotations

       - Fix or remove completely outdated comments

       - Document places which are subtle and completely undocumented.

   - Add missing hrtimer modes to the trace event decoder

   - Small cleanups and enhancements all over the place

  Drivers:

   - Rework the Hyper-V clocksource and sched clock setup code

   - Remove a deprecated clocksource driver

   - Small fixes and enhancements all over the place"

* tag 'timers-core-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip: (39 commits)
  clocksource/drivers/cadence-ttc: Fix memory leak in ttc_timer_probe
  dt-bindings: timers: Add Ralink SoCs timer
  clocksource/drivers/hyper-v: Rework clocksource and sched clock setup
  dt-bindings: timer: brcm,kona-timer: convert to YAML
  clocksource/drivers/imx-gpt: Fold <soc/imx/timer.h> into its only user
  clk: imx: Drop inclusion of unused header <soc/imx/timer.h>
  hrtimer: Add missing sparse annotations to hrtimer locking
  clocksource/drivers/imx-gpt: Use only a single name for functions
  clocksource/drivers/loongson1: Move PWM timer to clocksource framework
  dt-bindings: timer: Add Loongson-1 clocksource
  MIPS: Loongson32: Remove deprecated PWM timer clocksource
  clocksource/drivers/ingenic-timer: Use pm_sleep_ptr() macro
  tracing/timer: Add missing hrtimer modes to decode_hrtimer_mode().
  posix-timers: Add sys_ni_posix_timers() prototype
  tick/rcu: Fix bogus ratelimit condition
  alarmtimer: Remove unnecessary (void *) cast
  alarmtimer: Remove unnecessary initialization of variable 'ret'
  posix-timers: Refer properly to CONFIG_HIGH_RES_TIMERS
  posix-timers: Polish coding style in a few places
  posix-timers: Remove pointless comments
  ...
parents 9244724f d2b32be7
Broadcom Kona Family timer
-----------------------------------------------------
This timer is used in the following Broadcom SoCs:
BCM11130, BCM11140, BCM11351, BCM28145, BCM28155
Required properties:
- compatible : "brcm,kona-timer"
- DEPRECATED: compatible : "bcm,kona-timer"
- reg : Register range for the timer
- interrupts : interrupt for the timer
- clocks: phandle + clock specifier pair of the external clock
- clock-frequency: frequency at which the clock operates

Only one of clocks or clock-frequency should be specified; an alternative
example using clock-frequency is shown after the main example below.
Refer to clocks/clock-bindings.txt for generic clock consumer properties.
Example:
	timer@35006000 {
		compatible = "brcm,kona-timer";
		reg = <0x35006000 0x1000>;
		interrupts = <0x0 7 0x4>;
		clocks = <&hub_timer_clk>;
	};
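Alternative example, for a board description that has no usable clock
provider node and states the rate directly instead. This is only a sketch:
the 32768 Hz value is an assumed, illustrative rate, not taken from any real
board file, and should be replaced with the actual hub timer input frequency.

	timer@35006000 {
		compatible = "brcm,kona-timer";
		reg = <0x35006000 0x1000>;
		interrupts = <0x0 7 0x4>;
		/* assumed 32.768 kHz input; use the board's real rate */
		clock-frequency = <32768>;
	};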
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/brcm,kona-timer.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Broadcom Kona family timer

maintainers:
  - Florian Fainelli <f.fainelli@gmail.com>

properties:
  compatible:
    const: brcm,kona-timer

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

  clock-frequency: true

oneOf:
  - required:
      - clocks
  - required:
      - clock-frequency

required:
  - compatible
  - reg
  - interrupts

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/bcm281xx.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    timer@35006000 {
        compatible = "brcm,kona-timer";
        reg = <0x35006000 0x1000>;
        interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&aon_ccu BCM281XX_AON_CCU_HUB_TIMER>;
    };
...
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/loongson,ls1x-pwmtimer.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Loongson-1 PWM timer

maintainers:
  - Keguang Zhang <keguang.zhang@gmail.com>

description:
  Loongson-1 PWM timer can be used for system clock source
  and clock event timers.

properties:
  compatible:
    const: loongson,ls1b-pwmtimer

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  interrupts:
    maxItems: 1

required:
  - compatible
  - reg
  - clocks
  - interrupts

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/loongson,ls1x-clk.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    clocksource: timer@1fe5c030 {
        compatible = "loongson,ls1b-pwmtimer";
        reg = <0x1fe5c030 0x10>;
        clocks = <&clkc LS1X_CLKID_APB>;
        interrupt-parent = <&intc0>;
        interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
    };
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/timer/ralink,rt2880-timer.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Timer present in Ralink family SoCs

maintainers:
  - Sergio Paracuellos <sergio.paracuellos@gmail.com>

properties:
  compatible:
    const: ralink,rt2880-timer

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  interrupts:
    maxItems: 1

required:
  - compatible
  - reg
  - clocks
  - interrupts

additionalProperties: false

examples:
  - |
    timer@100 {
        compatible = "ralink,rt2880-timer";
        reg = <0x100 0x20>;

        clocks = <&sysc 3>;

        interrupt-parent = <&intc>;
        interrupts = <1>;
    };
...
@@ -1014,8 +1014,6 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
	return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL);
}
-asmlinkage long sys_ni_posix_timers(void);
SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
		struct timeval32 __user *, tvs)
{
...
@@ -47,7 +47,6 @@
#include <regs-clk.h>
#include <regs-mux.h>
-#include <regs-pwm.h>
#include <regs-rtc.h>
#include <regs-wdt.h>
...
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
*
* Loongson 1 PWM Register Definitions.
*/
#ifndef __ASM_MACH_LOONGSON32_REGS_PWM_H
#define __ASM_MACH_LOONGSON32_REGS_PWM_H
/* Loongson 1 PWM Timer Register Definitions */
#define PWM_CNT 0x0
#define PWM_HRC 0x4
#define PWM_LRC 0x8
#define PWM_CTRL 0xc
/* PWM Control Register Bits */
#define CNT_RST BIT(7)
#define INT_SR BIT(6)
#define INT_EN BIT(5)
#define PWM_SINGLE BIT(4)
#define PWM_OE BIT(3)
#define CNT_EN BIT(0)
#endif /* __ASM_MACH_LOONGSON32_REGS_PWM_H */
@@ -35,41 +35,4 @@ config LOONGSON1_LS1C
	select COMMON_CLK
endchoice
menuconfig CEVT_CSRC_LS1X
bool "Use PWM Timer for clockevent/clocksource"
select MIPS_EXTERNAL_TIMER
depends on CPU_LOONGSON32
help
This option changes the default clockevent/clocksource to PWM Timer,
and is required by Loongson1 CPUFreq support.
If unsure, say N.
choice
prompt "Select clockevent/clocksource"
depends on CEVT_CSRC_LS1X
default TIMER_USE_PWM0
config TIMER_USE_PWM0
bool "Use PWM Timer 0"
help
Use PWM Timer 0 as the default clockevent/clocksourcer.
config TIMER_USE_PWM1
bool "Use PWM Timer 1"
help
Use PWM Timer 1 as the default clockevent/clocksourcer.
config TIMER_USE_PWM2
bool "Use PWM Timer 2"
help
Use PWM Timer 2 as the default clockevent/clocksourcer.
config TIMER_USE_PWM3
bool "Use PWM Timer 3"
help
Use PWM Timer 3 as the default clockevent/clocksourcer.
endchoice
endif # MACH_LOONGSON32
@@ -5,208 +5,8 @@
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/interrupt.h>
#include <linux/sizes.h>
#include <asm/time.h>
#include <loongson1.h>
#include <platform.h>
#ifdef CONFIG_CEVT_CSRC_LS1X
#if defined(CONFIG_TIMER_USE_PWM1)
#define LS1X_TIMER_BASE LS1X_PWM1_BASE
#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ
#elif defined(CONFIG_TIMER_USE_PWM2)
#define LS1X_TIMER_BASE LS1X_PWM2_BASE
#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ
#elif defined(CONFIG_TIMER_USE_PWM3)
#define LS1X_TIMER_BASE LS1X_PWM3_BASE
#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ
#else
#define LS1X_TIMER_BASE LS1X_PWM0_BASE
#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ
#endif
DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
static void __iomem *timer_reg_base;
static uint32_t ls1x_jiffies_per_tick;
static inline void ls1x_pwmtimer_set_period(uint32_t period)
{
__raw_writel(period, timer_reg_base + PWM_HRC);
__raw_writel(period, timer_reg_base + PWM_LRC);
}
static inline void ls1x_pwmtimer_restart(void)
{
__raw_writel(0x0, timer_reg_base + PWM_CNT);
__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
}
void __init ls1x_pwmtimer_init(void)
{
timer_reg_base = ioremap(LS1X_TIMER_BASE, SZ_16);
if (!timer_reg_base)
panic("Failed to remap timer registers");
ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
ls1x_pwmtimer_restart();
}
static u64 ls1x_clocksource_read(struct clocksource *cs)
{
unsigned long flags;
int count;
u32 jifs;
static int old_count;
static u32 old_jifs;
raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
/* read the count */
count = __raw_readl(timer_reg_base + PWM_CNT);
/*
* It's possible for count to appear to go the wrong way for this
* reason:
*
* The timer counter underflows, but we haven't handled the resulting
* interrupt and incremented jiffies yet.
*
* Previous attempts to handle these cases intelligently were buggy, so
* we just do the simple thing now.
*/
if (count < old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
return (u64) (jifs * ls1x_jiffies_per_tick) + count;
}
static struct clocksource ls1x_clocksource = {
.name = "ls1x-pwmtimer",
.read = ls1x_clocksource_read,
.mask = CLOCKSOURCE_MASK(24),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static irqreturn_t ls1x_clockevent_isr(int irq, void *devid)
{
struct clock_event_device *cd = devid;
ls1x_pwmtimer_restart();
cd->event_handler(cd);
return IRQ_HANDLED;
}
static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
ls1x_pwmtimer_restart();
__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
__raw_writel(__raw_readl(timer_reg_base + PWM_CTRL) & ~CNT_EN,
timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_next(unsigned long evt,
struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(evt);
ls1x_pwmtimer_restart();
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static struct clock_event_device ls1x_clockevent = {
.name = "ls1x-pwmtimer",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = LS1X_TIMER_IRQ,
.set_next_event = ls1x_clockevent_set_next,
.set_state_shutdown = ls1x_clockevent_set_state_shutdown,
.set_state_periodic = ls1x_clockevent_set_state_periodic,
.set_state_oneshot = ls1x_clockevent_set_state_shutdown,
.tick_resume = ls1x_clockevent_tick_resume,
};
static void __init ls1x_time_init(void)
{
struct clock_event_device *cd = &ls1x_clockevent;
int ret;
if (!mips_hpt_frequency)
panic("Invalid timer clock rate");
ls1x_pwmtimer_init();
clockevent_set_clock(cd, mips_hpt_frequency);
cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd);
cd->max_delta_ticks = 0xffffff;
cd->min_delta_ns = clockevent_delta2ns(0x000300, cd);
cd->min_delta_ticks = 0x000300;
cd->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(cd);
ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000;
ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency);
if (ret)
panic(KERN_ERR "Failed to register clocksource: %d\n", ret);
if (request_irq(LS1X_TIMER_IRQ, ls1x_clockevent_isr,
IRQF_PERCPU | IRQF_TIMER, "ls1x-pwmtimer",
&ls1x_clockevent))
pr_err("Failed to register ls1x-pwmtimer interrupt\n");
}
#endif /* CONFIG_CEVT_CSRC_LS1X */
void __init plat_time_init(void)
{
	struct clk *clk = NULL;
@@ -214,20 +14,10 @@ void __init plat_time_init(void)
	/* initialize LS1X clocks */
	of_clk_init(NULL);
#ifdef CONFIG_CEVT_CSRC_LS1X
/* setup LS1X PWM timer */
clk = clk_get(NULL, "ls1x-pwmtimer");
if (IS_ERR(clk))
panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk);
ls1x_time_init();
#else
	/* setup mips r4k timer */
	clk = clk_get(NULL, "cpu_clk");
	if (IS_ERR(clk))
		panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
	mips_hpt_frequency = clk_get_rate(clk) / 2;
#endif /* CONFIG_CEVT_CSRC_LS1X */
}
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx1-clock.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
...
@@ -8,7 +8,6 @@
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
...
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
...
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/err.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
...
@@ -612,6 +612,15 @@ config TIMER_IMX_SYS_CTR
	  Enable this option to use i.MX system counter timer as a
	  clockevent.
+config CLKSRC_LOONGSON1_PWM
+	bool "Clocksource using Loongson1 PWM"
+	depends on MACH_LOONGSON32 || COMPILE_TEST
+	select MIPS_EXTERNAL_TIMER
+	select TIMER_OF
+	help
+	  Enable this option to use Loongson1 PWM timer as clocksource
+	  instead of the performance counter.
config CLKSRC_ST_LPC
	bool "Low power clocksource found in the LPC" if COMPILE_TEST
	select TIMER_OF if OF
...
@@ -89,3 +89,4 @@ obj-$(CONFIG_MICROCHIP_PIT64B)	+= timer-microchip-pit64b.o
obj-$(CONFIG_MSC313E_TIMER)		+= timer-msc313e.o
obj-$(CONFIG_GOLDFISH_TIMER)		+= timer-goldfish.o
obj-$(CONFIG_GXP_TIMER)			+= timer-gxp.o
+obj-$(CONFIG_CLKSRC_LOONGSON1_PWM)	+= timer-loongson1-pwm.o
@@ -475,15 +475,9 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
	return read_hv_clock_msr();
}
-static u64 notrace read_hv_sched_clock_msr(void)
-{
-	return (read_hv_clock_msr() - hv_sched_clock_offset) *
-		(NSEC_PER_SEC / HV_CLOCK_HZ);
-}
static struct clocksource hyperv_cs_msr = {
	.name = "hyperv_clocksource_msr",
-	.rating = 500,
+	.rating = 495,
	.read = read_hv_clock_msr_cs,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -513,7 +507,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock)
static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
#endif /* CONFIG_GENERIC_SCHED_CLOCK */
-static bool __init hv_init_tsc_clocksource(void)
+static void __init hv_init_tsc_clocksource(void)
{
	union hv_reference_tsc_msr tsc_msr;
@@ -524,17 +518,14 @@ static bool __init hv_init_tsc_clocksource(void)
	 * Hyper-V Reference TSC rating, causing the generic TSC to be used.
	 * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
	 * TSC will be preferred over the virtualized ARM64 arch counter.
-	 * While the Hyper-V MSR clocksource won't be used since the
-	 * Reference TSC clocksource is present, change its rating as
-	 * well for consistency.
	 */
	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
		hyperv_cs_tsc.rating = 250;
-		hyperv_cs_msr.rating = 250;
+		hyperv_cs_msr.rating = 245;
	}
	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
-		return false;
+		return;
	hv_read_reference_counter = read_hv_clock_tsc;
@@ -565,33 +556,34 @@ static bool __init hv_init_tsc_clocksource(void)
	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+	/*
+	 * If TSC is invariant, then let it stay as the sched clock since it
+	 * will be faster than reading the TSC page. But if not invariant, use
+	 * the TSC page so that live migrations across hosts with different
+	 * frequencies is handled correctly.
+	 */
+	if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) {
		hv_sched_clock_offset = hv_read_reference_counter();
		hv_setup_sched_clock(read_hv_sched_clock_tsc);
+	}
-	return true;
}
void __init hv_init_clocksource(void)
{
	/*
-	 * Try to set up the TSC page clocksource. If it succeeds, we're
-	 * done. Otherwise, set up the MSR clocksource. At least one of
-	 * these will always be available except on very old versions of
-	 * Hyper-V on x86. In that case we won't have a Hyper-V
+	 * Try to set up the TSC page clocksource, then the MSR clocksource.
+	 * At least one of these will always be available except on very old
+	 * versions of Hyper-V on x86. In that case we won't have a Hyper-V
	 * clocksource, but Linux will still run with a clocksource based
	 * on the emulated PIT or LAPIC timer.
+	 *
+	 * Never use the MSR clocksource as sched clock. It's too slow.
+	 * Better to use the native sched clock as the fallback.
	 */
-	if (hv_init_tsc_clocksource())
-		return;
-	if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
-		return;
-	hv_read_reference_counter = read_hv_clock_msr;
-	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
-	hv_sched_clock_offset = hv_read_reference_counter();
-	hv_setup_sched_clock(read_hv_sched_clock_msr);
+	hv_init_tsc_clocksource();
+	if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
+		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
}
void __init hv_remap_tsc_clocksource(void)
...
@@ -369,7 +369,7 @@ static int __init ingenic_tcu_probe(struct platform_device *pdev)
	return 0;
}
-static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
+static int ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
@@ -382,7 +382,7 @@ static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
	return 0;
}
-static int __maybe_unused ingenic_tcu_resume(struct device *dev)
+static int ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
@@ -406,7 +406,7 @@ static int __maybe_unused ingenic_tcu_resume(struct device *dev)
	return ret;
}
-static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
+static const struct dev_pm_ops ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq = ingenic_tcu_resume,
@@ -415,9 +415,7 @@ static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name = "ingenic-tcu-timer",
-#ifdef CONFIG_PM_SLEEP
-		.pm = &ingenic_tcu_pm_ops,
-#endif
+		.pm = pm_sleep_ptr(&ingenic_tcu_pm_ops),
		.of_match_table = ingenic_tcu_of_match,
	},
};
...
@@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
	 * and use it. Note that the event timer uses the interrupt and it's the
	 * 2nd TTC hence the irq_of_parse_and_map(,1)
	 */
-	timer_baseaddr = of_iomap(timer, 0);
-	if (!timer_baseaddr) {
+	timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
+	if (IS_ERR(timer_baseaddr)) {
		pr_err("ERROR: invalid timer base address\n");
-		return -ENXIO;
+		return PTR_ERR(timer_baseaddr);
	}
	irq = irq_of_parse_and_map(timer, 1);
@@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
	clk_ce = of_clk_get(timer, clksel);
	if (IS_ERR(clk_ce)) {
		pr_err("ERROR: timer input clock not found\n");
-		return PTR_ERR(clk_ce);
+		ret = PTR_ERR(clk_ce);
+		goto put_clk_cs;
	}
	ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
	if (ret)
-		return ret;
+		goto put_clk_ce;
	ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
	if (ret)
-		return ret;
+		goto put_clk_ce;
	pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
	return 0;
+put_clk_ce:
+	clk_put(clk_ce);
+put_clk_cs:
+	clk_put(clk_cs);
+	return ret;
}
static const struct of_device_id ttc_timer_of_match[] = {
...
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <soc/imx/timer.h>
/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
@@ -25,6 +24,12 @@
 * - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 * - MX6DL, MX6SX, MX6Q(rev1.1+)
 */
+enum imx_gpt_type {
+	GPT_TYPE_IMX1,		/* i.MX1 */
+	GPT_TYPE_IMX21,		/* i.MX21/27 */
+	GPT_TYPE_IMX31,		/* i.MX31/35/25/37/51/6Q */
+	GPT_TYPE_IMX6DL,	/* i.MX6DL/SX/SL */
+};
/* defines common for all i.MX */
#define MXC_TCTL		0x00
@@ -93,13 +98,11 @@ static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_irq_disable imx1_gpt_irq_disable
static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
-#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable
static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
@@ -108,13 +111,11 @@ static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_irq_enable imx1_gpt_irq_enable
static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1<<0, imxtm->base + V2_IR);
}
-#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable
static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
@@ -131,7 +132,6 @@ static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
-#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge
static void __iomem *sched_clock_reg;
@@ -296,7 +296,6 @@ static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl
static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
@@ -343,10 +342,10 @@ static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
-	.gpt_irq_enable = imx21_gpt_irq_enable,
-	.gpt_irq_disable = imx21_gpt_irq_disable,
+	.gpt_irq_enable = imx1_gpt_irq_enable,
+	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
-	.gpt_setup_tctl = imx21_gpt_setup_tctl,
+	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};
@@ -365,9 +364,9 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
-	.gpt_irq_enable = imx6dl_gpt_irq_enable,
-	.gpt_irq_disable = imx6dl_gpt_irq_disable,
-	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
+	.gpt_irq_enable = imx31_gpt_irq_enable,
+	.gpt_irq_disable = imx31_gpt_irq_disable,
+	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};
...
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Clocksource driver for Loongson-1 SoC
*
* Copyright (c) 2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sizes.h>
#include "timer-of.h"
/* Loongson-1 PWM Timer Register Definitions */
#define PWM_CNTR 0x0
#define PWM_HRC 0x4
#define PWM_LRC 0x8
#define PWM_CTRL 0xc
/* PWM Control Register Bits */
#define INT_LRC_EN BIT(11)
#define INT_HRC_EN BIT(10)
#define CNTR_RST BIT(7)
#define INT_SR BIT(6)
#define INT_EN BIT(5)
#define PWM_SINGLE BIT(4)
#define PWM_OE BIT(3)
#define CNT_EN BIT(0)
#define CNTR_WIDTH 24
DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
struct ls1x_clocksource {
void __iomem *reg_base;
unsigned long ticks_per_jiffy;
struct clocksource clksrc;
};
static inline struct ls1x_clocksource *to_ls1x_clksrc(struct clocksource *c)
{
return container_of(c, struct ls1x_clocksource, clksrc);
}
static inline void ls1x_pwmtimer_set_period(unsigned int period,
struct timer_of *to)
{
writel(period, timer_of_base(to) + PWM_LRC);
writel(period, timer_of_base(to) + PWM_HRC);
}
static inline void ls1x_pwmtimer_clear(struct timer_of *to)
{
writel(0, timer_of_base(to) + PWM_CNTR);
}
static inline void ls1x_pwmtimer_start(struct timer_of *to)
{
writel((INT_EN | PWM_OE | CNT_EN), timer_of_base(to) + PWM_CTRL);
}
static inline void ls1x_pwmtimer_stop(struct timer_of *to)
{
writel(0, timer_of_base(to) + PWM_CTRL);
}
static inline void ls1x_pwmtimer_irq_ack(struct timer_of *to)
{
int val;
val = readl(timer_of_base(to) + PWM_CTRL);
val |= INT_SR;
writel(val, timer_of_base(to) + PWM_CTRL);
}
static irqreturn_t ls1x_clockevent_isr(int irq, void *dev_id)
{
struct clock_event_device *clkevt = dev_id;
struct timer_of *to = to_timer_of(clkevt);
ls1x_pwmtimer_irq_ack(to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
static int ls1x_clockevent_set_state_periodic(struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(timer_of_period(to), to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_tick_resume(struct clock_event_device *clkevt)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_start(to_timer_of(clkevt));
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *clkevt)
{
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_stop(to_timer_of(clkevt));
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static int ls1x_clockevent_set_next(unsigned long evt,
struct clock_event_device *clkevt)
{
struct timer_of *to = to_timer_of(clkevt);
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(evt, to);
ls1x_pwmtimer_clear(to);
ls1x_pwmtimer_start(to);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
}
static struct timer_of ls1x_to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "ls1x-pwmtimer",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 300,
.set_next_event = ls1x_clockevent_set_next,
.set_state_periodic = ls1x_clockevent_set_state_periodic,
.set_state_oneshot = ls1x_clockevent_set_state_shutdown,
.set_state_shutdown = ls1x_clockevent_set_state_shutdown,
.tick_resume = ls1x_clockevent_tick_resume,
},
.of_irq = {
.handler = ls1x_clockevent_isr,
.flags = IRQF_TIMER,
},
};
/*
* Since the PWM timer overflows every two ticks, its not very useful
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
static u64 ls1x_clocksource_read(struct clocksource *cs)
{
struct ls1x_clocksource *ls1x_cs = to_ls1x_clksrc(cs);
unsigned long flags;
int count;
u32 jifs;
static int old_count;
static u32 old_jifs;
raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
* retry. (Namely, old_jifs and old_count.) So we must treat
* jiffies as volatile despite the lock. We read jiffies
* before latching the timer count to guarantee that although
* the jiffies value might be older than the count (that is,
* the counter may underflow between the last point where
* jiffies was incremented and the point where we latch the
* count), it cannot be newer.
*/
jifs = jiffies;
/* read the count */
count = readl(ls1x_cs->reg_base + PWM_CNTR);
/*
* It's possible for count to appear to go the wrong way for this
* reason:
*
* The timer counter underflows, but we haven't handled the resulting
* interrupt and incremented jiffies yet.
*
* Previous attempts to handle these cases intelligently were buggy, so
* we just do the simple thing now.
*/
if (count < old_count && jifs == old_jifs)
count = old_count;
old_count = count;
old_jifs = jifs;
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
return (u64)(jifs * ls1x_cs->ticks_per_jiffy) + count;
}
static struct ls1x_clocksource ls1x_clocksource = {
.clksrc = {
.name = "ls1x-pwmtimer",
.rating = 300,
.read = ls1x_clocksource_read,
.mask = CLOCKSOURCE_MASK(CNTR_WIDTH),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
static int __init ls1x_pwm_clocksource_init(struct device_node *np)
{
struct timer_of *to = &ls1x_to;
int ret;
ret = timer_of_init(np, to);
if (ret)
return ret;
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
0x1, GENMASK(CNTR_WIDTH - 1, 0));
ls1x_clocksource.reg_base = timer_of_base(to);
ls1x_clocksource.ticks_per_jiffy = timer_of_period(to);
return clocksource_register_hz(&ls1x_clocksource.clksrc,
timer_of_rate(to));
}
TIMER_OF_DECLARE(ls1x_pwm_clocksource, "loongson,ls1b-pwmtimer",
ls1x_pwm_clocksource_init);
@@ -135,7 +135,7 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
	/* POSIX.1b Interval Timers */
-	int			posix_timer_id;
+	unsigned int		next_posix_timer_id;
	struct list_head	posix_timers;
	/* ITIMER_REAL timer for the process */
...
@@ -1280,6 +1280,7 @@ asmlinkage long sys_ni_syscall(void);
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+asmlinkage long sys_ni_posix_timers(void);
/*
 * Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly.
...
@@ -44,7 +44,6 @@ struct time_namespace *copy_time_ns(unsigned long flags,
				    struct time_namespace *old_ns);
void free_time_ns(struct time_namespace *ns);
void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
-struct vdso_data *arch_get_vdso_data(void *vvar_page);
struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
@@ -163,4 +162,6 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#endif
+struct vdso_data *arch_get_vdso_data(void *vvar_page);
#endif /* _LINUX_TIMENS_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2015 Linaro Ltd.
*/
#ifndef __SOC_IMX_TIMER_H__
#define __SOC_IMX_TIMER_H__
enum imx_gpt_type {
GPT_TYPE_IMX1, /* i.MX1 */
GPT_TYPE_IMX21, /* i.MX21/27 */
GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
};
#endif /* __SOC_IMX_TIMER_H__ */
@@ -158,7 +158,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
		{ HRTIMER_MODE_ABS_SOFT,	"ABS|SOFT"	}, \
		{ HRTIMER_MODE_REL_SOFT,	"REL|SOFT"	}, \
		{ HRTIMER_MODE_ABS_PINNED_SOFT,	"ABS|PINNED|SOFT" }, \
-		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" })
+		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" }, \
+		{ HRTIMER_MODE_ABS_HARD,	"ABS|HARD"	}, \
+		{ HRTIMER_MODE_REL_HARD,	"REL|HARD"	}, \
+		{ HRTIMER_MODE_ABS_PINNED_HARD,	"ABS|PINNED|HARD" }, \
+		{ HRTIMER_MODE_REL_PINNED_HARD,	"REL|PINNED|HARD" })
/**
 * hrtimer_init - called when the hrtimer is initialized
...
@@ -751,7 +751,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
							ktime_t now)
{
-	struct task_struct *task = (struct task_struct *)alarm->data;
+	struct task_struct *task = alarm->data;
	alarm->data = NULL;
	if (task)
@@ -847,7 +847,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
	struct restart_block *restart = &current->restart_block;
	struct alarm alarm;
	ktime_t exp;
-	int ret = 0;
+	int ret;
	if (!alarmtimer_get_rtcdev())
		return -EOPNOTSUPP;
...
@@ -164,6 +164,7 @@ static inline bool is_migration_base(struct hrtimer_clock_base *base)
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
+	__acquires(&timer->base->lock)
{
	struct hrtimer_clock_base *base;
@@ -280,6 +281,7 @@ static inline bool is_migration_base(struct hrtimer_clock_base *base)
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+	__acquires(&timer->base->cpu_base->lock)
{
	struct hrtimer_clock_base *base = timer->base;
@@ -1013,6 +1015,7 @@ void hrtimers_resume_local(void)
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+	__releases(&timer->base->cpu_base->lock)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
...
@@ -1041,7 +1041,7 @@ static bool report_idle_softirq(void)
		return false;
	}
-	if (ratelimit < 10)
+	if (ratelimit >= 10)
		return false;
	/* On RT, softirqs handling may be waiting on some lock */
...