Commit 1f2d9ffc authored by Linus Torvalds

Merge tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:

 - Improve the scalability of the CFS bandwidth unthrottling logic with a
   large number of CPUs.

 - Fix & rework various cpuidle routines and simplify their interaction
   with the generic scheduler code. Treat __cpuidle methods as noinstr in
   objtool's noinstr detection and fix boatloads of cpuidle bugs & quirks.

 - Add new ABI: introduce MEMBARRIER_CMD_GET_REGISTRATIONS, to query
   previously issued registrations (a usage sketch follows the commit list
   below).

 - Limit scheduler slice duration to the sysctl_sched_latency period, to
   improve scheduling granularity with a large number of SCHED_IDLE
   tasks.

 - Debuggability enhancement on sys_exit(): warn about disabled IRQs, but
   also enable them to prevent a cascade of follow-up problems and repeat
   warnings.

 - Fix the rescheduling logic in prio_changed_dl().

 - Micro-optimize cpufreq and sched-util methods.

 - Micro-optimize ttwu_runnable()

 - Micro-optimize the idle-scanning in update_numa_stats(),
   select_idle_capacity() and steal_cookie_task().

 - Update the RSEQ code & self-tests

 - Constify various scheduler methods

 - Remove unused methods

 - Refine __init tags

 - Documentation updates

 - Misc other cleanups, fixes

* tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (110 commits)
  sched/rt: pick_next_rt_entity(): check list_entry
  sched/deadline: Add more reschedule cases to prio_changed_dl()
  sched/fair: sanitize vruntime of entity being placed
  sched/fair: Remove capacity inversion detection
  sched/fair: unlink misfit task from cpu overutilized
  objtool: mem*() are not uaccess safe
  cpuidle: Fix poll_idle() noinstr annotation
  sched/clock: Make local_clock() noinstr
  sched/clock/x86: Mark sched_clock() noinstr
  x86/pvclock: Improve atomic update of last_value in pvclock_clocksource_read()
  x86/atomics: Always inline arch_atomic64*()
  cpuidle: tracing, preempt: Squash _rcuidle tracing
  cpuidle: tracing: Warn about !rcu_is_watching()
  cpuidle: lib/bug: Disable rcu_is_watching() during WARN/BUG
  cpuidle: drivers: firmware: psci: Dont instrument suspend code
  KVM: selftests: Fix build of rseq test
  exit: Detect and fix irq disabled state in oops
  cpuidle, arm64: Fix the ARM64 cpuidle logic
  cpuidle: mvebu: Fix duplicate flags assignment
  sched/fair: Limit sched slice duration
  ...
parents a2f0e7ee 7c4a5b89
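Usage sketch for the new MEMBARRIER_CMD_GET_REGISTRATIONS ABI mentioned above. This is not part of the merge itself, just a minimal illustration of how a process might query its membarrier registrations; it assumes uapi headers new enough (v6.3+) to define the new command, and wraps the raw syscall since glibc provides no membarrier() wrapper.

/* Hedged illustration only -- not code from this merge. */
#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register one of the expedited variants first... */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		perror("register private expedited");

	/* ...then read back the bitmask of registrations for this process. */
	int regs = membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
	if (regs < 0) {
		perror("MEMBARRIER_CMD_GET_REGISTRATIONS");
		return 1;
	}
	if (regs & MEMBARRIER_CMD_PRIVATE_EXPEDITED)
		printf("private expedited membarrier is registered\n");
	return 0;
}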
@@ -619,6 +619,8 @@ process migrations.
 and is an example of this type.
+.. _cgroupv2-limits-distributor:
 Limits
 ------
@@ -635,6 +637,7 @@ process migrations.
 "io.max" limits the maximum BPS and/or IOPS that a cgroup can consume
 on an IO device and is an example of this type.
+.. _cgroupv2-protections-distributor:
 Protections
 -----------
......
...@@ -15,6 +15,7 @@ Linux Scheduler ...@@ -15,6 +15,7 @@ Linux Scheduler
sched-capacity sched-capacity
sched-energy sched-energy
schedutil schedutil
sched-util-clamp
sched-nice-design sched-nice-design
sched-rt-group sched-rt-group
sched-stats sched-stats
......
@@ -57,7 +57,6 @@ EXPORT_SYMBOL(pm_power_off);
 void arch_cpu_idle(void)
 {
 wtint(0);
-raw_local_irq_enable();
 }
 void arch_cpu_idle_dead(void)
......
@@ -27,7 +27,6 @@ SECTIONS
 HEAD_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 *(.fixup)
 *(.gnu.warning)
......
@@ -114,6 +114,8 @@ void arch_cpu_idle(void)
 "sleep %0	\n"
 :
 :"I"(arg)); /* can't be "r" has to be embedded const */
+raw_local_irq_disable();
 }
 #else /* ARC700 */
@@ -122,6 +124,7 @@ void arch_cpu_idle(void)
 {
 /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
 __asm__ __volatile__("sleep 0x3	\n");
+raw_local_irq_disable();
 }
 #endif
......
@@ -85,7 +85,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -96,7 +96,6 @@
 SOFTIRQENTRY_TEXT \
 TEXT_TEXT \
 SCHED_TEXT \
-CPUIDLE_TEXT \
 LOCK_TEXT \
 KPROBES_TEXT \
 ARM_STUBS_TEXT \
......
@@ -26,8 +26,8 @@ static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
 *
 * Returns the index passed as parameter
 */
-int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
-struct cpuidle_driver *drv, int index)
+__cpuidle int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct
+cpuidle_driver *drv, int index)
 {
 cpu_do_idle();
......
@@ -78,7 +78,6 @@ void arch_cpu_idle(void)
 arm_pm_idle();
 else
 cpu_do_idle();
-raw_local_irq_enable();
 }
 void arch_cpu_idle_prepare(void)
......
@@ -638,7 +638,7 @@ static void do_handle_IPI(int ipinr)
 unsigned int cpu = smp_processor_id();
 if ((unsigned)ipinr < NR_IPI)
-trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+trace_ipi_entry(ipi_types[ipinr]);
 switch (ipinr) {
 case IPI_WAKEUP:
@@ -685,7 +685,7 @@ static void do_handle_IPI(int ipinr)
 }
 if ((unsigned)ipinr < NR_IPI)
-trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+trace_ipi_exit(ipi_types[ipinr]);
 }
 /* Legacy version, should go away once all irqchips have been converted */
@@ -708,7 +708,7 @@ static irqreturn_t ipi_handler(int irq, void *data)
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+trace_ipi_raise(target, ipi_types[ipinr]);
 __ipi_send_mask(ipi_desc[ipinr], target);
 }
......
@@ -44,7 +44,7 @@ static void davinci_save_ddr_power(int enter, bool pdown)
 }
 /* Actual code that puts the SoC in different idle states */
-static int davinci_enter_idle(struct cpuidle_device *dev,
+static __cpuidle int davinci_enter_idle(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 davinci_save_ddr_power(1, ddr2_pdown);
......
@@ -42,8 +42,9 @@ static void gemini_idle(void)
 */
 /* FIXME: Enabling interrupts here is racy! */
-local_irq_enable();
+raw_local_irq_enable();
 cpu_do_idle();
+raw_local_irq_disable();
 }
 static void __init gemini_init_machine(void)
......
@@ -8,7 +8,7 @@
 #include <asm/system_misc.h>
 #include "cpuidle.h"
-static int imx5_cpuidle_enter(struct cpuidle_device *dev,
+static __cpuidle int imx5_cpuidle_enter(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 arm_pm_idle();
......
@@ -17,7 +17,7 @@
 static int num_idle_cpus = 0;
 static DEFINE_RAW_SPINLOCK(cpuidle_lock);
-static int imx6q_enter_wait(struct cpuidle_device *dev,
+static __cpuidle int imx6q_enter_wait(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 raw_spin_lock(&cpuidle_lock);
@@ -25,9 +25,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
 imx6_set_lpm(WAIT_UNCLOCKED);
 raw_spin_unlock(&cpuidle_lock);
-ct_idle_enter();
+ct_cpuidle_enter();
 cpu_do_idle();
-ct_idle_exit();
+ct_cpuidle_exit();
 raw_spin_lock(&cpuidle_lock);
 if (num_idle_cpus-- == num_online_cpus())
......
@@ -11,7 +11,7 @@
 #include "common.h"
 #include "cpuidle.h"
-static int imx6sl_enter_wait(struct cpuidle_device *dev,
+static __cpuidle int imx6sl_enter_wait(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 imx6_set_lpm(WAIT_UNCLOCKED);
......
@@ -30,7 +30,7 @@ static int imx6sx_idle_finish(unsigned long val)
 return 0;
 }
-static int imx6sx_enter_wait(struct cpuidle_device *dev,
+static __cpuidle int imx6sx_enter_wait(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 imx6_set_lpm(WAIT_UNCLOCKED);
@@ -47,7 +47,9 @@ static int imx6sx_enter_wait(struct cpuidle_device *dev,
 cpu_pm_enter();
 cpu_cluster_pm_enter();
+ct_cpuidle_enter();
 cpu_suspend(0, imx6sx_idle_finish);
+ct_cpuidle_exit();
 cpu_cluster_pm_exit();
 cpu_pm_exit();
@@ -87,7 +89,8 @@ static struct cpuidle_driver imx6sx_cpuidle_driver = {
 */
 .exit_latency = 300,
 .target_residency = 500,
-.flags = CPUIDLE_FLAG_TIMER_STOP,
+.flags = CPUIDLE_FLAG_TIMER_STOP |
+CPUIDLE_FLAG_RCU_IDLE,
 .enter = imx6sx_enter_wait,
 .name = "LOW-POWER-IDLE",
 .desc = "ARM power off",
......
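The i.MX hunks above all follow the same conversion pattern that repeats across the driver updates in this pull: the ->enter callback is marked __cpuidle, the affected state gains CPUIDLE_FLAG_RCU_IDLE, and the driver brackets only the actual low-power entry with ct_cpuidle_enter()/ct_cpuidle_exit() so RCU is told about idle as late as possible. A condensed, hypothetical driver skeleton follows (the "foo" names are invented for illustration; the helpers and flags are the ones used in the hunks):

#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

/* Invented stubs standing in for whatever SoC setup a real driver does. */
static void foo_prepare_low_power(void) { }
static void foo_resume_from_low_power(void) { }

static __cpuidle int foo_enter_wait(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
{
	/* RCU is still watching here, so locking/tracing is fine. */
	foo_prepare_low_power();

	ct_cpuidle_enter();	/* context tracking: RCU goes idle here */
	cpu_do_idle();		/* ARM WFI helper, as in the i.MX drivers */
	ct_cpuidle_exit();	/* RCU watching again */

	foo_resume_from_low_power();
	return index;
}

static struct cpuidle_driver foo_cpuidle_driver = {
	.name = "foo_cpuidle",
	.states = {
		{
			.enter			= foo_enter_wait,
			.exit_latency		= 50,
			.target_residency	= 75,
			/* RCU_IDLE tells the cpuidle core not to do its own
			 * ct_cpuidle_enter()/exit() around ->enter(); the
			 * callback above handles it at the right spot. */
			.flags			= CPUIDLE_FLAG_TIMER_STOP |
						  CPUIDLE_FLAG_RCU_IDLE,
			.name			= "WAIT",
			.desc			= "low-power wait",
		},
	},
	.state_count = 1,
};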
@@ -12,7 +12,7 @@
 #include "common.h"
 #include "cpuidle.h"
-static int imx7ulp_enter_wait(struct cpuidle_device *dev,
+static __cpuidle int imx7ulp_enter_wait(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 if (index == 1)
......
@@ -256,11 +256,13 @@ extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
 extern int omap4_mpuss_init(void);
-extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
+bool rcuidle);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
-unsigned int power_state)
+unsigned int power_state,
+bool rcuidle)
 {
 cpu_do_idle();
 return 0;
......
@@ -133,7 +133,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 }
 /* Execute ARM wfi */
-omap_sram_idle();
+omap_sram_idle(true);
 /*
 * Call idle CPU PM enter notifier chain to restore
@@ -265,6 +265,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .owner = THIS_MODULE,
 .states = {
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 2 + 2,
 .target_residency = 5,
@@ -272,6 +273,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU ON + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 10 + 10,
 .target_residency = 30,
@@ -279,6 +281,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU ON + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 50 + 50,
 .target_residency = 300,
@@ -286,6 +289,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU RET + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 1500 + 1800,
 .target_residency = 4000,
@@ -293,6 +297,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU OFF + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 2500 + 7500,
 .target_residency = 12000,
@@ -300,6 +305,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU RET + CORE RET",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 3000 + 8500,
 .target_residency = 15000,
@@ -307,6 +313,7 @@ static struct cpuidle_driver omap3_idle_driver = {
 .desc = "MPU OFF + CORE RET",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 10000 + 30000,
 .target_residency = 30000,
@@ -328,6 +335,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .owner = THIS_MODULE,
 .states = {
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 110 + 162,
 .target_residency = 5,
@@ -335,6 +343,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU ON + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 106 + 180,
 .target_residency = 309,
@@ -342,6 +351,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU ON + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 107 + 410,
 .target_residency = 46057,
@@ -349,6 +359,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU RET + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 121 + 3374,
 .target_residency = 46057,
@@ -356,6 +367,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU OFF + CORE ON",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 855 + 1146,
 .target_residency = 46057,
@@ -363,6 +375,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU RET + CORE RET",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 7580 + 4134,
 .target_residency = 484329,
@@ -370,6 +383,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
 .desc = "MPU OFF + CORE RET",
 },
 {
+.flags = CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap3_enter_idle_bm,
 .exit_latency = 7505 + 15274,
 .target_residency = 484329,
......
@@ -105,7 +105,7 @@ static int omap_enter_idle_smp(struct cpuidle_device *dev,
 }
 raw_spin_unlock_irqrestore(&mpu_lock, flag);
-omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
 raw_spin_lock_irqsave(&mpu_lock, flag);
 if (cx->mpu_state_vote == num_online_cpus())
@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 /* Enter broadcast mode for periodic timers */
-RCU_NONIDLE(tick_broadcast_enable());
+tick_broadcast_enable();
 /* Enter broadcast mode for one-shot timers */
-RCU_NONIDLE(tick_broadcast_enter());
+tick_broadcast_enter();
 /*
 * Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 if (dev->cpu == 0) {
 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
+omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
 /*
 * Call idle CPU cluster PM enter notifier chain
@@ -178,13 +178,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 index = 0;
 cx = state_ptr + index;
 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
+omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
 mpuss_can_lose_context = 0;
 }
 }
 }
-omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
 cpu_done[dev->cpu] = true;
 /* Wakeup CPU1 only if it is not offlined */
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 mpuss_can_lose_context)
 gic_dist_disable();
-RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
-RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
-RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
+clkdm_deny_idle(cpu_clkdm[1]);
+omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
+clkdm_allow_idle(cpu_clkdm[1]);
 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
 mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 cpu_pm_exit();
 cpu_pm_out:
-RCU_NONIDLE(tick_broadcast_exit());
+tick_broadcast_exit();
 fail:
 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -247,7 +247,8 @@ static struct cpuidle_driver omap4_idle_driver = {
 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 .exit_latency = 328 + 440,
 .target_residency = 960,
-.flags = CPUIDLE_FLAG_COUPLED,
+.flags = CPUIDLE_FLAG_COUPLED |
+CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap_enter_idle_coupled,
 .name = "C2",
 .desc = "CPUx OFF, MPUSS CSWR",
@@ -256,7 +257,8 @@ static struct cpuidle_driver omap4_idle_driver = {
 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 .exit_latency = 460 + 518,
 .target_residency = 1100,
-.flags = CPUIDLE_FLAG_COUPLED,
+.flags = CPUIDLE_FLAG_COUPLED |
+CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap_enter_idle_coupled,
 .name = "C3",
 .desc = "CPUx OFF, MPUSS OSWR",
@@ -282,7 +284,8 @@ static struct cpuidle_driver omap5_idle_driver = {
 /* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
 .exit_latency = 48 + 60,
 .target_residency = 100,
-.flags = CPUIDLE_FLAG_TIMER_STOP,
+.flags = CPUIDLE_FLAG_TIMER_STOP |
+CPUIDLE_FLAG_RCU_IDLE,
 .enter = omap_enter_idle_smp,
 .name = "C2",
 .desc = "CPUx CSWR, MPUSS CSWR",
......
@@ -33,6 +33,7 @@
 * and first to wake-up when MPUSS low power states are excercised
 */
+#include <linux/cpuidle.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/errno.h>
@@ -214,6 +215,7 @@ static void __init save_l2x0_context(void)
 * of OMAP4 MPUSS subsystem
 * @cpu : CPU ID
 * @power_state: Low power state.
+ * @rcuidle: RCU needs to be idled
 *
 * MPUSS states for the context save:
 * save_state =
@@ -222,7 +224,8 @@ static void __init save_l2x0_context(void)
 * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
-int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
+__cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
+bool rcuidle)
 {
 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
 unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;
@@ -268,6 +271,10 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 cpu_clear_prev_logic_pwrst(cpu);
 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
 pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
+if (rcuidle)
+ct_cpuidle_enter();
 set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
 omap_pm_ops.scu_prepare(cpu, power_state);
 l2x0_pwrst_prepare(cpu, save_state);
@@ -283,6 +290,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
 gic_dist_enable();
+if (rcuidle)
+ct_cpuidle_exit();
 /*
 * Restore the CPUx power state to ON otherwise CPUx
 * power domain can transitions to programmed low power
......
@@ -29,7 +29,7 @@ static inline int omap4_idle_init(void)
 extern void *omap3_secure_ram_storage;
 extern void omap3_pm_off_mode_enable(int);
-extern void omap_sram_idle(void);
+extern void omap_sram_idle(bool rcuidle);
 extern int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused);
 extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm);
......
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/cpuidle.h>
 #include <trace/events/power.h>
@@ -174,7 +175,7 @@ static int omap34xx_do_sram_idle(unsigned long save_state)
 return 0;
 }
-void omap_sram_idle(void)
+__cpuidle void omap_sram_idle(bool rcuidle)
 {
 /* Variable to tell what needs to be saved and restored
 * in omap_sram_idle*/
@@ -254,11 +255,18 @@ void omap_sram_idle(void)
 */
 if (save_state)
 omap34xx_save_context(omap3_arm_context);
+if (rcuidle)
+ct_cpuidle_enter();
 if (save_state == 1 || save_state == 3)
 cpu_suspend(save_state, omap34xx_do_sram_idle);
 else
 omap34xx_do_sram_idle(save_state);
+if (rcuidle)
+ct_cpuidle_exit();
 /* Restore normal SDRC POWER settings */
 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
@@ -294,7 +302,7 @@ static void omap3_pm_idle(void)
 if (omap_irq_pending())
 return;
-omap_sram_idle();
+omap3_do_wfi();
 }
 #ifdef CONFIG_SUSPEND
@@ -316,7 +324,7 @@ static int omap3_pm_suspend(void)
 omap3_intc_suspend();
-omap_sram_idle();
+omap_sram_idle(false);
 restore:
 /* Restore next_pwrsts */
......
@@ -76,7 +76,7 @@ static int omap4_pm_suspend(void)
 * domain CSWR is not supported by hardware.
 * More details can be found in OMAP4430 TRM section 4.3.4.2.
 */
-omap4_enter_lowpower(cpu_id, cpu_suspend_state);
+omap4_enter_lowpower(cpu_id, cpu_suspend_state, false);
 /* Restore next powerdomain state */
 list_for_each_entry(pwrst, &pwrst_list, node) {
......
@@ -187,7 +187,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
 trace_state = (PWRDM_TRACE_STATES_FLAG |
 ((next & OMAP_POWERSTATE_MASK) << 8) |
 ((prev & OMAP_POWERSTATE_MASK) << 0));
-trace_power_domain_target_rcuidle(pwrdm->name,
+trace_power_domain_target(pwrdm->name,
 trace_state,
 raw_smp_processor_id());
 }
@@ -541,7 +541,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
 if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
 /* Trace the pwrdm desired target state */
-trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+trace_power_domain_target(pwrdm->name, pwrst,
 raw_smp_processor_id());
 /* Program the pwrdm desired target state */
 ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
......
@@ -19,9 +19,8 @@
 #include "regs-sys-s3c64xx.h"
 #include "regs-syscon-power-s3c64xx.h"
-static int s3c64xx_enter_idle(struct cpuidle_device *dev,
-struct cpuidle_driver *drv,
-int index)
+static __cpuidle int s3c64xx_enter_idle(struct cpuidle_device *dev,
+struct cpuidle_driver *drv, int index)
 {
 unsigned long tmp;
......
@@ -62,15 +62,15 @@ int acpi_processor_ffh_lpi_probe(unsigned int cpu)
 return psci_acpi_cpu_init_idle(cpu);
 }
-int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
 {
 u32 state = lpi->address;
 if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
-return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter,
 lpi->index, state);
 else
-return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter,
 lpi->index, state);
 }
 #endif
@@ -42,5 +42,4 @@ void noinstr arch_cpu_idle(void)
 * tricks
 */
 cpu_do_idle();
-raw_local_irq_enable();
 }
@@ -865,7 +865,7 @@ static void do_handle_IPI(int ipinr)
 unsigned int cpu = smp_processor_id();
 if ((unsigned)ipinr < NR_IPI)
-trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+trace_ipi_entry(ipi_types[ipinr]);
 switch (ipinr) {
 case IPI_RESCHEDULE:
@@ -914,7 +914,7 @@ static void do_handle_IPI(int ipinr)
 }
 if ((unsigned)ipinr < NR_IPI)
-trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+trace_ipi_exit(ipi_types[ipinr]);
 }
 static irqreturn_t ipi_handler(int irq, void *data)
......
@@ -4,6 +4,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pgtable.h>
+#include <linux/cpuidle.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
@@ -104,6 +105,10 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 * From this point debug exceptions are disabled to prevent
 * updates to mdscr register (saved and restored along with
 * general purpose registers) from kernel debuggers.
+ *
+ * Strictly speaking the trace_hardirqs_off() here is superfluous,
+ * hardirqs should be firmly off by now. This really ought to use
+ * something like raw_local_daif_save().
 */
 flags = local_daif_save();
@@ -120,6 +125,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 */
 arm_cpuidle_save_irq_context(&context);
+ct_cpuidle_enter();
 if (__cpu_suspend_enter(&state)) {
 /* Call the suspend finisher */
 ret = fn(arg);
@@ -133,8 +140,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 */
 if (!ret)
 ret = -EOPNOTSUPP;
+ct_cpuidle_exit();
 } else {
-RCU_NONIDLE(__cpu_suspend_exit());
+ct_cpuidle_exit();
+__cpu_suspend_exit();
 }
 arm_cpuidle_restore_irq_context(&context);
......
@@ -175,7 +175,6 @@ SECTIONS
 ENTRY_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 HYPERVISOR_TEXT
......
@@ -100,6 +100,5 @@ void arch_cpu_idle(void)
 #ifdef CONFIG_CPU_PM_STOP
 asm volatile("stop\n");
 #endif
-raw_local_irq_enable();
 }
 #endif
@@ -309,7 +309,7 @@ void arch_cpu_idle_dead(void)
 while (!secondary_stack)
 arch_cpu_idle();
-local_irq_disable();
+raw_local_irq_disable();
 asm volatile(
 "mov sp, %0\n"
......
@@ -34,7 +34,6 @@ SECTIONS
 SOFTIRQENTRY_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 *(.fixup)
......
@@ -44,7 +44,6 @@ void arch_cpu_idle(void)
 {
 __vmwait();
 /* interrupts wake us up, but irqs are still disabled */
-raw_local_irq_enable();
 }
 /*
......
@@ -41,7 +41,6 @@ SECTIONS
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 *(.fixup)
......
@@ -242,6 +242,7 @@ void arch_cpu_idle(void)
 (*mark_idle)(1);
 raw_safe_halt();
+raw_local_irq_disable();
 if (mark_idle)
 (*mark_idle)(0);
......
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched/cputime.h>
+#include <asm/cputime.h>
 #include <asm/delay.h>
 #include <asm/efi.h>
 #include <asm/hw_irq.h>
......
@@ -51,7 +51,6 @@ SECTIONS {
 __end_ivt_text = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -13,4 +13,5 @@ void __cpuidle arch_cpu_idle(void)
 {
 raw_local_irq_enable();
 __arch_cpu_idle(); /* idle instruction needs irq enabled */
+raw_local_irq_disable();
 }
@@ -43,7 +43,6 @@ SECTIONS
 .text : {
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -48,7 +48,6 @@ SECTIONS {
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 *(.fixup)
 . = ALIGN(16);
......
@@ -19,7 +19,6 @@ SECTIONS
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 *(.fixup)
 *(.gnu.warning)
......
@@ -19,7 +19,6 @@ SECTIONS
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 *(.fixup)
 *(.gnu.warning)
......
@@ -140,5 +140,4 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
 void arch_cpu_idle(void)
 {
-raw_local_irq_enable();
 }
@@ -36,7 +36,6 @@ SECTIONS {
 EXIT_TEXT
 EXIT_CALL
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -33,13 +33,13 @@ static void __cpuidle r3081_wait(void)
 {
 unsigned long cfg = read_c0_conf();
 write_c0_conf(cfg | R30XX_CONF_HALT);
-raw_local_irq_enable();
 }
 void __cpuidle r4k_wait(void)
 {
 raw_local_irq_enable();
 __r4k_wait();
+raw_local_irq_disable();
 }
 /*
@@ -57,7 +57,6 @@ void __cpuidle r4k_wait_irqoff(void)
 " .set arch=r4000 \n"
 " wait \n"
 " .set pop \n");
-raw_local_irq_enable();
 }
 /*
@@ -77,7 +76,6 @@ static void __cpuidle rm7k_wait_irqoff(void)
 " wait \n"
 " mtc0 $1, $12 # stalls until W stage \n"
 " .set pop \n");
-raw_local_irq_enable();
 }
 /*
@@ -103,6 +101,8 @@ static void __cpuidle au1k_wait(void)
 " nop \n"
 " .set pop \n"
 : : "r" (au1k_wait), "r" (c0status));
+raw_local_irq_disable();
 }
 static int __initdata nowait;
@@ -241,17 +241,15 @@ void __init check_wait(void)
 }
 }
-void arch_cpu_idle(void)
+__cpuidle void arch_cpu_idle(void)
 {
 if (cpu_wait)
 cpu_wait();
-else
-raw_local_irq_enable();
 }
 #ifdef CONFIG_CPU_IDLE
-int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+__cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
 struct cpuidle_driver *drv, int index)
 {
 arch_cpu_idle();
......
@@ -61,7 +61,6 @@ SECTIONS
 .text : {
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -33,7 +33,6 @@ EXPORT_SYMBOL(pm_power_off);
 void arch_cpu_idle(void)
 {
-raw_local_irq_enable();
 }
 /*
......
@@ -24,7 +24,6 @@ SECTIONS
 .text : {
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
......
@@ -102,6 +102,7 @@ void arch_cpu_idle(void)
 raw_local_irq_enable();
 if (mfspr(SPR_UPR) & SPR_UPR_PMP)
 mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+raw_local_irq_disable();
 }
 void (*pm_power_off)(void) = NULL;
......
@@ -52,7 +52,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -183,8 +183,6 @@ void arch_cpu_idle_dead(void)
 void __cpuidle arch_cpu_idle(void)
 {
-raw_local_irq_enable();
 /* nop on real hardware, qemu will idle sleep. */
 asm volatile("or %%r10,%%r10,%%r10\n":::);
 }
......
@@ -86,7 +86,6 @@ SECTIONS
 TEXT_TEXT
 LOCK_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
......
@@ -51,10 +51,9 @@ void arch_cpu_idle(void)
 * Some power_save functions return with
 * interrupts enabled, some don't.
 */
-if (irqs_disabled())
-raw_local_irq_enable();
+if (!irqs_disabled())
+raw_local_irq_disable();
 } else {
-raw_local_irq_enable();
 /*
 * Go into low thread priority and possibly
 * low power mode.
......
@@ -112,7 +112,6 @@ SECTIONS
 #endif
 NOINSTR_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -39,7 +39,6 @@ extern asmlinkage void ret_from_kernel_thread(void);
 void arch_cpu_idle(void)
 {
 cpu_do_idle();
-raw_local_irq_enable();
 }
 void __show_regs(struct pt_regs *regs)
......
@@ -39,7 +39,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 ENTRY_TEXT
......
@@ -42,7 +42,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 ENTRY_TEXT
......
@@ -12,9 +12,9 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
-#include <linux/sched/cputime.h>
 #include <trace/events/power.h>
 #include <asm/cpu_mf.h>
+#include <asm/cputime.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include "entry.h"
@@ -66,7 +66,6 @@ void arch_cpu_idle(void)
 idle->idle_count++;
 account_idle_time(cputime_to_nsecs(idle_time));
 raw_write_seqcount_end(&idle->seqcount);
-raw_local_irq_enable();
 }
 static ssize_t show_idle_count(struct device *dev,
......
@@ -44,7 +44,6 @@ SECTIONS
 HEAD_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -7,13 +7,13 @@
 */
 #include <linux/kernel_stat.h>
-#include <linux/sched/cputime.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/types.h>
 #include <linux/time.h>
 #include <asm/alternative.h>
+#include <asm/cputime.h>
 #include <asm/vtimer.h>
 #include <asm/vtime.h>
 #include <asm/cpu_mf.h>
......
@@ -25,6 +25,7 @@ void default_idle(void)
 raw_local_irq_enable();
 /* Isn't this racy ? */
 cpu_sleep();
+raw_local_irq_disable();
 clear_bl_bit();
 }
......
@@ -30,7 +30,6 @@ SECTIONS
 HEAD_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -57,6 +57,8 @@ static void pmc_leon_idle_fixup(void)
 "lda [%0] %1, %%g0\n"
 :
 : "r"(address), "i"(ASI_LEON_BYPASS));
+raw_local_irq_disable();
 }
 /*
@@ -70,6 +72,8 @@ static void pmc_leon_idle(void)
 /* For systems without power-down, this will be no-op */
 __asm__ __volatile__ ("wr %g0, %asr19\n\t");
+raw_local_irq_disable();
 }
 /* Install LEON Power Down function */
......
@@ -71,7 +71,6 @@ void arch_cpu_idle(void)
 {
 if (sparc_idle)
 (*sparc_idle)();
-raw_local_irq_enable();
 }
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
......
@@ -59,7 +59,6 @@ void arch_cpu_idle(void)
 {
 if (tlb_type != hypervisor) {
 touch_nmi_watchdog();
-raw_local_irq_enable();
 } else {
 unsigned long pstate;
@@ -90,6 +89,8 @@ void arch_cpu_idle(void)
 "wrpr %0, %%g0, %%pstate"
 : "=&r" (pstate)
 : "i" (PSTATE_IE));
+raw_local_irq_disable();
 }
 }
......
@@ -50,7 +50,6 @@ SECTIONS
 HEAD_TEXT
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 KPROBES_TEXT
 IRQENTRY_TEXT
......
@@ -74,7 +74,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
......
@@ -218,7 +218,6 @@ void arch_cpu_idle(void)
 {
 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
 um_idle_sleep();
-raw_local_irq_enable();
 }
 int __cant_sleep(void) {
......
@@ -35,7 +35,6 @@ SECTIONS
 _stext = .;
 TEXT_TEXT
 SCHED_TEXT
-CPUIDLE_TEXT
 LOCK_TEXT
 IRQENTRY_TEXT
 SOFTIRQENTRY_TEXT
......
@@ -34,6 +34,7 @@ SECTIONS
 _text = .; /* Text */
 *(.text)
 *(.text.*)
+*(.noinstr.text)
 _etext = . ;
 }
 .rodata : {
......
@@ -31,6 +31,8 @@
 TDX_R12 | TDX_R13 | \
 TDX_R14 | TDX_R15 )
+.section .noinstr.text, "ax"
 /*
 * __tdx_module_call() - Used by TDX guests to request services from
 * the TDX module (does not include VMM services) using TDCALL instruction.
@@ -139,19 +141,6 @@ SYM_FUNC_START(__tdx_hypercall)
 movl $TDVMCALL_EXPOSE_REGS_MASK, %ecx
-/*
- * For the idle loop STI needs to be called directly before the TDCALL
- * that enters idle (EXIT_REASON_HLT case). STI instruction enables
- * interrupts only one instruction later. If there is a window between
- * STI and the instruction that emulates the HALT state, there is a
- * chance for interrupts to happen in this window, which can delay the
- * HLT operation indefinitely. Since this is the not the desired
- * result, conditionally call STI before TDCALL.
- */
-testq $TDX_HCALL_ISSUE_STI, %rsi
-jz .Lskip_sti
-sti
-.Lskip_sti:
 tdcall
 /*
......
@@ -64,8 +64,9 @@ static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15)
 }
 /* Called from __tdx_hypercall() for unrecoverable failure */
-void __tdx_hypercall_failed(void)
+noinstr void __tdx_hypercall_failed(void)
 {
+instrumentation_begin();
 panic("TDVMCALL failed. TDX module bug?");
 }
@@ -75,7 +76,7 @@ void __tdx_hypercall_failed(void)
 * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
 * guest sides of these calls.
 */
-static u64 hcall_func(u64 exit_reason)
+static __always_inline u64 hcall_func(u64 exit_reason)
 {
 return exit_reason;
 }
@@ -220,7 +221,7 @@ static int ve_instr_len(struct ve_info *ve)
 }
 }
-static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
+static u64 __cpuidle __halt(const bool irq_disabled)
 {
 struct tdx_hypercall_args args = {
 .r10 = TDX_HYPERCALL_STANDARD,
@@ -240,20 +241,14 @@ static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
 * can keep the vCPU in virtual HLT, even if an IRQ is
 * pending, without hanging/breaking the guest.
 */
-return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
+return __tdx_hypercall(&args, 0);
 }
 static int handle_halt(struct ve_info *ve)
 {
-/*
- * Since non safe halt is mainly used in CPU offlining
- * and the guest will always stay in the halt state, don't
- * call the STI instruction (set do_sti as false).
- */
 const bool irq_disabled = irqs_disabled();
-const bool do_sti = false;
-if (__halt(irq_disabled, do_sti))
+if (__halt(irq_disabled))
 return -EIO;
 return ve_instr_len(ve);
@@ -261,18 +256,12 @@ static int handle_halt(struct ve_info *ve)
 void __cpuidle tdx_safe_halt(void)
 {
-/*
- * For do_sti=true case, __tdx_hypercall() function enables
- * interrupts using the STI instruction before the TDCALL. So
- * set irq_disabled as false.
- */
 const bool irq_disabled = false;
-const bool do_sti = true;
 /*
 * Use WARN_ONCE() to report the failure.
 */
-if (__halt(irq_disabled, do_sti))
+if (__halt(irq_disabled))
 WARN_ONCE(1, "HLT instruction emulation failed\n");
 }
......
@@ -41,18 +41,15 @@ static inline unsigned int brs_to(int idx)
 return MSR_AMD_SAMP_BR_FROM + 2 * idx + 1;
 }
-static inline void set_debug_extn_cfg(u64 val)
+static __always_inline void set_debug_extn_cfg(u64 val)
 {
 /* bits[4:3] must always be set to 11b */
-wrmsrl(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
+__wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
 }
-static inline u64 get_debug_extn_cfg(void)
+static __always_inline u64 get_debug_extn_cfg(void)
 {
-u64 val;
-rdmsrl(MSR_AMD_DBG_EXTN_CFG, val);
-return val;
+return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
 }
 static bool __init amd_brs_detect(void)
@@ -405,7 +402,7 @@ void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_i
 * called from ACPI processor_idle.c or acpi_pad.c
 * with interrupts disabled
 */
-void perf_amd_brs_lopwr_cb(bool lopwr_in)
+void noinstr perf_amd_brs_lopwr_cb(bool lopwr_in)
 {
 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 union amd_debug_extn_cfg cfg;
......
@@ -71,7 +71,7 @@ ATOMIC64_DECL(add_unless);
 * the old value.
 */
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 return arch_cmpxchg64(&v->counter, o, n);
 }
@@ -85,7 +85,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
-static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 {
 s64 o;
 unsigned high = (unsigned)(n >> 32);
@@ -104,7 +104,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 *
 * Atomically sets the value of @v to @n.
 */
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 unsigned high = (unsigned)(i >> 32);
 unsigned low = (unsigned)i;
@@ -119,7 +119,7 @@ static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 *
 * Atomically reads the value of @v and returns it.
 */
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 s64 r;
 alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
@@ -133,7 +133,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 alternative_atomic64(add_return,
 ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -145,7 +145,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 /*
 * Other variants with different arithmetic operators:
 */
-static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 alternative_atomic64(sub_return,
 ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -154,7 +154,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 }
 #define arch_atomic64_sub_return arch_atomic64_sub_return
-static inline s64 arch_atomic64_inc_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
 s64 a;
 alternative_atomic64(inc_return, "=&A" (a),
@@ -163,7 +163,7 @@ static inline s64 arch_atomic64_inc_return(atomic64_t *v)
 }
 #define arch_atomic64_inc_return arch_atomic64_inc_return
-static inline s64 arch_atomic64_dec_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 {
 s64 a;
 alternative_atomic64(dec_return, "=&A" (a),
@@ -179,7 +179,7 @@ static inline s64 arch_atomic64_dec_return(atomic64_t *v)
* *
* Atomically adds @i to @v. * Atomically adds @i to @v.
*/ */
static inline s64 arch_atomic64_add(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{ {
__alternative_atomic64(add, add_return, __alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)), ASM_OUTPUT2("+A" (i), "+c" (v)),
...@@ -194,7 +194,7 @@ static inline s64 arch_atomic64_add(s64 i, atomic64_t *v) ...@@ -194,7 +194,7 @@ static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
* *
* Atomically subtracts @i from @v. * Atomically subtracts @i from @v.
*/ */
static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{ {
__alternative_atomic64(sub, sub_return, __alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)), ASM_OUTPUT2("+A" (i), "+c" (v)),
...@@ -208,7 +208,7 @@ static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v) ...@@ -208,7 +208,7 @@ static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
* *
* Atomically increments @v by 1. * Atomically increments @v by 1.
*/ */
static inline void arch_atomic64_inc(atomic64_t *v) static __always_inline void arch_atomic64_inc(atomic64_t *v)
{ {
__alternative_atomic64(inc, inc_return, /* no output */, __alternative_atomic64(inc, inc_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx"); "S" (v) : "memory", "eax", "ecx", "edx");
...@@ -221,7 +221,7 @@ static inline void arch_atomic64_inc(atomic64_t *v) ...@@ -221,7 +221,7 @@ static inline void arch_atomic64_inc(atomic64_t *v)
* *
* Atomically decrements @v by 1. * Atomically decrements @v by 1.
*/ */
static inline void arch_atomic64_dec(atomic64_t *v) static __always_inline void arch_atomic64_dec(atomic64_t *v)
{ {
__alternative_atomic64(dec, dec_return, /* no output */, __alternative_atomic64(dec, dec_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx"); "S" (v) : "memory", "eax", "ecx", "edx");
...@@ -237,7 +237,7 @@ static inline void arch_atomic64_dec(atomic64_t *v) ...@@ -237,7 +237,7 @@ static inline void arch_atomic64_dec(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if the add was done, zero otherwise. * Returns non-zero if the add was done, zero otherwise.
*/ */
static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
unsigned low = (unsigned)u; unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32); unsigned high = (unsigned)(u >> 32);
...@@ -248,7 +248,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) ...@@ -248,7 +248,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
} }
#define arch_atomic64_add_unless arch_atomic64_add_unless #define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v) static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{ {
int r; int r;
alternative_atomic64(inc_not_zero, "=&a" (r), alternative_atomic64(inc_not_zero, "=&a" (r),
...@@ -257,7 +257,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v) ...@@ -257,7 +257,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
} }
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 r; s64 r;
alternative_atomic64(dec_if_positive, "=&A" (r), alternative_atomic64(dec_if_positive, "=&A" (r),
...@@ -269,7 +269,7 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) ...@@ -269,7 +269,7 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64 #undef alternative_atomic64
#undef __alternative_atomic64 #undef __alternative_atomic64
static inline void arch_atomic64_and(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -277,7 +277,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v) ...@@ -277,7 +277,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v)
c = old; c = old;
} }
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -288,7 +288,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) ...@@ -288,7 +288,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} }
#define arch_atomic64_fetch_and arch_atomic64_fetch_and #define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -296,7 +296,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v) ...@@ -296,7 +296,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v)
c = old; c = old;
} }
static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -307,7 +307,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) ...@@ -307,7 +307,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} }
#define arch_atomic64_fetch_or arch_atomic64_fetch_or #define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -315,7 +315,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v) ...@@ -315,7 +315,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
c = old; c = old;
} }
static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
...@@ -326,7 +326,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v) ...@@ -326,7 +326,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} }
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{ {
s64 old, c = 0; s64 old, c = 0;
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
* Atomically reads the value of @v. * Atomically reads the value of @v.
* Doesn't imply a read memory barrier. * Doesn't imply a read memory barrier.
*/ */
static inline s64 arch_atomic64_read(const atomic64_t *v) static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{ {
return __READ_ONCE((v)->counter); return __READ_ONCE((v)->counter);
} }
...@@ -29,7 +29,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v) ...@@ -29,7 +29,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
* *
* Atomically sets the value of @v to @i. * Atomically sets the value of @v to @i.
*/ */
static inline void arch_atomic64_set(atomic64_t *v, s64 i) static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{ {
__WRITE_ONCE(v->counter, i); __WRITE_ONCE(v->counter, i);
} }
...@@ -55,7 +55,7 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v) ...@@ -55,7 +55,7 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
* *
* Atomically subtracts @i from @v. * Atomically subtracts @i from @v.
*/ */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "subq %1,%0" asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter) : "=m" (v->counter)
...@@ -71,7 +71,7 @@ static inline void arch_atomic64_sub(s64 i, atomic64_t *v) ...@@ -71,7 +71,7 @@ static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
* true if the result is zero, or false for all * true if the result is zero, or false for all
* other cases. * other cases.
*/ */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v) static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{ {
return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
} }
...@@ -113,7 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) ...@@ -113,7 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other * returns true if the result is 0, or false for all other
* cases. * cases.
*/ */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v) static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{ {
return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
} }
...@@ -127,7 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) ...@@ -127,7 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all * and returns true if the result is zero, or false for all
* other cases. * other cases.
*/ */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v) static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{ {
return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
} }
...@@ -142,7 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) ...@@ -142,7 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when * if the result is negative, or false when
* result is greater than or equal to zero. * result is greater than or equal to zero.
*/ */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v) static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{ {
return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
} }
...@@ -161,25 +161,25 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v) ...@@ -161,25 +161,25 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
} }
#define arch_atomic64_add_return arch_atomic64_add_return #define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{ {
return arch_atomic64_add_return(-i, v); return arch_atomic64_add_return(-i, v);
} }
#define arch_atomic64_sub_return arch_atomic64_sub_return #define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{ {
return xadd(&v->counter, i); return xadd(&v->counter, i);
} }
#define arch_atomic64_fetch_add arch_atomic64_fetch_add #define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{ {
return xadd(&v->counter, -i); return xadd(&v->counter, -i);
} }
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{ {
return arch_cmpxchg(&v->counter, old, new); return arch_cmpxchg(&v->counter, old, new);
} }
...@@ -191,13 +191,13 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s ...@@ -191,13 +191,13 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s
} }
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new) static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{ {
return arch_xchg(&v->counter, new); return arch_xchg(&v->counter, new);
} }
#define arch_atomic64_xchg arch_atomic64_xchg #define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "andq %1,%0" asm volatile(LOCK_PREFIX "andq %1,%0"
: "+m" (v->counter) : "+m" (v->counter)
...@@ -205,7 +205,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v) ...@@ -205,7 +205,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v)
: "memory"); : "memory");
} }
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{ {
s64 val = arch_atomic64_read(v); s64 val = arch_atomic64_read(v);
...@@ -215,7 +215,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) ...@@ -215,7 +215,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} }
#define arch_atomic64_fetch_and arch_atomic64_fetch_and #define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "orq %1,%0" asm volatile(LOCK_PREFIX "orq %1,%0"
: "+m" (v->counter) : "+m" (v->counter)
...@@ -223,7 +223,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v) ...@@ -223,7 +223,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v)
: "memory"); : "memory");
} }
static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{ {
s64 val = arch_atomic64_read(v); s64 val = arch_atomic64_read(v);
...@@ -233,7 +233,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) ...@@ -233,7 +233,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} }
#define arch_atomic64_fetch_or arch_atomic64_fetch_or #define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v) static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "xorq %1,%0" asm volatile(LOCK_PREFIX "xorq %1,%0"
: "+m" (v->counter) : "+m" (v->counter)
...@@ -241,7 +241,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v) ...@@ -241,7 +241,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
: "memory"); : "memory");
} }
static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v) static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{ {
s64 val = arch_atomic64_read(v); s64 val = arch_atomic64_read(v);
......
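The conversions above swap plain static inline for __always_inline because these helpers are reached from noinstr/cpuidle paths elsewhere in this series: if the compiler ever emits an out-of-line copy, that copy lands outside .noinstr.text and becomes instrumentable, which objtool then flags. A minimal user-space sketch of the difference, using the GCC/Clang attribute directly (the function names are invented for illustration and are not kernel symbols):

/*
 * maybe_outlined() may still be emitted as a real, traceable function if the
 * compiler chooses not to inline it (e.g. at -O0); never_outlined() is forced
 * into every caller regardless of optimization level.
 */
#include <stdio.h>

static inline int maybe_outlined(int x)
{
        return x + 1;
}

static inline __attribute__((__always_inline__)) int never_outlined(int x)
{
        return x + 2;
}

int main(void)
{
        printf("%d %d\n", maybe_outlined(1), never_outlined(1));
        return 0;
}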
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#define XCR_XFEATURE_ENABLED_MASK 0x00000000 #define XCR_XFEATURE_ENABLED_MASK 0x00000000
#define XCR_XFEATURE_IN_USE_MASK 0x00000001 #define XCR_XFEATURE_IN_USE_MASK 0x00000001
static inline u64 xgetbv(u32 index) static __always_inline u64 xgetbv(u32 index)
{ {
u32 eax, edx; u32 eax, edx;
...@@ -27,7 +27,7 @@ static inline void xsetbv(u32 index, u64 value) ...@@ -27,7 +27,7 @@ static inline void xsetbv(u32 index, u64 value)
* *
* Callers should check X86_FEATURE_XGETBV1. * Callers should check X86_FEATURE_XGETBV1.
*/ */
static inline u64 xfeatures_in_use(void) static __always_inline u64 xfeatures_in_use(void)
{ {
return xgetbv(XCR_XFEATURE_IN_USE_MASK); return xgetbv(XCR_XFEATURE_IN_USE_MASK);
} }
......
...@@ -8,9 +8,6 @@ ...@@ -8,9 +8,6 @@
#include <asm/nospec-branch.h> #include <asm/nospec-branch.h>
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")
/* /*
* Interrupt control: * Interrupt control:
*/ */
...@@ -45,13 +42,13 @@ static __always_inline void native_irq_enable(void) ...@@ -45,13 +42,13 @@ static __always_inline void native_irq_enable(void)
asm volatile("sti": : :"memory"); asm volatile("sti": : :"memory");
} }
static inline __cpuidle void native_safe_halt(void) static __always_inline void native_safe_halt(void)
{ {
mds_idle_clear_cpu_buffers(); mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory"); asm volatile("sti; hlt": : :"memory");
} }
static inline __cpuidle void native_halt(void) static __always_inline void native_halt(void)
{ {
mds_idle_clear_cpu_buffers(); mds_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory"); asm volatile("hlt": : :"memory");
...@@ -84,7 +81,7 @@ static __always_inline void arch_local_irq_enable(void) ...@@ -84,7 +81,7 @@ static __always_inline void arch_local_irq_enable(void)
* Used in the idle loop; sti takes one instruction cycle * Used in the idle loop; sti takes one instruction cycle
* to complete: * to complete:
*/ */
static inline __cpuidle void arch_safe_halt(void) static __always_inline void arch_safe_halt(void)
{ {
native_safe_halt(); native_safe_halt();
} }
...@@ -93,7 +90,7 @@ static inline __cpuidle void arch_safe_halt(void) ...@@ -93,7 +90,7 @@ static inline __cpuidle void arch_safe_halt(void)
* Used when interrupts are already enabled or to * Used when interrupts are already enabled or to
* shutdown the processor: * shutdown the processor:
*/ */
static inline __cpuidle void halt(void) static __always_inline void halt(void)
{ {
native_halt(); native_halt();
} }
......
...@@ -8,7 +8,7 @@ extern struct clocksource kvm_clock; ...@@ -8,7 +8,7 @@ extern struct clocksource kvm_clock;
DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) static __always_inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{ {
return &this_cpu_read(hv_clock_per_cpu)->pvti; return &this_cpu_read(hv_clock_per_cpu)->pvti;
} }
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#define TPAUSE_C01_STATE 1 #define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0 #define TPAUSE_C02_STATE 0
static inline void __monitor(const void *eax, unsigned long ecx, static __always_inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx) unsigned long edx)
{ {
/* "monitor %eax, %ecx, %edx;" */ /* "monitor %eax, %ecx, %edx;" */
...@@ -34,7 +34,7 @@ static inline void __monitor(const void *eax, unsigned long ecx, ...@@ -34,7 +34,7 @@ static inline void __monitor(const void *eax, unsigned long ecx,
:: "a" (eax), "c" (ecx), "d"(edx)); :: "a" (eax), "c" (ecx), "d"(edx));
} }
static inline void __monitorx(const void *eax, unsigned long ecx, static __always_inline void __monitorx(const void *eax, unsigned long ecx,
unsigned long edx) unsigned long edx)
{ {
/* "monitorx %eax, %ecx, %edx;" */ /* "monitorx %eax, %ecx, %edx;" */
...@@ -42,7 +42,7 @@ static inline void __monitorx(const void *eax, unsigned long ecx, ...@@ -42,7 +42,7 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
:: "a" (eax), "c" (ecx), "d"(edx)); :: "a" (eax), "c" (ecx), "d"(edx));
} }
static inline void __mwait(unsigned long eax, unsigned long ecx) static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{ {
mds_idle_clear_cpu_buffers(); mds_idle_clear_cpu_buffers();
...@@ -77,7 +77,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) ...@@ -77,7 +77,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
* EAX (logical) address to monitor * EAX (logical) address to monitor
* ECX #GP if not zero * ECX #GP if not zero
*/ */
static inline void __mwaitx(unsigned long eax, unsigned long ebx, static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx) unsigned long ecx)
{ {
/* No MDS buffer clear as this is AMD/HYGON only */ /* No MDS buffer clear as this is AMD/HYGON only */
...@@ -87,7 +87,7 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx, ...@@ -87,7 +87,7 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
:: "a" (eax), "b" (ebx), "c" (ecx)); :: "a" (eax), "b" (ebx), "c" (ecx));
} }
static inline void __sti_mwait(unsigned long eax, unsigned long ecx) static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{ {
mds_idle_clear_cpu_buffers(); mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */ /* "mwait %eax, %ecx;" */
...@@ -105,7 +105,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx) ...@@ -105,7 +105,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
* New with Core Duo processors, MWAIT can take some hints based on CPU * New with Core Duo processors, MWAIT can take some hints based on CPU
* capability. * capability.
*/ */
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{ {
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
......
...@@ -564,7 +564,7 @@ static __always_inline void mds_user_clear_cpu_buffers(void) ...@@ -564,7 +564,7 @@ static __always_inline void mds_user_clear_cpu_buffers(void)
* *
* Clear CPU buffers if the corresponding static key is enabled * Clear CPU buffers if the corresponding static key is enabled
*/ */
static inline void mds_idle_clear_cpu_buffers(void) static __always_inline void mds_idle_clear_cpu_buffers(void)
{ {
if (static_branch_likely(&mds_idle_clear)) if (static_branch_likely(&mds_idle_clear))
mds_clear_cpu_buffers(); mds_clear_cpu_buffers();
......
...@@ -26,7 +26,7 @@ DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock); ...@@ -26,7 +26,7 @@ DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
void paravirt_set_sched_clock(u64 (*func)(void)); void paravirt_set_sched_clock(u64 (*func)(void));
static inline u64 paravirt_sched_clock(void) static __always_inline u64 paravirt_sched_clock(void)
{ {
return static_call(pv_sched_clock)(); return static_call(pv_sched_clock)();
} }
...@@ -168,7 +168,7 @@ static inline void __write_cr4(unsigned long x) ...@@ -168,7 +168,7 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x); PVOP_VCALL1(cpu.write_cr4, x);
} }
static inline void arch_safe_halt(void) static __always_inline void arch_safe_halt(void)
{ {
PVOP_VCALL0(irq.safe_halt); PVOP_VCALL0(irq.safe_halt);
} }
...@@ -178,7 +178,9 @@ static inline void halt(void) ...@@ -178,7 +178,9 @@ static inline void halt(void)
PVOP_VCALL0(irq.halt); PVOP_VCALL0(irq.halt);
} }
static inline void wbinvd(void) extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)
{ {
PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV)); PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
} }
......
...@@ -586,7 +586,7 @@ extern void perf_amd_brs_lopwr_cb(bool lopwr_in); ...@@ -586,7 +586,7 @@ extern void perf_amd_brs_lopwr_cb(bool lopwr_in);
DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb); DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
static inline void perf_lopwr_cb(bool lopwr_in) static __always_inline void perf_lopwr_cb(bool lopwr_in)
{ {
static_call_mod(perf_lopwr_cb)(lopwr_in); static_call_mod(perf_lopwr_cb)(lopwr_in);
} }
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
/* some helper functions for xen and kvm pv clock sources */ /* some helper functions for xen and kvm pv clock sources */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src);
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src); u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
void pvclock_set_flags(u8 flags); void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
...@@ -39,7 +40,7 @@ bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src, ...@@ -39,7 +40,7 @@ bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result. * yielding a 64-bit result.
*/ */
static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) static __always_inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{ {
u64 product; u64 product;
#ifdef __i386__ #ifdef __i386__
......
...@@ -5,13 +5,13 @@ ...@@ -5,13 +5,13 @@
#include <linux/types.h> #include <linux/types.h>
#define BUILDIO(bwl, bw, type) \ #define BUILDIO(bwl, bw, type) \
static inline void __out##bwl(type value, u16 port) \ static __always_inline void __out##bwl(type value, u16 port) \
{ \ { \
asm volatile("out" #bwl " %" #bw "0, %w1" \ asm volatile("out" #bwl " %" #bw "0, %w1" \
: : "a"(value), "Nd"(port)); \ : : "a"(value), "Nd"(port)); \
} \ } \
\ \
static inline type __in##bwl(u16 port) \ static __always_inline type __in##bwl(u16 port) \
{ \ { \
type value; \ type value; \
asm volatile("in" #bwl " %w1, %" #bw "0" \ asm volatile("in" #bwl " %w1, %" #bw "0" \
......
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
#define TDX_HYPERCALL_STANDARD 0 #define TDX_HYPERCALL_STANDARD 0
#define TDX_HCALL_HAS_OUTPUT BIT(0) #define TDX_HCALL_HAS_OUTPUT BIT(0)
#define TDX_HCALL_ISSUE_STI BIT(1)
#define TDX_CPUID_LEAF_ID 0x21 #define TDX_CPUID_LEAF_ID 0x21
#define TDX_IDENT "IntelTDX " #define TDX_IDENT "IntelTDX "
......
...@@ -115,7 +115,7 @@ static inline void wrpkru(u32 pkru) ...@@ -115,7 +115,7 @@ static inline void wrpkru(u32 pkru)
} }
#endif #endif
static inline void native_wbinvd(void) static __always_inline void native_wbinvd(void)
{ {
asm volatile("wbinvd": : :"memory"); asm volatile("wbinvd": : :"memory");
} }
...@@ -179,7 +179,7 @@ static inline void __write_cr4(unsigned long x) ...@@ -179,7 +179,7 @@ static inline void __write_cr4(unsigned long x)
native_write_cr4(x); native_write_cr4(x);
} }
static inline void wbinvd(void) static __always_inline void wbinvd(void)
{ {
native_wbinvd(); native_wbinvd();
} }
...@@ -196,7 +196,7 @@ static inline void load_gs_index(unsigned int selector) ...@@ -196,7 +196,7 @@ static inline void load_gs_index(unsigned int selector)
#endif /* CONFIG_PARAVIRT_XXL */ #endif /* CONFIG_PARAVIRT_XXL */
static inline void clflush(volatile void *__p) static __always_inline void clflush(volatile void *__p)
{ {
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
} }
...@@ -295,7 +295,7 @@ static inline int enqcmds(void __iomem *dst, const void *src) ...@@ -295,7 +295,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
return 0; return 0;
} }
static inline void tile_release(void) static __always_inline void tile_release(void)
{ {
/* /*
* Instruction opcode for TILERELEASE; supported in binutils * Instruction opcode for TILERELEASE; supported in binutils
......
...@@ -382,7 +382,7 @@ MULTI_stack_switch(struct multicall_entry *mcl, ...@@ -382,7 +382,7 @@ MULTI_stack_switch(struct multicall_entry *mcl,
} }
#endif #endif
static inline int static __always_inline int
HYPERVISOR_sched_op(int cmd, void *arg) HYPERVISOR_sched_op(int cmd, void *arg)
{ {
return _hypercall2(int, sched_op, cmd, arg); return _hypercall2(int, sched_op, cmd, arg);
......
...@@ -86,7 +86,7 @@ void update_spec_ctrl_cond(u64 val) ...@@ -86,7 +86,7 @@ void update_spec_ctrl_cond(u64 val)
wrmsrl(MSR_IA32_SPEC_CTRL, val); wrmsrl(MSR_IA32_SPEC_CTRL, val);
} }
u64 spec_ctrl_current(void) noinstr u64 spec_ctrl_current(void)
{ {
return this_cpu_read(x86_spec_ctrl_current); return this_cpu_read(x86_spec_ctrl_current);
} }
......
...@@ -143,7 +143,7 @@ static __init int parse_no_stealacc(char *arg) ...@@ -143,7 +143,7 @@ static __init int parse_no_stealacc(char *arg)
} }
early_param("no-steal-acc", parse_no_stealacc); early_param("no-steal-acc", parse_no_stealacc);
static unsigned long long notrace vmware_sched_clock(void) static noinstr u64 vmware_sched_clock(void)
{ {
unsigned long long ns; unsigned long long ns;
......
...@@ -853,12 +853,12 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr) ...@@ -853,12 +853,12 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
* Initialize register state that may prevent from entering low-power idle. * Initialize register state that may prevent from entering low-power idle.
* This function will be invoked from the cpuidle driver only when needed. * This function will be invoked from the cpuidle driver only when needed.
*/ */
void fpu_idle_fpregs(void) noinstr void fpu_idle_fpregs(void)
{ {
/* Note: AMX_TILE being enabled implies XGETBV1 support */ /* Note: AMX_TILE being enabled implies XGETBV1 support */
if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) && if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
(xfeatures_in_use() & XFEATURE_MASK_XTILE)) { (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
tile_release(); tile_release();
fpregs_deactivate(&current->thread.fpu); __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
} }
} }
...@@ -71,12 +71,12 @@ static int kvm_set_wallclock(const struct timespec64 *now) ...@@ -71,12 +71,12 @@ static int kvm_set_wallclock(const struct timespec64 *now)
return -ENODEV; return -ENODEV;
} }
static u64 kvm_clock_read(void) static noinstr u64 kvm_clock_read(void)
{ {
u64 ret; u64 ret;
preempt_disable_notrace(); preempt_disable_notrace();
ret = pvclock_clocksource_read(this_cpu_pvti()); ret = pvclock_clocksource_read_nowd(this_cpu_pvti());
preempt_enable_notrace(); preempt_enable_notrace();
return ret; return ret;
} }
...@@ -86,7 +86,7 @@ static u64 kvm_clock_get_cycles(struct clocksource *cs) ...@@ -86,7 +86,7 @@ static u64 kvm_clock_get_cycles(struct clocksource *cs)
return kvm_clock_read(); return kvm_clock_read();
} }
static u64 kvm_sched_clock_read(void) static noinstr u64 kvm_sched_clock_read(void)
{ {
return kvm_clock_read() - kvm_sched_clock_offset; return kvm_clock_read() - kvm_sched_clock_offset;
} }
......
...@@ -216,6 +216,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val) ...@@ -216,6 +216,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
native_set_debugreg(regno, val); native_set_debugreg(regno, val);
} }
noinstr void pv_native_wbinvd(void)
{
native_wbinvd();
}
static noinstr void pv_native_irq_enable(void) static noinstr void pv_native_irq_enable(void)
{ {
native_irq_enable(); native_irq_enable();
...@@ -225,6 +230,11 @@ static noinstr void pv_native_irq_disable(void) ...@@ -225,6 +230,11 @@ static noinstr void pv_native_irq_disable(void)
{ {
native_irq_disable(); native_irq_disable();
} }
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#endif #endif
enum paravirt_lazy_mode paravirt_get_lazy_mode(void) enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
...@@ -256,7 +266,7 @@ struct paravirt_patch_template pv_ops = { ...@@ -256,7 +266,7 @@ struct paravirt_patch_template pv_ops = {
.cpu.read_cr0 = native_read_cr0, .cpu.read_cr0 = native_read_cr0,
.cpu.write_cr0 = native_write_cr0, .cpu.write_cr0 = native_write_cr0,
.cpu.write_cr4 = native_write_cr4, .cpu.write_cr4 = native_write_cr4,
.cpu.wbinvd = native_wbinvd, .cpu.wbinvd = pv_native_wbinvd,
.cpu.read_msr = native_read_msr, .cpu.read_msr = native_read_msr,
.cpu.write_msr = native_write_msr, .cpu.write_msr = native_write_msr,
.cpu.read_msr_safe = native_read_msr_safe, .cpu.read_msr_safe = native_read_msr_safe,
...@@ -290,7 +300,7 @@ struct paravirt_patch_template pv_ops = { ...@@ -290,7 +300,7 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable), .irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable), .irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
.irq.safe_halt = native_safe_halt, .irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt, .irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */ #endif /* CONFIG_PARAVIRT_XXL */
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/elf-randomize.h> #include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h> #include <trace/events/power.h>
#include <linux/hw_breakpoint.h> #include <linux/hw_breakpoint.h>
#include <asm/cpu.h> #include <asm/cpu.h>
...@@ -694,7 +695,24 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -694,7 +695,24 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override); EXPORT_SYMBOL(boot_option_idle_override);
static void (*x86_idle)(void); /*
* We use this if we don't have any better idle routine..
*/
void __cpuidle default_idle(void)
{
raw_safe_halt();
raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);
static bool x86_idle_set(void)
{
return !!static_call_query(x86_idle);
}
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
static inline void play_dead(void) static inline void play_dead(void)
...@@ -717,28 +735,17 @@ void arch_cpu_idle_dead(void) ...@@ -717,28 +735,17 @@ void arch_cpu_idle_dead(void)
/* /*
* Called from the generic idle code. * Called from the generic idle code.
*/ */
void arch_cpu_idle(void) void __cpuidle arch_cpu_idle(void)
{ {
x86_idle(); static_call(x86_idle)();
} }
/*
* We use this if we don't have any better idle routine..
*/
void __cpuidle default_idle(void)
{
raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
bool xen_set_default_idle(void) bool xen_set_default_idle(void)
{ {
bool ret = !!x86_idle; bool ret = x86_idle_set();
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
return ret; return ret;
} }
...@@ -800,13 +807,7 @@ static void amd_e400_idle(void) ...@@ -800,13 +807,7 @@ static void amd_e400_idle(void)
default_idle(); default_idle();
/*
* The switch back from broadcast mode needs to be called with
* interrupts disabled.
*/
raw_local_irq_disable();
tick_broadcast_exit(); tick_broadcast_exit();
raw_local_irq_enable();
} }
/* /*
...@@ -864,12 +865,10 @@ static __cpuidle void mwait_idle(void) ...@@ -864,12 +865,10 @@ static __cpuidle void mwait_idle(void)
} }
__monitor((void *)&current_thread_info()->flags, 0, 0); __monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched()) if (!need_resched()) {
__sti_mwait(0, 0); __sti_mwait(0, 0);
else raw_local_irq_disable();
raw_local_irq_enable(); }
} else {
raw_local_irq_enable();
} }
__current_clr_polling(); __current_clr_polling();
} }
...@@ -880,20 +879,20 @@ void select_idle_routine(const struct cpuinfo_x86 *c) ...@@ -880,20 +879,20 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif #endif
if (x86_idle || boot_option_idle_override == IDLE_POLL) if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
return; return;
if (boot_cpu_has_bug(X86_BUG_AMD_E400)) { if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n"); pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle; static_call_update(x86_idle, amd_e400_idle);
} else if (prefer_mwait_c1_over_halt(c)) { } else if (prefer_mwait_c1_over_halt(c)) {
pr_info("using mwait in idle threads\n"); pr_info("using mwait in idle threads\n");
x86_idle = mwait_idle; static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) { } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n"); pr_info("using TDX aware idle routine\n");
x86_idle = tdx_safe_halt; static_call_update(x86_idle, tdx_safe_halt);
} else } else
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
} }
void amd_e400_c1e_apic_setup(void) void amd_e400_c1e_apic_setup(void)
...@@ -946,7 +945,7 @@ static int __init idle_setup(char *str) ...@@ -946,7 +945,7 @@ static int __init idle_setup(char *str)
* To continue to load the CPU idle driver, don't touch * To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override. * the boot_option_idle_override.
*/ */
x86_idle = default_idle; static_call_update(x86_idle, default_idle);
boot_option_idle_override = IDLE_HALT; boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) { } else if (!strcmp(str, "nomwait")) {
/* /*
......
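The process.c hunk above replaces the x86_idle function pointer with a static call: DEFINE_STATIC_CALL_NULL() defines the call key, static_call_update() installs the idle routine chosen at boot, and arch_cpu_idle() invokes it via static_call(), which on x86 is patched into a direct call rather than an indirect branch. Below is a minimal user-space sketch of the selection logic, with a plain function pointer standing in for the static call; the names mirror the patch, but the sketch is illustrative, not kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void default_idle(void) { puts("hlt-style idle"); }
static void mwait_idle(void)   { puts("mwait-style idle"); }

/* Starts out NULL, like DEFINE_STATIC_CALL_NULL(x86_idle, default_idle). */
static void (*x86_idle)(void);

static bool x86_idle_set(void)
{
        return x86_idle != NULL;        /* stand-in for static_call_query() */
}

static void select_idle_routine(bool prefer_mwait)
{
        if (x86_idle_set())
                return;                 /* a routine was already chosen */
        x86_idle = prefer_mwait ? mwait_idle : default_idle;
}

static void arch_cpu_idle(void)
{
        x86_idle();                     /* static_call(x86_idle)() in the patch */
}

int main(void)
{
        select_idle_routine(true);
        arch_cpu_idle();
        return 0;
}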
...@@ -64,7 +64,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) ...@@ -64,7 +64,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
return flags & valid_flags; return flags & valid_flags;
} }
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) static __always_inline
u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
{ {
unsigned version; unsigned version;
u64 ret; u64 ret;
...@@ -77,7 +78,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) ...@@ -77,7 +78,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
flags = src->flags; flags = src->flags;
} while (pvclock_read_retry(src, version)); } while (pvclock_read_retry(src, version));
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) { if (dowd && unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
src->flags &= ~PVCLOCK_GUEST_STOPPED; src->flags &= ~PVCLOCK_GUEST_STOPPED;
pvclock_touch_watchdogs(); pvclock_touch_watchdogs();
} }
...@@ -100,16 +101,25 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) ...@@ -100,16 +101,25 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
* updating at the same time, and one of them could be slightly behind, * updating at the same time, and one of them could be slightly behind,
* making the assumption that last_value always go forward fail to hold. * making the assumption that last_value always go forward fail to hold.
*/ */
last = atomic64_read(&last_value); last = arch_atomic64_read(&last_value);
do { do {
if (ret < last) if (ret <= last)
return last; return last;
last = atomic64_cmpxchg(&last_value, last, ret); } while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
} while (unlikely(last != ret));
return ret; return ret;
} }
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
return __pvclock_clocksource_read(src, true);
}
noinstr u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src)
{
return __pvclock_clocksource_read(src, false);
}
void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
struct pvclock_vcpu_time_info *vcpu_time, struct pvclock_vcpu_time_info *vcpu_time,
struct timespec64 *ts) struct timespec64 *ts)
......
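The pvclock hunk above tightens the last_value clamp in three ways: the comparison becomes ret <= last, so an equal reading returns early instead of issuing a pointless cmpxchg; the open-coded cmpxchg retry loop becomes arch_atomic64_try_cmpxchg(), which reloads the observed value into "last" on failure; and the arch_ variants keep the new noinstr read path free of instrumented wrappers. A user-space sketch of the same monotonic-clamp pattern with C11 atomics (the helper name is invented for illustration):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Never let the returned timestamp go backwards, even if this CPU's raw
 * reading is slightly behind a value another CPU already published. */
static uint64_t clamp_monotonic(uint64_t ret)
{
        uint64_t last = atomic_load(&last_value);

        do {
                if (ret <= last)
                        return last;    /* someone else is already at or past ret */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));

        return ret;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)clamp_monotonic(100));   /* 100 */
        printf("%llu\n", (unsigned long long)clamp_monotonic(90));    /* still 100 */
        return 0;
}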
...@@ -215,7 +215,7 @@ static void __init cyc2ns_init_secondary_cpus(void) ...@@ -215,7 +215,7 @@ static void __init cyc2ns_init_secondary_cpus(void)
/* /*
* Scheduler clock - returns current time in nanosec units. * Scheduler clock - returns current time in nanosec units.
*/ */
u64 native_sched_clock(void) noinstr u64 native_sched_clock(void)
{ {
if (static_branch_likely(&__use_tsc)) { if (static_branch_likely(&__use_tsc)) {
u64 tsc_now = rdtsc(); u64 tsc_now = rdtsc();
...@@ -248,7 +248,7 @@ u64 native_sched_clock_from_tsc(u64 tsc) ...@@ -248,7 +248,7 @@ u64 native_sched_clock_from_tsc(u64 tsc)
/* We need to define a real function for sched_clock, to override the /* We need to define a real function for sched_clock, to override the
weak default version */ weak default version */
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void) noinstr u64 sched_clock(void)
{ {
return paravirt_sched_clock(); return paravirt_sched_clock();
} }
...@@ -258,8 +258,7 @@ bool using_native_sched_clock(void) ...@@ -258,8 +258,7 @@ bool using_native_sched_clock(void)
return static_call_query(pv_sched_clock) == native_sched_clock; return static_call_query(pv_sched_clock) == native_sched_clock;
} }
#else #else
unsigned long long u64 sched_clock(void) __attribute__((alias("native_sched_clock")));
sched_clock(void) __attribute__((alias("native_sched_clock")));
bool using_native_sched_clock(void) { return true; } bool using_native_sched_clock(void) { return true; }
#endif #endif
......
...@@ -129,7 +129,6 @@ SECTIONS ...@@ -129,7 +129,6 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/export.h> #include <asm/export.h>
.pushsection .noinstr.text, "ax" .section .noinstr.text, "ax"
/* /*
* We build a jump to memcpy_orig by default which gets NOPped out on * We build a jump to memcpy_orig by default which gets NOPped out on
...@@ -43,7 +43,7 @@ SYM_TYPED_FUNC_START(__memcpy) ...@@ -43,7 +43,7 @@ SYM_TYPED_FUNC_START(__memcpy)
SYM_FUNC_END(__memcpy) SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy) EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) SYM_FUNC_ALIAS(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(memcpy)
/* /*
...@@ -184,4 +184,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig) ...@@ -184,4 +184,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
RET RET
SYM_FUNC_END(memcpy_orig) SYM_FUNC_END(memcpy_orig)
.popsection