Commit c8034099 authored by Ben Hutchings, committed by Greg Kroah-Hartman

sched: Add sched_smt_active()

Add the sched_smt_active() function needed for some x86 speculation
mitigations.  This was introduced upstream by commits 1b568f0a
"sched/core: Optimize SCHED_SMT", ba2591a5 "sched/smt: Update
sched_smt_present at runtime", c5511d03 "sched/smt: Make
sched_smt_present track topology", and 321a874a "sched/smt: Expose
sched_smt_present static key".  The upstream implementation uses the
static_key_{disable,enable}_cpuslocked() functions, which aren't
practical to backport.
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4cc15490
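
For reference, the upstream helper exposed by the commits cited in the message above is built on a static key rather than an atomic counter; the following is a rough paraphrase of that upstream shape, not part of this backport:

#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;

static __always_inline bool sched_smt_active(void)
{
        return static_branch_likely(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void) { return false; }
#endif

The backport below substitutes a plain atomic_t counter, which the CPU hotplug paths can update without the *_cpuslocked() static-key helpers.
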
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SMT_H
#define _LINUX_SCHED_SMT_H

#include <linux/atomic.h>

#ifdef CONFIG_SCHED_SMT
extern atomic_t sched_smt_present;

static __always_inline bool sched_smt_active(void)
{
        return atomic_read(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void) { return false; }
#endif

#endif
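
A minimal, hypothetical caller, only to illustrate the intended use: sched_smt_active() and CONFIG_SCHED_SMT above come from this patch, but the function below does not exist in the kernel:

#include <linux/sched/smt.h>

/*
 * Hypothetical example: apply an SMT-specific speculation mitigation
 * only while at least one core has a sibling thread online.
 */
static bool example_smt_mitigation_needed(void)
{
        return sched_smt_active();
}
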
@@ -7355,11 +7355,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
        return 0;
}

#ifdef CONFIG_SCHED_SMT
atomic_t sched_smt_present = ATOMIC_INIT(0);
#endif

int sched_cpu_activate(unsigned int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_SCHED_SMT
        /*
         * When going up, increment the number of cores with SMT present.
         */
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                atomic_inc(&sched_smt_present);
#endif
        set_cpu_active(cpu, true);

        if (sched_smp_initialized) {
@@ -7408,6 +7419,14 @@ int sched_cpu_deactivate(unsigned int cpu)
        else
                synchronize_rcu();

#ifdef CONFIG_SCHED_SMT
        /*
         * When going down, decrement the number of cores with SMT present.
         */
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                atomic_dec(&sched_smt_present);
#endif

        if (!sched_smp_initialized)
                return 0;
...
@@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/smt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/kernel_stat.h>
...