Commit 3e2cbc01 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86_splitlock_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 splitlock updates from Borislav Petkov:

 - Add Raptor Lake to the set of CPU models which support splitlock

 - Make life miserable for apps using split locks by slowing them down
   considerably while the rest of the system remains responsive. The
   hope is it will hurt more and people will really fix their misaligned
   locks apps. As a result, free a TIF bit.

* tag 'x86_splitlock_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/split_lock: Enable the split lock feature on Raptor Lake
  x86/split-lock: Remove unused TIF_SLD bit
  x86/split_lock: Make life miserable for split lockers
parents 91665420 0180a1e8
...@@ -45,14 +45,12 @@ unsigned int x86_model(unsigned int sig); ...@@ -45,14 +45,12 @@ unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig); unsigned int x86_stepping(unsigned int sig);
#ifdef CONFIG_CPU_SUP_INTEL #ifdef CONFIG_CPU_SUP_INTEL
extern void __init sld_setup(struct cpuinfo_x86 *c); extern void __init sld_setup(struct cpuinfo_x86 *c);
extern void switch_to_sld(unsigned long tifn);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip); extern bool handle_guest_split_lock(unsigned long ip);
extern void handle_bus_lock(struct pt_regs *regs); extern void handle_bus_lock(struct pt_regs *regs);
u8 get_this_hybrid_cpu_type(void); u8 get_this_hybrid_cpu_type(void);
#else #else
static inline void __init sld_setup(struct cpuinfo_x86 *c) {} static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline void switch_to_sld(unsigned long tifn) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{ {
return false; return false;
......
...@@ -92,7 +92,6 @@ struct thread_info { ...@@ -92,7 +92,6 @@ struct thread_info {
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */ #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
#define TIF_SLD 18 /* Restore split lock detection on context switch */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
...@@ -116,7 +115,6 @@ struct thread_info { ...@@ -116,7 +115,6 @@ struct thread_info {
#define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_SLD (1 << TIF_SLD)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
...@@ -128,7 +126,7 @@ struct thread_info { ...@@ -128,7 +126,7 @@ struct thread_info {
/* flags to check in __switch_to() */ /* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \ #define _TIF_WORK_CTXSW_BASE \
(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \ (_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
_TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD) _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
/* /*
* Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated. * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
......
...@@ -7,10 +7,13 @@ ...@@ -7,10 +7,13 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/clock.h> #include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/cpuhotplug.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/msr.h> #include <asm/msr.h>
...@@ -999,6 +1002,8 @@ static const struct { ...@@ -999,6 +1002,8 @@ static const struct {
static struct ratelimit_state bld_ratelimit; static struct ratelimit_state bld_ratelimit;
static DEFINE_SEMAPHORE(buslock_sem);
static inline bool match_option(const char *arg, int arglen, const char *opt) static inline bool match_option(const char *arg, int arglen, const char *opt)
{ {
int len = strlen(opt), ratelimit; int len = strlen(opt), ratelimit;
...@@ -1109,18 +1114,52 @@ static void split_lock_init(void) ...@@ -1109,18 +1114,52 @@ static void split_lock_init(void)
split_lock_verify_msr(sld_state != sld_off); split_lock_verify_msr(sld_state != sld_off);
} }
/*
 * Delayed-work callback: re-enable split lock detection on the CPU this
 * work item runs on, then release buslock_sem so the next offending task
 * (possibly on another core) may take its turn with detection disabled.
 */
static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
up(&buslock_sem);
}
/*
* If a CPU goes offline with pending delayed work to re-enable split lock
* detection then the delayed work will be executed on some other CPU. That
* handles releasing the buslock_sem, but because it executes on a
* different CPU probably won't re-enable split lock detection. This is a
* problem on HT systems since the sibling CPU on the same core may then be
* left running with split lock detection disabled.
*
* Unconditionally re-enable detection here.
*/
/* CPU-hotplug teardown callback; returning 0 reports success to the hotplug core. */
static int splitlock_cpu_offline(unsigned int cpu)
{
/* See comment above: force detection back on regardless of pending work. */
sld_update_msr(true);
return 0;
}
static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
static void split_lock_warn(unsigned long ip) static void split_lock_warn(unsigned long ip)
{ {
pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", int cpu;
current->comm, current->pid, ip);
/* if (!current->reported_split_lock)
* Disable the split lock detection for this task so it can make pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
* progress and set TIF_SLD so the detection is re-enabled via current->comm, current->pid, ip);
* switch_to_sld() when the task is scheduled out. current->reported_split_lock = 1;
*/
/* misery factor #1, sleep 10ms before trying to execute split lock */
if (msleep_interruptible(10) > 0)
return;
/* Misery factor #2, only allow one buslocked disabled core at a time */
if (down_interruptible(&buslock_sem) == -EINTR)
return;
cpu = get_cpu();
schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
/* Disable split lock detection on this CPU to make progress */
sld_update_msr(false); sld_update_msr(false);
set_tsk_thread_flag(current, TIF_SLD); put_cpu();
} }
bool handle_guest_split_lock(unsigned long ip) bool handle_guest_split_lock(unsigned long ip)
...@@ -1193,18 +1232,6 @@ void handle_bus_lock(struct pt_regs *regs) ...@@ -1193,18 +1232,6 @@ void handle_bus_lock(struct pt_regs *regs)
} }
} }
/*
* This function is called only when switching between tasks with
* different split-lock detection modes. It sets the MSR for the
* mode of the new task. This is right most of the time, but since
* the MSR is shared by hyperthreads on a physical core there can
* be glitches when the two threads need different modes.
*/
void switch_to_sld(unsigned long tifn)
{
sld_update_msr(!(tifn & _TIF_SLD));
}
/* /*
* Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
* only be trusted if it is confirmed that a CPU model implements a * only be trusted if it is confirmed that a CPU model implements a
...@@ -1230,6 +1257,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { ...@@ -1230,6 +1257,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 1),
{} {}
}; };
...@@ -1274,10 +1302,14 @@ static void sld_state_show(void) ...@@ -1274,10 +1302,14 @@ static void sld_state_show(void)
pr_info("disabled\n"); pr_info("disabled\n");
break; break;
case sld_warn: case sld_warn:
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"x86/splitlock", NULL, splitlock_cpu_offline) < 0)
pr_warn("No splitlock CPU offline handler\n");
} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
pr_info("#DB: warning on user-space bus_locks\n"); pr_info("#DB: warning on user-space bus_locks\n");
}
break; break;
case sld_fatal: case sld_fatal:
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
......
...@@ -684,9 +684,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) ...@@ -684,9 +684,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
/* Enforce MSR update to ensure consistent state */ /* Enforce MSR update to ensure consistent state */
__speculation_ctrl_update(~tifn, tifn); __speculation_ctrl_update(~tifn, tifn);
} }
if ((tifp ^ tifn) & _TIF_SLD)
switch_to_sld(tifn);
} }
/* /*
......
...@@ -941,6 +941,9 @@ struct task_struct { ...@@ -941,6 +941,9 @@ struct task_struct {
#ifdef CONFIG_IOMMU_SVA #ifdef CONFIG_IOMMU_SVA
unsigned pasid_activated:1; unsigned pasid_activated:1;
#endif #endif
#ifdef CONFIG_CPU_SUP_INTEL
unsigned reported_split_lock:1;
#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */ unsigned long atomic_flags; /* Flags requiring atomic access. */
......
...@@ -1046,6 +1046,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) ...@@ -1046,6 +1046,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
tsk->active_memcg = NULL; tsk->active_memcg = NULL;
#endif #endif
#ifdef CONFIG_CPU_SUP_INTEL
tsk->reported_split_lock = 0;
#endif
return tsk; return tsk;
free_stack: free_stack:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment