Commit 932562a6 authored by Kent Overstreet

rseq: Split out rseq.h from sched.h

We're trying to get sched.h down to more or less just types, not
code - rseq can live in its own header.

This helps us kill the dependency on preempt.h in sched.h.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c968b99f
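
The mechanical part of the patch is the same in every file below: anything that calls the rseq hooks must now include <linux/rseq.h> directly instead of picking the declarations up transitively through <linux/sched.h>. A minimal sketch of what a caller ends up looking like (the file and function names here are hypothetical, not taken from the patch):

/* hypothetical arch signal-delivery code */
#include <linux/sched.h>	/* no longer provides the rseq hooks after this patch */
#include <linux/rseq.h>		/* now needed for rseq_signal_deliver() and friends */

static void hypothetical_deliver_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	/* ... set up the signal frame on the user stack ... */
	rseq_signal_deliver(ksig, regs);	/* declared in <linux/rseq.h> */
}
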
@@ -28,6 +28,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/regset.h>
 #include <linux/elf.h>
+#include <linux/rseq.h>
 #include <asm/compat.h>
 #include <asm/cpufeature.h>

@@ -3,6 +3,7 @@
 #include <linux/context_tracking.h>
 #include <linux/err.h>
 #include <linux/compat.h>
+#include <linux/rseq.h>
 #include <linux/sched/debug.h> /* for show_regs */
 #include <asm/kup.h>

@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/rseq.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kernel.h>

@@ -27,6 +27,7 @@
 #include <linux/context_tracking.h>
 #include <linux/entry-common.h>
 #include <linux/syscalls.h>
+#include <linux/rseq.h>
 #include <asm/processor.h>
 #include <asm/ucontext.h>

@@ -66,6 +66,7 @@
 #include <linux/coredump.h>
 #include <linux/time_namespace.h>
 #include <linux/user_events.h>
+#include <linux/rseq.h>
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>

@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/task_work.h>
 #include <linux/memcontrol.h>
+#include <linux/rseq.h>
 #include <linux/blk-cgroup.h>
 /**

/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _LINUX_RSEQ_H
#define _LINUX_RSEQ_H

#ifdef CONFIG_RSEQ

#include <linux/preempt.h>
#include <linux/sched.h>

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Unregister rseq for a clone with CLONE_VM set.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_len = 0;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_len = current->rseq_len;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_len = 0;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

#endif /* _LINUX_RSEQ_H */
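
For orientation, here is roughly where the hooks in the new header get called from; the caller function names in this sketch are illustrative, not part of this patch:

/* scheduler side: preemption/migration events, called with preemption disabled */
static void hypothetical_sched_switch_tail(struct task_struct *prev)
{
	rseq_preempt(prev);			/* record the event and set TIF_NOTIFY_RESUME */
}

/* exit to user mode: TIF_NOTIFY_RESUME work */
static void hypothetical_resume_user_mode_work(struct pt_regs *regs)
{
	rseq_handle_notify_resume(NULL, regs);	/* fix up an interrupted rseq critical section */
}

/*
 * Arch signal delivery calls rseq_signal_deliver(ksig, regs); with CONFIG_DEBUG_RSEQ,
 * the arch syscall path calls rseq_syscall(regs) to catch syscalls issued from inside
 * an rseq critical section.
 */
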
@@ -35,7 +35,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers_types.h>
 #include <linux/restart_block.h>
-#include <linux/rseq.h>
+#include <uapi/linux/rseq.h>
 #include <linux/seqlock_types.h>
 #include <linux/kcsan.h>
 #include <linux/rv.h>

@@ -2181,129 +2181,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
 unsigned long sched_cpu_util(int cpu);
 #endif /* CONFIG_SMP */
-#ifdef CONFIG_RSEQ
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
-	RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
-	RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
-	RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
-	RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
-	RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
-	RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-	if (t->rseq)
-		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
-
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
-					     struct pt_regs *regs)
-{
-	if (current->rseq)
-		__rseq_handle_notify_resume(ksig, regs);
-}
-
-static inline void rseq_signal_deliver(struct ksignal *ksig,
-				       struct pt_regs *regs)
-{
-	preempt_disable();
-	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
-	preempt_enable();
-	rseq_handle_notify_resume(ksig, regs);
-}
-
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
-{
-	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
-	rseq_set_notify_resume(t);
-}
-
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
-{
-	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
-	rseq_set_notify_resume(t);
-}
-
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
-	if (clone_flags & CLONE_VM) {
-		t->rseq = NULL;
-		t->rseq_len = 0;
-		t->rseq_sig = 0;
-		t->rseq_event_mask = 0;
-	} else {
-		t->rseq = current->rseq;
-		t->rseq_len = current->rseq_len;
-		t->rseq_sig = current->rseq_sig;
-		t->rseq_event_mask = current->rseq_event_mask;
-	}
-}
-
-static inline void rseq_execve(struct task_struct *t)
-{
-	t->rseq = NULL;
-	t->rseq_len = 0;
-	t->rseq_sig = 0;
-	t->rseq_event_mask = 0;
-}
-
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
-					     struct pt_regs *regs)
-{
-}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
-				       struct pt_regs *regs)
-{
-}
-static inline void rseq_preempt(struct task_struct *t)
-{
-}
-static inline void rseq_migrate(struct task_struct *t)
-{
-}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
-}
-static inline void rseq_execve(struct task_struct *t)
-{
-}
-
-#endif
-
-#ifdef CONFIG_DEBUG_RSEQ
-
-void rseq_syscall(struct pt_regs *regs);
-
-#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
-#endif
-
 #ifdef CONFIG_SCHED_CORE
 extern void sched_core_free(struct task_struct *tsk);
 extern void sched_core_fork(struct task_struct *p);

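Note that sched.h keeps an include of the UAPI header (<uapi/linux/rseq.h>) even though the helpers moved out, because task_struct still embeds the userspace rseq registration. The fields the helpers above manipulate look roughly like this (a paraphrase for context, not part of this diff; exact types may differ from the tree you are reading):

struct task_struct {
	/* ... */
#ifdef CONFIG_RSEQ
	struct rseq __user	*rseq;		/* userspace-registered rseq area */
	u32			rseq_len;
	u32			rseq_sig;	/* signature checked before aborting a critical section */
	unsigned long		rseq_event_mask;	/* pending RSEQ_EVENT_* bits */
#endif
	/* ... */
};
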
@@ -100,6 +100,7 @@
 #include <linux/stackprotector.h>
 #include <linux/user_events.h>
 #include <linux/iommu.h>
+#include <linux/rseq.h>
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>

@@ -57,6 +57,7 @@
 #include <linux/profile.h>
 #include <linux/psi.h>
 #include <linux/rcuwait_api.h>
+#include <linux/rseq.h>
 #include <linux/sched/wake_q.h>
 #include <linux/scs.h>
 #include <linux/slab.h>