Commit ae04f69d authored by Qais Yousef, committed by Peter Zijlstra

sched/rt: Rename realtime_{prio, task}() to rt_or_dl_{prio, task}()

Some find the name realtime overloaded. Use rt_or_dl() as an
alternative, hopefully better, name.
Suggested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Qais Yousef <qyousef@layalina.io>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240610192018.1567075-4-qyousef@layalina.io
parent b166af3d
@@ -335,7 +335,7 @@ static inline bool six_owner_running(struct six_lock *lock)
 	 */
 	rcu_read_lock();
 	struct task_struct *owner = READ_ONCE(lock->owner);
-	bool ret = owner ? owner_on_cpu(owner) : !realtime_task(current);
+	bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
 	rcu_read_unlock();

 	return ret;
...
@@ -82,7 +82,7 @@ u64 select_estimate_accuracy(struct timespec64 *tv)
 	 * Realtime tasks get a slack of 0 for obvious reasons.
 	 */

-	if (realtime_task(current))
+	if (rt_or_dl_task(current))
 		return 0;

 	ktime_get_ts64(&now);
...
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
 {
 	if (task->policy == SCHED_IDLE)
 		return IOPRIO_CLASS_IDLE;
-	else if (realtime_task_policy(task))
+	else if (rt_or_dl_task_policy(task))
 		return IOPRIO_CLASS_RT;
 	else
 		return IOPRIO_CLASS_BE;
...
@@ -11,7 +11,7 @@ static inline bool rt_prio(int prio)
 	return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
 }

-static inline bool realtime_prio(int prio)
+static inline bool rt_or_dl_prio(int prio)
 {
 	return unlikely(prio < MAX_RT_PRIO);
 }
@@ -27,19 +27,19 @@ static inline bool rt_task(struct task_struct *p)
 /*
  * Returns true if a task has a priority that belongs to RT or DL classes.
- * PI-boosted tasks will return true. Use realtime_task_policy() to ignore
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
  * PI-boosted tasks.
  */
-static inline bool realtime_task(struct task_struct *p)
+static inline bool rt_or_dl_task(struct task_struct *p)
 {
-	return realtime_prio(p->prio);
+	return rt_or_dl_prio(p->prio);
 }

 /*
  * Returns true if a task has a policy that belongs to RT or DL classes.
  * PI-boosted tasks will return false.
  */
-static inline bool realtime_task_policy(struct task_struct *tsk)
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
 {
 	int policy = tsk->policy;
...
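The semantics the new names preserve are easiest to see from the priority ranges: deadline tasks run at an effective prio of -1, RT tasks at 0..99, and anything at MAX_RT_PRIO (100) or above belongs to the fair classes. The standalone sketch below (an illustration only, not kernel code; the constants are mirrored from include/linux/sched/prio.h) shows which values each predicate accepts. In the kernel, rt_or_dl_task() tests p->prio and therefore also matches PI-boosted tasks, while rt_or_dl_task_policy() tests p->policy and does not.

/*
 * Standalone illustration of the renamed predicates (not kernel code).
 * Constants mirror include/linux/sched/prio.h: MAX_DL_PRIO is 0 and
 * MAX_RT_PRIO is 100.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DL_PRIO	0
#define MAX_RT_PRIO	100

/* RT only: prio in [MAX_DL_PRIO, MAX_RT_PRIO), i.e. 0..99 */
static bool rt_prio(int prio)
{
	return prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO;
}

/* RT or deadline: any prio below MAX_RT_PRIO, including -1 (deadline) */
static bool rt_or_dl_prio(int prio)
{
	return prio < MAX_RT_PRIO;
}

int main(void)
{
	const int prios[] = { -1, 0, 50, 99, 100, 120 };

	for (size_t i = 0; i < sizeof(prios) / sizeof(prios[0]); i++)
		printf("prio %4d: rt_prio=%d rt_or_dl_prio=%d\n",
		       prios[i], rt_prio(prios[i]), rt_or_dl_prio(prios[i]));
	/*
	 * prio -1 (deadline): rt_prio=0, rt_or_dl_prio=1
	 * prio 0..99 (RT):    both 1
	 * prio >= 100 (CFS):  both 0
	 */
	return 0;
}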
@@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task)
 {
 	int prio = task->prio;

-	if (!realtime_prio(prio))
+	if (!rt_or_dl_prio(prio))
 		return DEFAULT_PRIO;

 	return prio;
@@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
 	 * Note that RT tasks are excluded from same priority (lateral)
 	 * steals to prevent the introduction of an unbounded latency.
 	 */
-	if (realtime_prio(waiter->tree.prio))
+	if (rt_or_dl_prio(waiter->tree.prio))
 		return false;

 	return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
...
@@ -631,7 +631,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 			 * if it is an RT task or wait in the wait queue
 			 * for too long.
 			 */
-			if (has_handoff || (!realtime_task(waiter->task) &&
+			if (has_handoff || (!rt_or_dl_task(waiter->task) &&
 					    !time_after(jiffies, waiter->timeout)))
 				return false;
@@ -914,7 +914,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		if (owner_state != OWNER_WRITER) {
 			if (need_resched())
 				break;
-			if (realtime_task(current) &&
+			if (rt_or_dl_task(current) &&
 			   (prev_owner_state != OWNER_WRITER))
 				break;
 		}
...
@@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
 	int a_prio = a->task->prio;
 	int b_prio = b->task->prio;

-	if (realtime_prio(a_prio) || realtime_prio(b_prio)) {
+	if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {
 		if (a_prio > b_prio)
 			return true;
...
@@ -166,7 +166,7 @@ static inline int __task_prio(const struct task_struct *p)
 	if (p->dl_server)
 		return -1; /* deadline */

-	if (realtime_prio(p->prio)) /* includes deadline */
+	if (rt_or_dl_prio(p->prio))
 		return p->prio; /* [-1, 99] */

 	if (p->sched_class == &idle_sched_class)
@@ -8590,7 +8590,7 @@ void normalize_rt_tasks(void)
 		schedstat_set(p->stats.sleep_start, 0);
 		schedstat_set(p->stats.block_start, 0);

-		if (!realtime_task(p)) {
+		if (!rt_or_dl_task(p)) {
 			/*
 			 * Renice negative nice level userspace
 			 * tasks back to 0:
...
@@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p)
 	 * keep the priority unchanged. Otherwise, update priority
 	 * to the normal priority:
 	 */
-	if (!realtime_prio(p->prio))
+	if (!rt_or_dl_prio(p->prio))
 		return p->normal_prio;
 	return p->prio;
 }
...
@@ -1975,7 +1975,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 	 * expiry.
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-		if (realtime_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
+		if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
 			mode |= HRTIMER_MODE_HARD;
 	}
@@ -2075,7 +2075,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
 	u64 slack;

 	slack = current->timer_slack_ns;
-	if (realtime_task(current))
+	if (rt_or_dl_task(current))
 		slack = 0;

 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
@@ -2280,7 +2280,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 	 * Override any slack passed by the user if under
 	 * rt contraints.
 	 */
-	if (realtime_task(current))
+	if (rt_or_dl_task(current))
 		delta = 0;

 	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
...
@@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
 	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
 	 */
 	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
-	    (wakeup_rt && !realtime_task(p)) ||
+	    (wakeup_rt && !rt_or_dl_task(p)) ||
 	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
 		return;
...
@@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

 	tsk = current;
-	if (realtime_task(tsk)) {
+	if (rt_or_dl_task(tsk)) {
 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 	}
@@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 	else
 		dirty = vm_dirty_ratio * node_memory / 100;

-	if (realtime_task(tsk))
+	if (rt_or_dl_task(tsk))
 		dirty += dirty / 4;

 	/*
...
@@ -4002,7 +4002,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 		 */
 		if (alloc_flags & ALLOC_MIN_RESERVE)
 			alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(realtime_task(current)) && in_task())
+	} else if (unlikely(rt_or_dl_task(current)) && in_task())
 		alloc_flags |= ALLOC_MIN_RESERVE;

 	alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
...