Commit 6e33cad0 authored by Peter Zijlstra

sched: Trivial core scheduling cookie management

In order to not have to use pid_struct, create a new, smaller structure to
manage task cookies for core scheduling.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.919768100@infradead.org
parent 97886d9d
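
The scheme, in brief: every group of tasks allowed to share an SMT core points at one small refcounted object, and that object's address is the cookie value kept in the task (0 meaning no cookie). The userspace sketch below is an illustration only, not kernel code; struct core_cookie and the cookie_*() helpers here are made-up stand-ins for the primitives this patch adds.

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's refcount_t-based sched_core_cookie. */
struct core_cookie {
        unsigned int refcnt;
};

/* Allocate a cookie holding one reference; its address is the cookie value. */
static unsigned long cookie_alloc(void)
{
        struct core_cookie *ck = malloc(sizeof(*ck));

        if (!ck)
                return 0;               /* 0 means "no cookie" */
        ck->refcnt = 1;
        return (unsigned long)ck;
}

/* Take an extra reference, e.g. when another task starts using the cookie. */
static unsigned long cookie_get(unsigned long cookie)
{
        struct core_cookie *ck = (void *)cookie;

        if (ck)
                ck->refcnt++;
        return cookie;
}

/* Drop a reference; the last put frees the object. */
static void cookie_put(unsigned long cookie)
{
        struct core_cookie *ck = (void *)cookie;

        if (ck && --ck->refcnt == 0)
                free(ck);
}

int main(void)
{
        unsigned long a = cookie_alloc();       /* first task creates a cookie */
        unsigned long b = cookie_get(a);        /* second task shares it       */

        printf("tasks in the same group: %s\n", a && a == b ? "yes" : "no");

        cookie_put(b);                          /* second task exits           */
        cookie_put(a);                          /* first task exits, freed     */
        return 0;
}

In the real code, allocation and the final put additionally pair sched_core_get()/sched_core_put(), which keep core scheduling enabled while any cookie exists; that pairing is visible in sched_core_alloc_cookie() and sched_core_put_cookie() in the new file below.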
include/linux/sched.h

@@ -2179,4 +2179,10 @@ int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+#endif
+
 #endif
kernel/fork.c

@@ -742,6 +742,7 @@ void __put_task_struct(struct task_struct *tsk)
         exit_creds(tsk);
         delayacct_tsk_free(tsk);
         put_signal_struct(tsk->signal);
+        sched_core_free(tsk);
 
         if (!profile_handoff_task(tsk))
                 free_task(tsk);
kernel/sched/Makefile

@@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
 obj-$(CONFIG_CPU_ISOLATION) += isolation.o
 obj-$(CONFIG_PSI) += psi.o
+obj-$(CONFIG_SCHED_CORE) += core_sched.o
kernel/sched/core.c

@@ -167,7 +167,7 @@ static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
         return 0;
 }
 
-static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
+void sched_core_enqueue(struct rq *rq, struct task_struct *p)
 {
         rq->core->core_task_seq++;
 
@@ -177,14 +177,15 @@ static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
         rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
 }
 
-static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
+void sched_core_dequeue(struct rq *rq, struct task_struct *p)
 {
         rq->core->core_task_seq++;
 
-        if (!p->core_cookie)
+        if (!sched_core_enqueued(p))
                 return;
 
         rb_erase(&p->core_node, &rq->core_tree);
+        RB_CLEAR_NODE(&p->core_node);
 }
 
 /*
kernel/sched/core_sched.c (new file)

// SPDX-License-Identifier: GPL-2.0-only

#include "sched.h"

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
        refcount_t refcnt;
};

unsigned long sched_core_alloc_cookie(void)
{
        struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);

        if (!ck)
                return 0;

        refcount_set(&ck->refcnt, 1);
        sched_core_get();

        return (unsigned long)ck;
}

void sched_core_put_cookie(unsigned long cookie)
{
        struct sched_core_cookie *ptr = (void *)cookie;

        if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
                kfree(ptr);
                sched_core_put();
        }
}

unsigned long sched_core_get_cookie(unsigned long cookie)
{
        struct sched_core_cookie *ptr = (void *)cookie;

        if (ptr)
                refcount_inc(&ptr->refcnt);

        return cookie;
}

/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; caller is responsible for lifetimes on
 * both ends.
 *
 * Returns: the old cookie
 */
unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie)
{
        unsigned long old_cookie;
        struct rq_flags rf;
        struct rq *rq;
        bool enqueued;

        rq = task_rq_lock(p, &rf);

        /*
         * Since creating a cookie implies sched_core_get(), and we cannot set
         * a cookie until after we've created it, similarly, we cannot destroy
         * a cookie until after we've removed it, we must have core scheduling
         * enabled here.
         */
        SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

        enqueued = sched_core_enqueued(p);
        if (enqueued)
                sched_core_dequeue(rq, p);

        old_cookie = p->core_cookie;
        p->core_cookie = cookie;

        if (enqueued)
                sched_core_enqueue(rq, p);

        /*
         * If task is currently running, it may not be compatible anymore after
         * the cookie change, so enter the scheduler on its CPU to schedule it
         * away.
         */
        if (task_running(rq, p))
                resched_curr(rq);

        task_rq_unlock(rq, p, &rf);

        return old_cookie;
}

static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
        unsigned long cookie, flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        cookie = sched_core_get_cookie(p->core_cookie);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

        return cookie;
}

void sched_core_free(struct task_struct *p)
{
        sched_core_put_cookie(p->core_cookie);
}
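
As the comment on sched_core_update_cookie() notes, the caller owns the lifetimes on both ends: it passes in a cookie it already holds a reference on, and receives the old cookie back to drop (or reuse). The standalone model below illustrates that reference flow; struct task, the cookie_*() helpers, task_update_cookie() and task_exit() are hypothetical userspace stand-ins, repeated here only so the example compiles on its own.

#include <assert.h>
#include <stdlib.h>

/* Compact userspace model; 0 means "no cookie", as in the kernel code above. */
struct core_cookie { unsigned int refcnt; };
struct task { unsigned long core_cookie; };

static unsigned long cookie_alloc(void)
{
        struct core_cookie *ck = calloc(1, sizeof(*ck));

        if (ck)
                ck->refcnt = 1;
        return (unsigned long)ck;
}

static unsigned long cookie_get(unsigned long c)
{
        if (c)
                ((struct core_cookie *)c)->refcnt++;
        return c;
}

static void cookie_put(unsigned long c)
{
        if (c && --((struct core_cookie *)c)->refcnt == 0)
                free((void *)c);
}

/*
 * Mirrors sched_core_update_cookie(): the new cookie's reference is handed
 * over to the task, and the old cookie is returned so the caller can drop
 * (or reuse) that reference.
 */
static unsigned long task_update_cookie(struct task *p, unsigned long cookie)
{
        unsigned long old = p->core_cookie;

        p->core_cookie = cookie;
        return old;
}

/* Mirrors sched_core_free(): a dying task drops its reference. */
static void task_exit(struct task *p)
{
        cookie_put(task_update_cookie(p, 0));
}

int main(void)
{
        struct task a = { .core_cookie = cookie_alloc() };      /* refcnt 1 */
        struct task b = { .core_cookie = 0 };

        /* b starts sharing a's cookie: take a reference, drop b's old one. */
        cookie_put(task_update_cookie(&b, cookie_get(a.core_cookie)));
        assert(a.core_cookie == b.core_cookie);                 /* refcnt 2 */

        task_exit(&b);                                          /* refcnt 1 */
        task_exit(&a);                                          /* freed    */
        return 0;
}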
kernel/sched/sched.h

@@ -1229,6 +1229,22 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 
 extern void queue_core_balance(struct rq *rq);
 
+static inline bool sched_core_enqueued(struct task_struct *p)
+{
+        return !RB_EMPTY_NODE(&p->core_node);
+}
+
+extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
+extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
+
+extern void sched_core_get(void);
+extern void sched_core_put(void);
+
+extern unsigned long sched_core_alloc_cookie(void);
+extern void sched_core_put_cookie(unsigned long cookie);
+extern unsigned long sched_core_get_cookie(unsigned long cookie);
+extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
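
sched_core_enqueued() works because of an rbtree convention: a node that is in no tree points its parent word at itself, which is what RB_CLEAR_NODE() sets and RB_EMPTY_NODE() tests (hence the added RB_CLEAR_NODE() in sched_core_dequeue() above; the real macros live in include/linux/rbtree.h). A simplified standalone model of that convention, with made-up names:

#include <assert.h>

/*
 * Simplified model of rbtree membership: like the kernel's rb_node, where
 * __rb_parent_color == (unsigned long)node means "not in a tree".
 */
struct node {
        struct node *parent;                    /* stands in for __rb_parent_color */
};

static void clear_node(struct node *n)          /* ~ RB_CLEAR_NODE() */
{
        n->parent = n;
}

static int node_empty(const struct node *n)     /* ~ RB_EMPTY_NODE() */
{
        return n->parent == n;
}

static void link_node(struct node *n, struct node *parent)     /* ~ rb_add() linking */
{
        n->parent = parent;
}

int main(void)
{
        struct node root, task_node;

        clear_node(&root);
        clear_node(&task_node);
        assert(node_empty(&task_node));         /* sched_core_enqueued(): false */

        link_node(&task_node, &root);           /* sched_core_enqueue()         */
        assert(!node_empty(&task_node));        /* sched_core_enqueued(): true  */

        clear_node(&task_node);                 /* sched_core_dequeue() clears  */
        assert(node_empty(&task_node));
        return 0;
}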