Commit e7243e1b authored by Rusty Russell's avatar Rusty Russell Committed by Linus Torvalds

[PATCH] Hotplug CPUs: Set prio of migration thread before CPU

We need the migration thread to be RT as soon as the CPU comes online:
for example, stop_machine() (another RT task) expects to yield to it.
Extract the core of setscheduler() and do that when the migration
thread is created.  rq lock is a precaution against the (theoretical)
possibility of someone else doing setscheduler on this thread at the
same time.
parent 1329b6ab
...@@ -2102,6 +2102,18 @@ static inline task_t *find_process_by_pid(pid_t pid) ...@@ -2102,6 +2102,18 @@ static inline task_t *find_process_by_pid(pid_t pid)
return pid ? find_task_by_pid(pid) : current; return pid ? find_task_by_pid(pid) : current;
} }
/* Actually do priority change: must hold rq lock. */
static void __setscheduler(struct task_struct *p, int policy, int prio)
{
/* The task must already be dequeued: its priority determines which
 * prio array slot it lives in, so changing prio while enqueued would
 * corrupt the runqueue.  Callers deactivate first, then reactivate. */
BUG_ON(p->array);
p->policy = policy;
p->rt_priority = prio;
/* For RT policies (SCHED_FIFO/SCHED_RR), effective prio is the
 * rt_priority mapped into the [0, MAX_USER_RT_PRIO-1) range, with
 * lower p->prio meaning higher priority.  SCHED_NORMAL tasks fall
 * back to their nice-derived static priority. */
if (policy != SCHED_NORMAL)
p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
else
p->prio = p->static_prio;
}
/* /*
* setscheduler - change the scheduling policy and/or RT priority of a thread. * setscheduler - change the scheduling policy and/or RT priority of a thread.
*/ */
...@@ -2174,13 +2186,8 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param) ...@@ -2174,13 +2186,8 @@ static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
if (array) if (array)
deactivate_task(p, task_rq(p)); deactivate_task(p, task_rq(p));
retval = 0; retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
oldprio = p->prio; oldprio = p->prio;
if (policy != SCHED_NORMAL) __setscheduler(p, policy, lp.sched_priority);
p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
else
p->prio = p->static_prio;
if (array) { if (array) {
__activate_task(p, task_rq(p)); __activate_task(p, task_rq(p));
/* /*
...@@ -2760,15 +2767,10 @@ static void move_task_away(struct task_struct *p, int dest_cpu) ...@@ -2760,15 +2767,10 @@ static void move_task_away(struct task_struct *p, int dest_cpu)
*/ */
static int migration_thread(void * data) static int migration_thread(void * data)
{ {
/* Marking "param" __user is ok, since we do a set_fs(KERNEL_DS); */
struct sched_param __user param = { .sched_priority = MAX_RT_PRIO-1 };
runqueue_t *rq; runqueue_t *rq;
int cpu = (long)data; int cpu = (long)data;
int ret;
BUG_ON(smp_processor_id() != cpu); BUG_ON(smp_processor_id() != cpu);
ret = setscheduler(0, SCHED_FIFO, &param);
rq = this_rq(); rq = this_rq();
BUG_ON(rq->migration_thread != current); BUG_ON(rq->migration_thread != current);
...@@ -2865,6 +2867,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, ...@@ -2865,6 +2867,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
{ {
int cpu = (long)hcpu; int cpu = (long)hcpu;
struct task_struct *p; struct task_struct *p;
struct runqueue *rq;
unsigned long flags;
switch (action) { switch (action) {
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
...@@ -2872,6 +2876,10 @@ static int migration_call(struct notifier_block *nfb, unsigned long action, ...@@ -2872,6 +2876,10 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
if (IS_ERR(p)) if (IS_ERR(p))
return NOTIFY_BAD; return NOTIFY_BAD;
kthread_bind(p, cpu); kthread_bind(p, cpu);
/* Must be high prio: stop_machine expects to yield to it. */
rq = task_rq_lock(p, &flags);
__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
task_rq_unlock(rq, &flags);
cpu_rq(cpu)->migration_thread = p; cpu_rq(cpu)->migration_thread = p;
break; break;
case CPU_ONLINE: case CPU_ONLINE:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment