Commit 85ba2d86 authored by Roland McGrath, committed by Linus Torvalds

tracehook: wait_task_inactive

This extends wait_task_inactive() with a new argument so it can be used in
a "soft" mode where it will check for the task changing state unexpectedly
and back off.  There is no change to existing callers.  This lays the
groundwork to allow robust, noninvasive tracing that can try to sample a
blocked thread but back off safely if it wakes up.

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1f5a4ad9
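
To illustrate the interface this lays groundwork for, here is a minimal, hypothetical sketch (not part of this commit) of how a noninvasive tracer could use the new return value: a first call returning zero means the thread changed state, and two calls returning the same nonzero switch count mean the thread stayed off its CPU in between. The sample_blocked_thread() wrapper and the sample_thread_state() helper are made up for the example.

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * Hypothetical example, not from this commit: sample a thread that was
 * observed blocked in @state, and detect whether it ran in between.
 * sample_thread_state() stands in for whatever the tracer actually reads.
 */
static int sample_blocked_thread(struct task_struct *p, long state)
{
	unsigned long ncsw, ncsw2;

	ncsw = wait_task_inactive(p, state);
	if (!ncsw)
		return -EAGAIN;		/* state changed; back off */

	sample_thread_state(p);		/* read registers, stack, etc. */

	ncsw2 = wait_task_inactive(p, state);
	if (ncsw2 != ncsw)
		return -EAGAIN;		/* it ran meanwhile; discard the sample */

	return 0;			/* thread stayed unscheduled throughout */
}
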
@@ -2626,7 +2626,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	/*
 	 * make sure the task is off any CPU
 	 */
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	/* more to come... */
...
@@ -4774,7 +4774,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 		UNPROTECT_CTX(ctx, flags);
-		wait_task_inactive(task);
+		wait_task_inactive(task, 0);
 		PROTECT_CTX(ctx, flags);
...
@@ -1882,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
 #define	next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
...
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 		return;
 	}
 	/* Must have done schedule() in kthread() before we set_task_cpu */
-	wait_task_inactive(k);
+	wait_task_inactive(k, 0);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;
...
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	read_unlock(&tasklist_lock);
 
 	if (!ret && !kill)
-		wait_task_inactive(child);
+		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 	/* All systems go.. */
 	return ret;
...
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,8 +1921,20 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
+		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
 		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***
...