Commit a2c1c57b authored by Tejun Heo

workqueue: consider work function when searching for busy work items

To avoid executing the same work item concurrently, workqueue hashes
currently busy workers according to their current work items and looks
up the table when it wants to execute a new work item.  If there
already is a worker which is executing the new work item, the new item
is queued to the found worker so that it gets executed only after the
current execution finishes.

Unfortunately, a work item may be freed while being executed and thus
recycled for different purposes.  If it gets recycled for a different
work item and queued while the previous execution is still in
progress, workqueue may make the new work item wait for the old one
although the two aren't really related in any way.
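As a purely illustrative sketch (the names foo, foo_workfn and queue_foo
are made up here, not part of the patch), a self-freeing work item looks
roughly like this; once kfree() runs, the allocation, and with it the
work_struct address, can be reused for an unrelated work item while the
function is still executing:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct work;
	/* ... payload ... */
};

static void foo_workfn(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, work);

	/* ... use foo ... */
	kfree(foo);
	/*
	 * @work is freed above but this function keeps running.  If the
	 * allocation is recycled into another work item and queued now,
	 * an address-only lookup would treat it as "already executing"
	 * and serialize it behind this unrelated execution.
	 */
	/* ... more processing that does not touch foo ... */
}

static int queue_foo(void)
{
	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	INIT_WORK(&foo->work, foo_workfn);
	schedule_work(&foo->work);
	return 0;
}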

In extreme cases, this false dependency may lead to deadlock although
it's extremely unlikely given that there aren't too many self-freeing
work item users and they usually don't wait for other work items.

To alleviate the problem, record the current work function in each
busy worker and match it together with the work item address in
find_worker_executing_work().  While this isn't complete, it ensures
that unrelated work items don't interact with each other, and in the
very unlikely case where a twisted wq user still triggers it, the
false dependency is always onto itself, making the culprit easy to spot.
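Condensed from the find_worker_executing_work() hunk below, the
busy-worker match tightens from address-only to address plus function:

	/* before: a matching address alone claims the collision */
	if (worker->current_work == work)
		return worker;

	/* after: the address must also map to the same work function */
	if (worker->current_work == work &&
	    worker->current_func == work->func)
		return worker;
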
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Andrey Isakov <andy51@gmx.ru>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=51701
Cc: stable@vger.kernel.org
parent 42f8570f
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -137,6 +137,7 @@ struct worker {
 	};
 
 	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 	struct list_head	scheduled;	/* L: scheduled works */
 	struct task_struct	*task;		/* I: worker task */
@@ -861,9 +862,27 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * @gcwq: gcwq of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq by searching
+ * @gcwq->busy_hash which is keyed by the address of @work.  For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function.  This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky.  A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item.  If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives.  Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item.  Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
  *
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
@@ -878,8 +897,10 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 	struct worker *worker;
 	struct hlist_node *tmp;
 
-	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry, (unsigned long)work)
-		if (worker->current_work == work)
+	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry,
+			       (unsigned long)work)
+		if (worker->current_work == work &&
+		    worker->current_func == work->func)
 			return worker;
 
 	return NULL;
@@ -2114,7 +2135,6 @@ __acquires(&gcwq->lock)
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2154,6 +2174,7 @@ __acquires(&gcwq->lock)
 	debug_work_deactivate(work);
 	hash_add(gcwq->busy_hash, &worker->hentry, (unsigned long)worker);
 	worker->current_work = work;
+	worker->current_func = work->func;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
 
@@ -2186,7 +2207,7 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
-	f(work);
+	worker->current_func(work);
 	/*
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
@@ -2198,7 +2219,8 @@ __acquires(&gcwq->lock)
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
 		       "     last function: %pf\n",
-		       current->comm, preempt_count(), task_pid_nr(current), f);
+		       current->comm, preempt_count(), task_pid_nr(current),
+		       worker->current_func);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
@@ -2212,6 +2234,7 @@ __acquires(&gcwq->lock)
 	/* we're done with it, release */
 	hash_del(&worker->hentry);
 	worker->current_work = NULL;
+	worker->current_func = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
 }