Commit baf59022 authored by Tejun Heo

workqueue: factor out start_flush_work()

Factor out start_flush_work() from flush_work().  start_flush_work()
takes a @wait_executing argument which controls whether the barrier is
queued only if the work is pending, or also if it is executing.  As
flush_work() needs to wait for execution too, it passes %true.

This commit doesn't cause any behavior difference.  start_flush_work()
will be used to implement flush_work_sync().
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 401a8d04
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2326,35 +2326,17 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2367,24 +2349,52 @@ bool flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+	return true;
+already_gone:
+	spin_unlock_irq(&gcwq->lock);
+	return false;
+}
 
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
-	return true;
-already_gone:
-	spin_unlock_irq(&gcwq->lock);
-	return false;
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  This function considers
+ * only the last queueing instance of @work.  If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
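The @wait_executing flag is what the upcoming flush_work_sync() needs: it can ask start_flush_work() to queue the barrier only behind a still-pending @work and deal with in-flight executions itself. Below is a minimal sketch of such a caller, assuming a helper like wait_on_work() that waits for any in-flight execution of @work and returns %true if it had to wait; the helper and exact shape are illustrative, not part of this commit.

/*
 * Sketch only: flush_work_sync() built on start_flush_work().
 * Assumes wait_on_work() waits for in-flight executions of @work
 * on all CPUs and returns %true if it waited.
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* executions are waited for separately; queue barr only if pending */
	pending = start_flush_work(work, &barr, false);

	/* wait for any in-flight execution to finish */
	waited = wait_on_work(work);

	/* finally wait for the barrier queued behind the pending instance */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}

With %false, start_flush_work() bails out via already_gone whenever @work is not on a pending list, so the barrier is never queued behind an executing instance; waiting for execution is left entirely to the caller.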