Commit 83c22520 authored by Oleg Nesterov, committed by Linus Torvalds

flush_cpu_workqueue: don't flush an empty ->worklist

Now when we have ->current_work we can avoid adding a barrier and waiting
for its completion when cwq's queue is empty.

Note: this change is also useful if we change flush_workqueue() to also
check the dead CPUs.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent edab2516
@@ -404,12 +404,15 @@ static void wq_barrier_func(struct work_struct *work)
 	complete(&barr->done);
 }
 
-static inline void init_wq_barrier(struct wq_barrier *barr)
+static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+					struct wq_barrier *barr, int tail)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 	init_completion(&barr->done);
+
+	insert_work(cwq, &barr->work, tail);
 }
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -428,14 +431,21 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		preempt_disable();
 	} else {
 		struct wq_barrier barr;
+		int active = 0;
 
-		init_wq_barrier(&barr);
-		__queue_work(cwq, &barr.work);
+		spin_lock_irq(&cwq->lock);
+		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+			insert_wq_barrier(cwq, &barr, 1);
+			active = 1;
+		}
+		spin_unlock_irq(&cwq->lock);
 
-		preempt_enable();	/* Can no longer touch *cwq */
-		wait_for_completion(&barr.done);
-		preempt_disable();
+		if (active) {
+			preempt_enable();
+			wait_for_completion(&barr.done);
+			preempt_disable();
+		}
 	}
 }
/** /**
@@ -475,8 +485,7 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		init_wq_barrier(&barr);
-		insert_work(cwq, &barr.work, 0);
+		insert_wq_barrier(cwq, &barr, 0);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment