Commit 2c1aca4b authored by Linus Torvalds

Merge branch 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Workqueue has been incorrectly round-robining per-cpu work items.
  Hillf's patch fixes that.

  The other patch documents memory-ordering properties of workqueue
  operations"

* 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't use wq_select_unbound_cpu() for bound works
  workqueue: Document (some) memory-ordering properties of {queue,schedule}_work()
parents 30bb5572 aa202f1f
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties: If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ *    { x is initially 0 }
+ *
+ *    CPU0                              CPU1
+ *
+ *    WRITE_ONCE(x, 1);                 [ @work is being executed ]
+ *    r0 = queue_work(wq, work);          r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
 			      struct work_struct *work)
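Taken together, the litmus test above says a caller needs no explicit barrier between its stores and queue_work(). A minimal sketch of such a caller, assuming a hypothetical module (the demo_* names and the use of system_wq are illustrative, not part of this commit):

/* Hypothetical illustration, not from this commit. */
#include <linux/module.h>
#include <linux/workqueue.h>

static int demo_data;			/* "x" from the litmus test above */
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * Per the documented guarantee: whenever the matching
	 * queue_work() returned %true, this read observes the
	 * WRITE_ONCE() made before that call, i.e. prints 1.
	 */
	pr_info("demo_data = %d\n", READ_ONCE(demo_data));
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	WRITE_ONCE(demo_data, 1);		/* store preceding queue_work() */
	queue_work(system_wq, &demo_work);	/* no extra barrier needed */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");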
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
+ */
 static inline bool schedule_work(struct work_struct *work)
 {
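For reference, schedule_work() is simply queue_work() on the system workqueue (system_wq), so the litmus test documented above applies unchanged. Reusing the hypothetical demo_work from the sketch above:

schedule_work(&demo_work);		/* interchangeable with the line below; */
queue_work(system_wq, &demo_work);	/* both give queue_work()'s ordering guarantee */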
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 	rcu_read_lock();
 retry:
-	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
 	/* pwq which will be used unless @work is executing elsewhere */
-	if (!(wq->flags & WQ_UNBOUND))
-		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-	else
-		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+	if (wq->flags & WQ_UNBOUND) {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
+		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+	} else {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = raw_smp_processor_id();
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	}
 
 	/*
 	 * If @work was previously on a different pool, it might still be
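The user-visible effect of the round-robin fix can be sketched as a hypothetical test module (demo_* names assumed, not part of this commit). system_wq is a bound, per-cpu workqueue, and plain queue_work() passes req_cpu == WORK_CPU_UNBOUND down to __queue_work(); before the fix, that combination took the wq_select_unbound_cpu() round-robin path even though the workqueue is bound, so the work item could run on an arbitrary CPU. With the fix, it stays on the submitting CPU unless that CPU goes down:

/* Hypothetical check, not from this commit. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct work_struct demo_cpu_work;

static void demo_cpu_work_fn(struct work_struct *work)
{
	/* With the fix: the same CPU that queued the work (unless
	 * that CPU was unplugged in the meantime); before the fix:
	 * potentially any CPU picked by wq_select_unbound_cpu(). */
	pr_info("executed on CPU %d\n", smp_processor_id());
}

static int __init demo_cpu_init(void)
{
	INIT_WORK(&demo_cpu_work, demo_cpu_work_fn);
	pr_info("queued from CPU %d\n", raw_smp_processor_id());
	/* system_wq is bound (per-cpu); queue_work() submits with
	 * req_cpu == WORK_CPU_UNBOUND */
	queue_work(system_wq, &demo_cpu_work);
	return 0;
}

static void __exit demo_cpu_exit(void)
{
	flush_work(&demo_cpu_work);
}

module_init(demo_cpu_init);
module_exit(demo_cpu_exit);
MODULE_LICENSE("GPL");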