Commit 669de8bd authored by Bart Van Assche, committed by Ingo Molnar

kernel/workqueue: Use dynamic lockdep keys for workqueues

The following commit:

  87915adc ("workqueue: re-add lockdep dependencies for flushing")

improved deadlock checking in the workqueue implementation. Unfortunately,
that patch also introduced a few false-positive lockdep complaints.

This patch suppresses these false positives by allocating each workqueue's
lockdep key dynamically, instead of sharing one static key per
alloc_workqueue() call site.
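
The dynamic keys rely on the lockdep_register_key() /
lockdep_unregister_key() API added by the parent commit in this series.
A minimal sketch of the pattern (struct foo and the helper names below are
illustrative only, not part of this patch):

  #include <linux/lockdep.h>

  /* Illustrative only: one lock_class_key per object instance. */
  struct foo {
          struct lock_class_key key;
          struct lockdep_map dep_map;
  };

  static void foo_init_lockdep(struct foo *f, const char *name)
  {
          /* Register a fresh key so each instance gets its own lock class. */
          lockdep_register_key(&f->key);
          lockdep_init_map(&f->dep_map, name, &f->key, 0);
  }

  static void foo_exit_lockdep(struct foo *f)
  {
          lockdep_unregister_key(&f->key);
  }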

An example of a false-positive lockdep complaint suppressed by this patch
can be found below. The root cause of that complaint is that the direct
I/O code can call alloc_workqueue() from inside a work item created by
another alloc_workqueue() call, and that both workqueues share the same
lockdep key. Allocating the workqueue lockdep keys dynamically prevents
this complaint from being triggered.

In other words, this patch guarantees that a unique lockdep key is
associated with each workqueue.
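
For illustration, a hypothetical sketch of the problematic pattern
(make_wq() and nested_work_fn() are invented names that only mimic the
dio case):

  /*
   * Both workqueues are created through the same alloc_workqueue()
   * invocation, so under the old macro they shared a single static
   * lock_class_key.
   */
  static struct workqueue_struct *make_wq(const char *name)
  {
          return alloc_workqueue("%s", 0, 0, name); /* one static key per call site */
  }

  static void nested_work_fn(struct work_struct *work)
  {
          /* This work item runs on a workqueue created by make_wq(). */
          struct workqueue_struct *inner = make_wq("inner");

          /*
           * destroy_workqueue() drains, i.e. acquires the workqueue's
           * lockdep map; process_one_work() already holds a map of the
           * same class, so lockdep reports a (false) deadlock.
           */
          if (inner)
                  destroy_workqueue(inner);
  }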

  ======================================================
  WARNING: possible circular locking dependency detected
  4.19.0-dbg+ #1 Not tainted
  fio/4129 is trying to acquire lock:
  00000000a01cfe1a ((wq_completion)"dio/%s"sb->s_id){+.+.}, at: flush_workqueue+0xd0/0x970

  but task is already holding lock:
  00000000a0acecf9 (&sb->s_type->i_mutex_key#14){+.+.}, at: ext4_file_write_iter+0x154/0x710

  which lock already depends on the new lock.

  the existing dependency chain (in reverse order) is:

  -> #2 (&sb->s_type->i_mutex_key#14){+.+.}:
         down_write+0x3d/0x80
         __generic_file_fsync+0x77/0xf0
         ext4_sync_file+0x3c9/0x780
         vfs_fsync_range+0x66/0x100
         dio_complete+0x2f5/0x360
         dio_aio_complete_work+0x1c/0x20
         process_one_work+0x481/0x9f0
         worker_thread+0x63/0x5a0
         kthread+0x1cf/0x1f0
         ret_from_fork+0x24/0x30

  -> #1 ((work_completion)(&dio->complete_work)){+.+.}:
         process_one_work+0x447/0x9f0
         worker_thread+0x63/0x5a0
         kthread+0x1cf/0x1f0
         ret_from_fork+0x24/0x30

  -> #0 ((wq_completion)"dio/%s"sb->s_id){+.+.}:
         lock_acquire+0xc5/0x200
         flush_workqueue+0xf3/0x970
         drain_workqueue+0xec/0x220
         destroy_workqueue+0x23/0x350
         sb_init_dio_done_wq+0x6a/0x80
         do_blockdev_direct_IO+0x1f33/0x4be0
         __blockdev_direct_IO+0x79/0x86
         ext4_direct_IO+0x5df/0xbb0
         generic_file_direct_write+0x119/0x220
         __generic_file_write_iter+0x131/0x2d0
         ext4_file_write_iter+0x3fa/0x710
         aio_write+0x235/0x330
         io_submit_one+0x510/0xeb0
         __x64_sys_io_submit+0x122/0x340
         do_syscall_64+0x71/0x220
         entry_SYSCALL_64_after_hwframe+0x49/0xbe

  other info that might help us debug this:

  Chain exists of:
    (wq_completion)"dio/%s"sb->s_id --> (work_completion)(&dio->complete_work) --> &sb->s_type->i_mutex_key#14

   Possible unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(&sb->s_type->i_mutex_key#14);
                                 lock((work_completion)(&dio->complete_work));
                                 lock(&sb->s_type->i_mutex_key#14);
    lock((wq_completion)"dio/%s"sb->s_id);

   *** DEADLOCK ***

  1 lock held by fio/4129:
   #0: 00000000a0acecf9 (&sb->s_type->i_mutex_key#14){+.+.}, at: ext4_file_write_iter+0x154/0x710

  stack backtrace:
  CPU: 3 PID: 4129 Comm: fio Not tainted 4.19.0-dbg+ #1
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
  Call Trace:
   dump_stack+0x86/0xc5
   print_circular_bug.isra.32+0x20a/0x218
   __lock_acquire+0x1c68/0x1cf0
   lock_acquire+0xc5/0x200
   flush_workqueue+0xf3/0x970
   drain_workqueue+0xec/0x220
   destroy_workqueue+0x23/0x350
   sb_init_dio_done_wq+0x6a/0x80
   do_blockdev_direct_IO+0x1f33/0x4be0
   __blockdev_direct_IO+0x79/0x86
   ext4_direct_IO+0x5df/0xbb0
   generic_file_direct_write+0x119/0x220
   __generic_file_write_iter+0x131/0x2d0
   ext4_file_write_iter+0x3fa/0x710
   aio_write+0x235/0x330
   io_submit_one+0x510/0xeb0
   __x64_sys_io_submit+0x122/0x340
   do_syscall_64+0x71/0x220
   entry_SYSCALL_64_after_hwframe+0x49/0xbe
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes.berg@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Link: https://lkml.kernel.org/r/20190214230058.196511-20-bvanassche@acm.org
[ Reworked the changelog a bit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 108c1485
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -390,43 +390,23 @@ extern struct workqueue_struct *system_freezable_wq;
 extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
 
-extern struct workqueue_struct *
-__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
-	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
-
 /**
  * alloc_workqueue - allocate a workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
  * @max_active: max in-flight work items, 0 for default
- * @args...: args for @fmt
+ * remaining args: args for @fmt
  *
  * Allocate a workqueue with the specified parameters.  For detailed
  * information on WQ_* flags, please refer to
  * Documentation/core-api/workqueue.rst.
  *
- * The __lock_name macro dance is to guarantee that single lock_class_key
- * doesn't end up with different namesm, which isn't allowed by lockdep.
- *
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-#ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...)		\
-({									\
-	static struct lock_class_key __key;				\
-	const char *__lock_name;					\
-									\
-	__lock_name = "(wq_completion)"#fmt#args;			\
-									\
-	__alloc_workqueue_key((fmt), (flags), (max_active),		\
-			      &__key, __lock_name, ##args);		\
-})
-#else
-#define alloc_workqueue(fmt, flags, max_active, args...)		\
-	__alloc_workqueue_key((fmt), (flags), (max_active),		\
-			      NULL, NULL, ##args)
-#endif
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...);
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -259,6 +259,8 @@ struct workqueue_struct {
 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
 #endif
 #ifdef CONFIG_LOCKDEP
+	char			*lock_name;
+	struct lock_class_key	key;
 	struct lockdep_map	lockdep_map;
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
@@ -3337,11 +3339,49 @@ static int init_worker_pool(struct worker_pool *pool)
 	return 0;
 }
 
+#ifdef CONFIG_LOCKDEP
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+	char *lock_name;
+
+	lockdep_register_key(&wq->key);
+	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
+	if (!lock_name)
+		lock_name = wq->name;
+	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+	lockdep_unregister_key(&wq->key);
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+	if (wq->lock_name != wq->name)
+		kfree(wq->lock_name);
+}
+#else
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+}
+#endif
+
 static void rcu_free_wq(struct rcu_head *rcu)
 {
 	struct workqueue_struct *wq =
 		container_of(rcu, struct workqueue_struct, rcu);
 
+	wq_free_lockdep(wq);
+
 	if (!(wq->flags & WQ_UNBOUND))
 		free_percpu(wq->cpu_pwqs);
 	else
@@ -3532,8 +3572,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * If we're the last pwq going away, @wq is already dead and no one
 	 * is gonna access it anymore.  Schedule RCU free.
 	 */
-	if (is_last)
+	if (is_last) {
+		wq_unregister_lockdep(wq);
 		call_rcu(&wq->rcu, rcu_free_wq);
+	}
 }
 
 /**
@@ -4067,11 +4109,9 @@ static int init_rescuer(struct workqueue_struct *wq)
 	return 0;
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
-					       unsigned int flags,
-					       int max_active,
-					       struct lock_class_key *key,
-					       const char *lock_name, ...)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...)
 {
 	size_t tbl_size = 0;
 	va_list args;
@@ -4106,7 +4146,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		goto err_free_wq;
 	}
 
-	va_start(args, lock_name);
+	va_start(args, max_active);
 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
 	va_end(args);
 
@@ -4123,7 +4163,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	INIT_LIST_HEAD(&wq->flusher_overflow);
 	INIT_LIST_HEAD(&wq->maydays);
 
-	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+	wq_init_lockdep(wq);
 	INIT_LIST_HEAD(&wq->list);
 
 	if (alloc_and_link_pwqs(wq) < 0)
@@ -4161,7 +4201,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	destroy_workqueue(wq);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
+EXPORT_SYMBOL_GPL(alloc_workqueue);
 
 /**
  * destroy_workqueue - safely terminate a workqueue
@@ -4214,6 +4254,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		kthread_stop(wq->rescuer->task);
 
 	if (!(wq->flags & WQ_UNBOUND)) {
+		wq_unregister_lockdep(wq);
 		/*
 		 * The base ref is never dropped on per-cpu pwqs.  Directly
 		 * schedule RCU free.