Commit 23b68395 authored by Daniel Vetter, committed by Jason Gunthorpe

mm/mmu_notifiers: add a lockdep map for invalidate_range_start/end

This is a similar idea to the fs_reclaim fake lockdep lock. It's fairly
easy to provoke a specific notifier to be run on a specific range: Just
prep it, and then munmap() it.
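
For illustration only (this is not part of the patch), a minimal userspace
sketch of that recipe could look like the following; registering the range
with the notifier consumer under test is elided, and the mapping size is
arbitrary:

	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;

		/* "prep it": fault the pages in so there is something to unmap */
		memset(buf, 0, len);

		/* here the range would be registered with the notifier consumer */

		/* munmap() forces invalidate_range_start/end on exactly this range */
		munmap(buf, len);
		return 0;
	}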

A bit harder, but still doable, is to provoke the mmu notifiers for all
the various callchains that might lead to them. But both at the same time
is really hard to reliably hit, especially when you want to exercise paths
like direct reclaim or compaction, where it's not easy to control what
exactly will be unmapped.

By introducing a lockdep map to tie them all together we allow lockdep to
see a lot more dependencies, without having to actually hit them in a
single callchain while testing.
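
As a purely illustrative sketch (not from this patch; struct my_dev, its
mutex and the callback name are invented), this is the kind of dependency
the shared map lets lockdep record from any one callchain and then
cross-check against all the others:

	struct my_dev {
		struct mutex lock;
		struct mmu_notifier notifier;
	};

	static int my_invalidate_range_start(struct mmu_notifier *mn,
					     const struct mmu_notifier_range *range)
	{
		struct my_dev *dev = container_of(mn, struct my_dev, notifier);

		/*
		 * Because the caller wraps us in lock_map_acquire/release of
		 * __mmu_notifier_invalidate_range_start_map, lockdep now knows
		 * about the "notifier map -> dev->lock" dependency and can flag
		 * code that allocates memory (and may thus recurse into the
		 * notifiers via reclaim) while holding dev->lock.
		 */
		mutex_lock(&dev->lock);
		/* tear down device mappings covering 'range' */
		mutex_unlock(&dev->lock);

		return 0;
	}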

On Jason's suggestion this is rolled out for both
invalidate_range_start and invalidate_range_end. They both have the same
calling context, hence we can share the same lockdep map. Note that the
annotation for invalidate_range_start is outside of the
mm_has_notifiers() check, to make sure lockdep is informed about all
paths leading to this context irrespective of whether mmu notifiers are
present for a given context. We don't do that on the
invalidate_range_end side to avoid paying the overhead twice; there the
lockdep annotation is pushed down behind the mm_has_notifiers() check.

Link: https://lore.kernel.org/r/20190826201425.17547-2-daniel.vetter@ffwll.ch
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f0ade90a
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -42,6 +42,10 @@ enum mmu_notifier_event {
 
 #ifdef CONFIG_MMU_NOTIFIER
 
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
 /*
  * The mmu notifier_mm structure is allocated and installed in
  * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
@@ -339,20 +343,26 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	if (mm_has_notifiers(range->mm)) {
 		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
 		__mmu_notifier_invalidate_range_start(range);
 	}
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 static inline int
 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
 {
+	int ret = 0;
+
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	if (mm_has_notifiers(range->mm)) {
 		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
-		return __mmu_notifier_invalidate_range_start(range);
+		ret = __mmu_notifier_invalidate_range_start(range);
 	}
-	return 0;
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+	return ret;
 }
 
 static inline void
...
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -21,6 +21,12 @@
 /* global SRCU for all MMs */
 DEFINE_STATIC_SRCU(srcu);
 
+#ifdef CONFIG_LOCKDEP
+struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
+	.name = "mmu_notifier_invalidate_range_start"
+};
+#endif
+
 /*
  * This function can't run concurrently against mmu_notifier_register
  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
@@ -184,6 +190,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 	struct mmu_notifier *mn;
 	int id;
 
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
 		/*
@@ -207,6 +214,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 			mn->ops->invalidate_range_end(mn, range);
 	}
 	srcu_read_unlock(&srcu, id);
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
 
 void __mmu_notifier_invalidate_range(struct mm_struct *mm,
...