Commit 5f32b265 authored by Jérôme Glisse, committed by Linus Torvalds

mm/mmu_notifier: kill invalidate_page

The invalidate_page() callback suffered from two pitfalls.  First, it
used to run after the page table lock had been released, so a new page
might have been set up for the address before invalidate_page() was
called.

This was fixed, in a somewhat indirect way, by commit c7ab0d2f ("mm:
convert try_to_unmap_one() to use page_vma_mapped_walk()"), which moved
the callback under the page table lock.  However, that change also broke
several existing users of the mmu_notifier API that assumed they could
sleep inside this callback.

The second pitfall was that invalidate_page() was the only callback that
did not take an address range to invalidate; instead, it was given a
single address and a page.  Many callback implementers assumed the page
could never be a THP and therefore failed to invalidate the full range
mapped by a transparent huge page.
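
As a purely illustrative sketch (the example_* names below are invented
for this explanation and are not part of the kernel API), this is the
kind of under-invalidation the old single-address callback invited,
versus the extent a PMD-sized THP actually needs:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Stand-in for however a driver tears down its device-side mapping of
 * a CPU virtual address range (hypothetical helper). */
static void example_drop_secondary_mapping(unsigned long start,
                                           unsigned long end)
{
        /* ... device specific ... */
}

/* Common bug with the old callback: only the single PAGE_SIZE slot
 * around "address" is dropped, so the device keeps stale translations
 * for the rest of a transparent huge page. */
static void example_buggy_invalidate_page(unsigned long address)
{
        unsigned long start = address & PAGE_MASK;

        example_drop_secondary_mapping(start, start + PAGE_SIZE);
}

/* Extent that a PMD-sized THP mapping actually requires (needs
 * CONFIG_TRANSPARENT_HUGEPAGE for the HPAGE_PMD_* macros). */
static void example_thp_aware_invalidate(unsigned long address)
{
        unsigned long start = address & HPAGE_PMD_MASK;

        example_drop_secondary_mapping(start, start + HPAGE_PMD_SIZE);
}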

By killing this callback we unify the mmu_notifier callback API to
always take a virtual address range as input.

Finally, this also simplifies life for end users, as there are now two
clear choices (a minimal sketch of both follows below):
  - invalidate_range_start()/end() callbacks, which are allowed to sleep
  - invalidate_range(), which may not sleep but is called right after the
    page table update, while the page table lock is still held
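
As a minimal, hypothetical sketch of those two choices (all example_*
names are invented for illustration; only the mmu_notifier_ops callbacks
and their signatures come from the API as it stands after this series):

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Hypothetical driver state mirroring CPU page tables on a device. */
struct example_mirror {
        struct mmu_notifier     mn;
        struct mutex            lock;   /* init with mutex_init() at registration */
};

/* Sleepable pair: may block, e.g. to wait for device TLB flushes. */
static void example_invalidate_range_start(struct mmu_notifier *mn,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        struct example_mirror *m = container_of(mn, struct example_mirror, mn);

        mutex_lock(&m->lock);           /* sleeping is allowed here */
        /* ... stop the device from faulting on [start, end) ... */
        mutex_unlock(&m->lock);
}

static void example_invalidate_range_end(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start,
                                         unsigned long end)
{
        /* ... re-allow device faults on [start, end) ... */
}

/* Non-sleeping callback: runs right after the CPU page table update,
 * still under the page table lock, so only atomic work is allowed. */
static void example_invalidate_range(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
{
        /* ... kick an asynchronous device TLB flush for [start, end) ... */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
        .invalidate_range_start = example_invalidate_range_start,
        .invalidate_range_end   = example_invalidate_range_end,
        .invalidate_range       = example_invalidate_range,
};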
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Bernhard Held <berny156@gmx.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fb1522e0
@@ -94,17 +94,6 @@ struct mmu_notifier_ops {
                           unsigned long address,
                           pte_t pte);
 
-       /*
-        * Before this is invoked any secondary MMU is still ok to
-        * read/write to the page previously pointed to by the Linux
-        * pte because the page hasn't been freed yet and it won't be
-        * freed until this returns. If required set_page_dirty has to
-        * be called internally to this method.
-        */
-       void (*invalidate_page)(struct mmu_notifier *mn,
-                               struct mm_struct *mm,
-                               unsigned long address);
-
        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_sem and/or the
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address);
 extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                __mmu_notifier_change_pte(mm, address, pte);
 }
 
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-       if (mm_has_notifiers(mm))
-               __mmu_notifier_invalidate_page(mm, address);
-}
-
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-}
-
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
@@ -174,20 +174,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
        srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-       struct mmu_notifier *mn;
-       int id;
-
-       id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-               if (mn->ops->invalidate_page)
-                       mn->ops->invalidate_page(mn, mm, address);
-       }
-       srcu_read_unlock(&srcu, id);
-}
-
 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {