Commit 008cfe44 authored by Peter Xu, committed by Linus Torvalds

mm: Introduce mm_struct.has_pinned

(Commit message largely collected from Jason Gunthorpe)

Reduce the chance of false positives from page_maybe_dma_pinned() by
keeping track of whether the mm_struct has ever been used with
pin_user_pages().  This allows cases that might drive up the page
ref_count to avoid any penalty from handling dma_pinned pages.
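
As an illustration of that intended use (a hedged sketch, not code from this
commit): a path about to raise a page's ref_count could test the per-mm flag
first and fall back to the per-page heuristic only when the mm has ever
pinned.  The helper name below is hypothetical; page_maybe_dma_pinned() and
mm->has_pinned are the real kernel symbols.

#include <linux/mm.h>
#include <linux/mm_types.h>

/*
 * Hypothetical helper: decide whether a ref_count-raising path must
 * treat @page as possibly DMA-pinned.  Most mms never pin anything,
 * so the cheap atomic_read() short-circuits the per-page check.
 */
static inline bool page_needs_pin_handling(struct mm_struct *mm,
					   struct page *page)
{
	if (!atomic_read(&mm->has_pinned))
		return false;

	/* Refcount-based heuristic; can still report false positives. */
	return page_maybe_dma_pinned(page);
}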

Future work is planned to provide a more sophisticated solution, likely
turning this into a real counter.  For now, make it an atomic_t but use
it as a boolean for simplicity.
Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a1bffa48
@@ -436,6 +436,16 @@ struct mm_struct {
 	 */
 	atomic_t mm_count;
+	/**
+	 * @has_pinned: Whether this mm has pinned any pages. This can
+	 * be either replaced in the future by @pinned_vm when it
+	 * becomes stable, or grow into a counter on its own. We're
+	 * aggressive on this bit now - even if the pinned pages were
+	 * unpinned later on, we'll still keep this bit set for the
+	 * lifecycle of this mm just for simplicity.
+	 */
+	atomic_t has_pinned;
 #ifdef CONFIG_MMU
 	atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
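
The comment above leaves room for has_pinned to grow into a real counter.
Purely as a hedged sketch of what that later shape could look like (none of
these helpers exist in this commit; the names are made up):

#include <linux/atomic.h>
#include <linux/mm_types.h>

/* Hypothetical counter variant: pin/unpin would bracket the count ... */
static inline void mm_note_pin(struct mm_struct *mm)
{
	atomic_inc(&mm->has_pinned);
}

static inline void mm_note_unpin(struct mm_struct *mm)
{
	atomic_dec(&mm->has_pinned);
}

/* ... and readers would test for non-zero instead of a sticky bit. */
static inline bool mm_maybe_has_pins(struct mm_struct *mm)
{
	return atomic_read(&mm->has_pinned) != 0;
}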
@@ -1011,6 +1011,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_pgtables_bytes_init(mm);
 	mm->map_count = 0;
 	mm->locked_vm = 0;
+	atomic_set(&mm->has_pinned, 0);
 	atomic64_set(&mm->pinned_vm, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
@@ -1255,6 +1255,9 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		BUG_ON(*locked != 1);
 	}
+	if (flags & FOLL_PIN)
+		atomic_set(&current->mm->has_pinned, 1);
 	/*
 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
@@ -2660,6 +2663,9 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
 				       FOLL_FAST_ONLY)))
 		return -EINVAL;
+	if (gup_flags & FOLL_PIN)
+		atomic_set(&current->mm->has_pinned, 1);
 	if (!(gup_flags & FOLL_FAST_ONLY))
 		might_lock_read(&current->mm->mmap_lock);
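
For context, a hedged sketch of a caller that reaches these paths: every
user of the pin_user_pages*() family passes FOLL_PIN internally, so the
first such call in a process marks current->mm->has_pinned.  The function
name below is illustrative only.

#include <linux/mm.h>

/* Illustrative only: pin one user page, use it, then release the pin. */
static int demo_pin_one_page(unsigned long uaddr, struct page **page)
{
	int ret;

	/* pin_user_pages_fast() implies FOLL_PIN, which sets has_pinned. */
	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the pinned page, e.g. for DMA ... */

	/* Unpinning does not clear has_pinned; the bit stays set. */
	unpin_user_page(*page);
	return 0;
}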