mm: Add folio_mapped()

This function is the equivalent of page_mapped().  It is slightly
shorter as we do not need to handle the PageTail() case.  Reimplement
page_mapped() as a wrapper around folio_mapped().  folio_mapped()
is 13 bytes smaller than page_mapped(), but the page_mapped() wrapper
is 30 bytes, for a net increase of 17 bytes of text.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
parent 6abbaa5b
......@@ -1769,6 +1769,7 @@ static inline pgoff_t page_index(struct page *page)
}
bool page_mapped(struct page *page);
bool folio_mapped(struct folio *folio);
/*
* Return true only if the page has been allocated with
......
......@@ -299,6 +299,12 @@ FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
/*
 * The compound mapcount of a large folio is stored in the
 * first tail page, i.e. the page immediately after &folio->page.
 */
static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
	return &(&folio->page + 1)->compound_mapcount;
}
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
......
......@@ -35,3 +35,9 @@ void wait_for_stable_page(struct page *page)
return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
/* Legacy wrapper: resolve the page to its folio and defer to folio_mapped(). */
bool page_mapped(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mapped(folio);
}
EXPORT_SYMBOL(page_mapped);
......@@ -671,28 +671,31 @@ void *page_rmapping(struct page *page)
return __page_rmapping(page);
}
/*
* Return true if this page is mapped into pagetables.
* For compound page it returns true if any subpage of compound page is mapped.
/**
* folio_mapped - Is this folio mapped into userspace?
* @folio: The folio.
*
* Return: True if any page in this folio is referenced by user page tables.
*/
bool page_mapped(struct page *page)
bool folio_mapped(struct folio *folio)
{
int i;
long i, nr;
if (likely(!PageCompound(page)))
return atomic_read(&page->_mapcount) >= 0;
page = compound_head(page);
if (atomic_read(compound_mapcount_ptr(page)) >= 0)
if (folio_test_single(folio))
return atomic_read(&folio->_mapcount) >= 0;
if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
return true;
if (PageHuge(page))
if (folio_test_hugetlb(folio))
return false;
for (i = 0; i < compound_nr(page); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++) {
if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
return true;
}
return false;
}
EXPORT_SYMBOL(page_mapped);
EXPORT_SYMBOL(folio_mapped);
struct anon_vma *page_anon_vma(struct page *page)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment