mm/filemap: Add folio_wait_locked()

Also add folio_wait_locked_killable().  Turn wait_on_page_locked() and
wait_on_page_locked_killable() into wrappers.  This eliminates a call
to compound_head() from each call-site, reducing text size by 193 bytes
for me.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
parent ffdc8dab
@@ -732,23 +732,33 @@ extern void wait_on_page_bit(struct page *page, int bit_nr);
 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
 
 /*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
  *
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
+ * This must be called with the caller "holding" the folio,
+ * ie with increased "page->count" so that the folio won't
  * go away during the wait..
  */
+static inline void folio_wait_locked(struct folio *folio)
+{
+	if (folio_test_locked(folio))
+		wait_on_page_bit(&folio->page, PG_locked);
+}
+
+static inline int folio_wait_locked_killable(struct folio *folio)
+{
+	if (!folio_test_locked(folio))
+		return 0;
+	return wait_on_page_bit_killable(&folio->page, PG_locked);
+}
+
 static inline void wait_on_page_locked(struct page *page)
 {
-	if (PageLocked(page))
-		wait_on_page_bit(compound_head(page), PG_locked);
+	folio_wait_locked(page_folio(page));
 }
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
-	if (!PageLocked(page))
-		return 0;
-	return wait_on_page_bit_killable(compound_head(page), PG_locked);
+	return folio_wait_locked_killable(page_folio(page));
 }
 
 int put_and_wait_on_page_locked(struct page *page, int state);
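For illustration only (not part of this patch): a caller that already has a struct folio in hand can call the new helper directly and avoid the compound_head() lookup that the old page-based inlines performed at every call-site. The function below and its folio_get()/folio_put() reference handling are a hypothetical sketch of such a caller:

	/* Hypothetical caller sketch; not part of this patch. */
	static void example_wait_on_folio(struct folio *folio)
	{
		/*
		 * Hold a reference so the folio cannot be freed while we
		 * sleep, as the comment above folio_wait_locked() requires.
		 */
		folio_get(folio);
		folio_wait_locked(folio);
		folio_put(folio);
	}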
@@ -1704,9 +1704,9 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 		mmap_read_unlock(mm);
 		if (flags & FAULT_FLAG_KILLABLE)
-			wait_on_page_locked_killable(page);
+			folio_wait_locked_killable(folio);
 		else
-			wait_on_page_locked(page);
+			folio_wait_locked(folio);
 		return 0;
 	}
 	if (flags & FAULT_FLAG_KILLABLE) {
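The killable variant propagates the return value of wait_on_page_bit_killable(): 0 when the folio was observed unlocked, -EINTR when a fatal signal cut the wait short. A minimal sketch of checking that result follows; the surrounding function is illustrative and not part of this patch:

	/* Illustrative only; this wrapper is hypothetical. */
	static int example_wait_killable(struct folio *folio)
	{
		int err = folio_wait_locked_killable(folio);

		if (err)		/* -EINTR: fatal signal pending */
			return err;

		/* Folio seen unlocked; carry on. */
		return 0;
	}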