mm/filemap: Add folio_lock_killable()

This is like lock_page_killable() but for use by callers who
know they have a folio.  Convert __lock_page_killable() to be
__folio_lock_killable().  This saves one call to compound_head() per
contended call to lock_page_killable().

__folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
was.  filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
shrinks by 71 bytes.  That's a total of 164 bytes of text saved.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
parent 7c23c782
@@ -653,7 +653,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
} }
void __folio_lock(struct folio *folio); void __folio_lock(struct folio *folio);
extern int __lock_page_killable(struct page *page); int __folio_lock_killable(struct folio *folio);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags); unsigned int flags);
@@ -693,6 +693,14 @@ static inline void lock_page(struct page *page)
__folio_lock(folio); __folio_lock(folio);
} }
/**
 * folio_lock_killable - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Tries the lockless fast path first; only falls back to the sleeping,
 * killable slow path when the folio is already locked by someone else.
 *
 * Return: 0 if the folio was locked, -EINTR if a fatal signal arrived
 * while waiting for the lock.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (folio_trylock(folio))
		return 0;
	return __folio_lock_killable(folio);
}
/* /*
* lock_page_killable is like lock_page but can be interrupted by fatal * lock_page_killable is like lock_page but can be interrupted by fatal
* signals. It returns 0 if it locked the page and -EINTR if it was * signals. It returns 0 if it locked the page and -EINTR if it was
@@ -700,10 +708,7 @@ static inline void lock_page(struct page *page)
*/ */
static inline int lock_page_killable(struct page *page) static inline int lock_page_killable(struct page *page)
{ {
might_sleep(); return folio_lock_killable(page_folio(page));
if (!trylock_page(page))
return __lock_page_killable(page);
return 0;
} }
/* /*
......
@@ -1644,14 +1644,13 @@ void __folio_lock(struct folio *folio)
} }
EXPORT_SYMBOL(__folio_lock); EXPORT_SYMBOL(__folio_lock);
int __lock_page_killable(struct page *__page) int __folio_lock_killable(struct folio *folio)
{ {
struct page *page = compound_head(__page); wait_queue_head_t *q = page_waitqueue(&folio->page);
wait_queue_head_t *q = page_waitqueue(page); return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
EXCLUSIVE); EXCLUSIVE);
} }
EXPORT_SYMBOL_GPL(__lock_page_killable); EXPORT_SYMBOL_GPL(__folio_lock_killable);
int __lock_page_async(struct page *page, struct wait_page_queue *wait) int __lock_page_async(struct page *page, struct wait_page_queue *wait)
{ {
@@ -1693,6 +1692,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
int __lock_page_or_retry(struct page *page, struct mm_struct *mm, int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags) unsigned int flags)
{ {
struct folio *folio = page_folio(page);
if (fault_flag_allow_retry_first(flags)) { if (fault_flag_allow_retry_first(flags)) {
/* /*
* CAUTION! In this case, mmap_lock is not released * CAUTION! In this case, mmap_lock is not released
@@ -1711,13 +1712,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
if (flags & FAULT_FLAG_KILLABLE) { if (flags & FAULT_FLAG_KILLABLE) {
int ret; int ret;
ret = __lock_page_killable(page); ret = __folio_lock_killable(folio);
if (ret) { if (ret) {
mmap_read_unlock(mm); mmap_read_unlock(mm);
return 0; return 0;
} }
} else { } else {
__folio_lock(page_folio(page)); __folio_lock(folio);
} }
return 1; return 1;
@@ -2929,7 +2930,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
*fpin = maybe_unlock_mmap_for_io(vmf, *fpin); *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
if (vmf->flags & FAULT_FLAG_KILLABLE) { if (vmf->flags & FAULT_FLAG_KILLABLE) {
if (__lock_page_killable(&folio->page)) { if (__folio_lock_killable(folio)) {
/* /*
* We didn't have the right flags to drop the mmap_lock, * We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals * but all fault_handlers only check for fatal signals
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment