Commit bd8a1f36 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/filemap: support readpage splitting a page

For page splitting to succeed, the thread asking to split the page has to
be the only one with a reference to the page.  Calling
wait_on_page_locked() while holding a reference to the page will
effectively prevent this from happening when enough threads are waiting on
the same page.  Use put_and_wait_on_page_locked() to sleep without holding
a reference to the page, then retry the page lookup after the page is
unlocked.
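
A minimal caller-side sketch of that pattern (not part of the patch; the
helper name below is hypothetical): if the page is locked, drop our
reference before sleeping, then let the caller redo the page cache lookup.

/* Hypothetical helper illustrating wait-without-a-reference. */
static struct page *trylock_or_wait(struct page *page)
{
	if (trylock_page(page))
		return page;	/* got the lock, caller proceeds */
	/*
	 * Drop our reference before sleeping so a concurrent
	 * split_huge_page() can bring the refcount down to one.
	 */
	put_and_wait_on_page_locked(page, TASK_KILLABLE);
	return NULL;		/* caller retries the lookup */
}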

Since we now get the page lock a little earlier in filemap_update_page(),
we can eliminate a number of duplicate checks.  The original intent
(commit ebded027 ("avoid unnecessary calls to lock_page when waiting
for IO to complete during a read")) behind getting the page lock later was
to avoid re-locking the page after it has been brought uptodate by another
thread.  We still avoid that because we go through the normal lookup path
again after the winning thread has brought the page uptodate.
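
A rough sketch of the caller side (hypothetical wrapper; in the tree the
retry is filemap_get_pages() re-running its lookup): when
filemap_update_page() returns NULL we look the page up again, and a page
that the winning thread has meanwhile brought uptodate is returned without
relocking it.

/* Hypothetical wrapper showing the lookup-retry loop. */
static struct page *get_uptodate_page(struct kiocb *iocb, struct file *filp,
		struct iov_iter *iter, pgoff_t index, loff_t pos, loff_t count)
{
	struct address_space *mapping = filp->f_mapping;
	struct page *page;

	for (;;) {
		page = find_get_page(mapping, index);
		if (!page || PageUptodate(page))
			return page;	/* uptodate pages need no lock here */
		page = filemap_update_page(iocb, filp, iter, page, pos, count);
		if (page)
			return page;	/* page, or ERR_PTR for the caller to check */
		/* NULL: we dropped our reference and slept; retry the lookup */
	}
}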

Link: https://lkml.kernel.org/r/20210122160140.223228-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48054625
@@ -1373,14 +1373,6 @@ static int __wait_on_page_locked_async(struct page *page,
 	return ret;
 }
 
-static int wait_on_page_locked_async(struct page *page,
-				     struct wait_page_queue *wait)
-{
-	if (!PageLocked(page))
-		return 0;
-	return __wait_on_page_locked_async(compound_head(page), wait, false);
-}
-
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -2286,64 +2278,42 @@ static struct page *filemap_update_page(struct kiocb *iocb, struct file *filp,
 	struct inode *inode = mapping->host;
 	int error;
 
-	/*
-	 * See comment in do_read_cache_page on why
-	 * wait_on_page_locked is used to avoid unnecessarily
-	 * serialisations and why it's safe.
-	 */
 	if (iocb->ki_flags & IOCB_WAITQ) {
-		error = wait_on_page_locked_async(page,
-						iocb->ki_waitq);
+		error = lock_page_async(page, iocb->ki_waitq);
+		if (error) {
+			put_page(page);
+			return ERR_PTR(error);
+		}
 	} else {
-		error = wait_on_page_locked_killable(page);
-	}
-	if (unlikely(error)) {
-		put_page(page);
-		return ERR_PTR(error);
+		if (!trylock_page(page)) {
+			put_and_wait_on_page_locked(page, TASK_KILLABLE);
+			return NULL;
+		}
 	}
-	if (PageUptodate(page))
-		return page;
 
+	if (!page->mapping)
+		goto truncated;
+	if (PageUptodate(page))
+		goto uptodate;
 	if (inode->i_blkbits == PAGE_SHIFT ||
 			!mapping->a_ops->is_partially_uptodate)
-		goto page_not_up_to_date;
+		goto readpage;
 	/* pipes can't handle partially uptodate pages */
 	if (unlikely(iov_iter_is_pipe(iter)))
-		goto page_not_up_to_date;
-	if (!trylock_page(page))
-		goto page_not_up_to_date;
-	/* Did it get truncated before we got the lock? */
-	if (!page->mapping)
-		goto page_not_up_to_date_locked;
+		goto readpage;
 	if (!mapping->a_ops->is_partially_uptodate(page,
-			pos & ~PAGE_MASK, count))
-		goto page_not_up_to_date_locked;
+			pos & (thp_size(page) - 1), count))
+		goto readpage;
+uptodate:
 	unlock_page(page);
 	return page;
 
-page_not_up_to_date:
-	/* Get exclusive access to the page ... */
-	error = lock_page_for_iocb(iocb, page);
-	if (unlikely(error)) {
-		put_page(page);
-		return ERR_PTR(error);
-	}
-
-page_not_up_to_date_locked:
-	/* Did it get truncated before we got the lock? */
-	if (!page->mapping) {
-		unlock_page(page);
-		put_page(page);
-		return NULL;
-	}
-
-	/* Did somebody else fill it already? */
-	if (PageUptodate(page)) {
-		unlock_page(page);
-		return page;
-	}
-
+readpage:
	return filemap_read_page(iocb, filp, mapping, page);
+truncated:
+	unlock_page(page);
+	put_page(page);
+	return NULL;
 }
 
 static struct page *filemap_create_page(struct kiocb *iocb,