Commit 63ec1973 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/shmem: return head page from find_lock_entry

Convert shmem_getpage_gfp() (the only remaining caller of
find_lock_entry()) to cope with a head page being returned instead of
the subpage for the index.

[willy@infradead.org: fix BUG()s]
  Link: https://lore.kernel.org/linux-mm/20200912032042.GA6583@casper.infradead.org/
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Link: https://lkml.kernel.org/r/20200910183318.20139-8-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a6de4b48
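
For context, a minimal caller-side sketch of what this change means: find_lock_entry() now hands back the locked head page, and a caller that wants the page for a specific index derives the subpage itself. The helper name lookup_subpage_locked() is hypothetical and not part of this commit; find_subpage() and xa_is_value() are existing kernel helpers, and thp_contains() is the helper added by the first hunk below.

/*
 * Hypothetical illustration only -- not part of this commit.
 * find_lock_entry() returns the locked head page (or a shadow/swap
 * entry); the caller computes the subpage for @index from the head.
 */
static struct page *lookup_subpage_locked(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *head = find_lock_entry(mapping, index);

	if (!head || xa_is_value(head))
		return head;		/* miss, or shadow/swap entry */

	VM_BUG_ON_PAGE(!thp_contains(head, index), head);
	/* The head stays locked and pinned; only the pointer is adjusted. */
	return find_subpage(head, index);
}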
...
@@ -372,6 +372,15 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
 			mapping_gfp_mask(mapping));
 }
 
+/* Does this page contain this index? */
+static inline bool thp_contains(struct page *head, pgoff_t index)
+{
+	/* HugeTLBfs indexes the page cache in units of hpage_size */
+	if (PageHuge(head))
+		return head->index == index;
+	return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
+}
+
 /*
  * Given the page we found in the page cache, return the page corresponding
  * to this index in the file
...
@@ -1692,37 +1692,34 @@ struct page *find_get_entry(struct address_space *mapping, pgoff_t index)
 }
 
 /**
- * find_lock_entry - locate, pin and lock a page cache entry
- * @mapping: the address_space to search
- * @offset: the page cache index
+ * find_lock_entry - Locate and lock a page cache entry.
+ * @mapping: The address_space to search.
+ * @index: The page cache index.
  *
- * Looks up the page cache slot at @mapping & @offset. If there is a
- * page cache page, it is returned locked and with an increased
- * refcount.
+ * Looks up the page at @mapping & @index. If there is a page in the
+ * cache, the head page is returned locked and with an increased refcount.
  *
  * If the slot holds a shadow entry of a previously evicted page, or a
  * swap entry from shmem/tmpfs, it is returned.
  *
- * find_lock_entry() may sleep.
- *
- * Return: the found page or shadow entry, %NULL if nothing is found.
+ * Context: May sleep.
+ * Return: The head page or shadow entry, %NULL if nothing is found.
  */
-struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t index)
 {
 	struct page *page;
 
 repeat:
-	page = find_get_entry(mapping, offset);
+	page = find_get_entry(mapping, index);
 	if (page && !xa_is_value(page)) {
 		lock_page(page);
 		/* Has the page been truncated? */
-		if (unlikely(page_mapping(page) != mapping)) {
+		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
 			put_page(page);
 			goto repeat;
 		}
-		page = find_subpage(page, offset);
-		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
+		VM_BUG_ON_PAGE(!thp_contains(page, index), page);
 	}
 	return page;
 }
...
@@ -1830,6 +1830,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return error;
 	}
 
+	if (page)
+		hindex = page->index;
 	if (page && sgp == SGP_WRITE)
 		mark_page_accessed(page);
 
...
@@ -1840,11 +1842,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		unlock_page(page);
 		put_page(page);
 		page = NULL;
+		hindex = index;
 	}
-	if (page || sgp == SGP_READ) {
-		*pagep = page;
-		return 0;
-	}
+	if (page || sgp == SGP_READ)
+		goto out;
 
 	/*
 	 * Fast cache lookup did not find it:
...
@@ -1969,14 +1970,13 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	 * it now, lest undo on failure cancel our earlier guarantee.
 	 */
 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
-		struct page *head = compound_head(page);
 		int i;
 
-		for (i = 0; i < compound_nr(head); i++) {
-			clear_highpage(head + i);
-			flush_dcache_page(head + i);
+		for (i = 0; i < compound_nr(page); i++) {
+			clear_highpage(page + i);
+			flush_dcache_page(page + i);
 		}
-		SetPageUptodate(head);
+		SetPageUptodate(page);
 	}
 
 	/* Perhaps the file has been truncated since we checked */
...
@@ -1992,6 +1992,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		error = -EINVAL;
 		goto unlock;
 	}
+out:
 	*pagep = page + index - hindex;
 	return 0;
...
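
To make the final *pagep computation concrete, a hypothetical sketch (not in the commit), assuming the hindex/index naming from shmem_getpage_gfp() above:

/*
 * Hypothetical example: for a 2MB THP (512 subpages) cached at indices
 * 512..1023, the head page has ->index == 512, so hindex is 512 and a
 * lookup of index 515 yields the subpage head + 3.
 */
static struct page *shmem_pick_subpage(struct page *head, pgoff_t index)
{
	pgoff_t hindex = head->index;	/* index of the head page */

	return head + (index - hindex);	/* same as: page + index - hindex */
}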