Commit 179d3e4f authored by Hugh Dickins, committed by Andrew Morton

mm/madvise: clean up force_shm_swapin_readahead()

Some nearby MADV_WILLNEED cleanup unrelated to pte_offset_map_lock(). 
shmem_swapin_range() is a better name than force_shm_swapin_readahead(). 
Fix unimportant off-by-one on end_index.  Call the swp_entry_t "entry"
rather than "swap": either is okay, but entry is the name used elsewhere
in mm/madvise.c.  Do not assume GFP_HIGHUSER_MOVABLE: that's right for
anon swap, but shmem should take gfp from mapping.  Pass the actual vma
and address to read_swap_cache_async(), in case a NUMA mempolicy applies. 
lru_add_drain() at outer level, like madvise_willneed()'s other branch.

Link: https://lkml.kernel.org/r/67e18875-ffb3-ec27-346-f350e07bed87@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f3cd4ab0
...@@ -235,30 +235,34 @@ static const struct mm_walk_ops swapin_walk_ops = { ...@@ -235,30 +235,34 @@ static const struct mm_walk_ops swapin_walk_ops = {
.pmd_entry = swapin_walk_pmd_entry, .pmd_entry = swapin_walk_pmd_entry,
}; };
static void force_shm_swapin_readahead(struct vm_area_struct *vma, static void shmem_swapin_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, unsigned long start, unsigned long end,
struct address_space *mapping) struct address_space *mapping)
{ {
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); pgoff_t end_index = linear_page_index(vma, end) - 1;
struct page *page; struct page *page;
struct swap_iocb *splug = NULL; struct swap_iocb *splug = NULL;
rcu_read_lock(); rcu_read_lock();
xas_for_each(&xas, page, end_index) { xas_for_each(&xas, page, end_index) {
swp_entry_t swap; unsigned long addr;
swp_entry_t entry;
if (!xa_is_value(page)) if (!xa_is_value(page))
continue; continue;
swap = radix_to_swp_entry(page); entry = radix_to_swp_entry(page);
/* There might be swapin error entries in shmem mapping. */ /* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(swap)) if (non_swap_entry(entry))
continue; continue;
addr = vma->vm_start +
((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
xas_pause(&xas); xas_pause(&xas);
rcu_read_unlock(); rcu_read_unlock();
page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
NULL, 0, false, &splug); vma, addr, false, &splug);
if (page) if (page)
put_page(page); put_page(page);
...@@ -266,8 +270,6 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, ...@@ -266,8 +270,6 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
} }
rcu_read_unlock(); rcu_read_unlock();
swap_read_unplug(splug); swap_read_unplug(splug);
lru_add_drain(); /* Push any new pages onto the LRU now */
} }
#endif /* CONFIG_SWAP */ #endif /* CONFIG_SWAP */
...@@ -291,8 +293,8 @@ static long madvise_willneed(struct vm_area_struct *vma, ...@@ -291,8 +293,8 @@ static long madvise_willneed(struct vm_area_struct *vma,
} }
if (shmem_mapping(file->f_mapping)) { if (shmem_mapping(file->f_mapping)) {
force_shm_swapin_readahead(vma, start, end, shmem_swapin_range(vma, start, end, file->f_mapping);
file->f_mapping); lru_add_drain(); /* Push any new pages onto the LRU now */
return 0; return 0;
} }
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment