Commit 06668257 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: remove page_mapping()

All callers are now converted, delete this compatibility wrapper.  Also
fix up some comments which referred to page_mapping.

Link: https://lkml.kernel.org/r/20240423225552.4113447-7-willy@infradead.org
Link: https://lkml.kernel.org/r/20240524181813.698813-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ffc3c8a6
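
For context: the deleted wrapper (its body is visible in the final hunk below) simply converted the page to its folio and deferred to folio_mapping(). A minimal sketch of the conversion each caller underwent; the variable names here are illustrative, not taken from any particular call site:

	/* Before, via the compatibility wrapper: */
	struct address_space *mapping = page_mapping(page);

	/* After, going through the folio directly: */
	struct address_space *mapping = folio_mapping(page_folio(page));
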
@@ -283,7 +283,7 @@ void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
  * flush_dcache_page is used when the kernel has written to the page
  * cache page at virtual address page->virtual.
  *
- * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * If this page isn't mapped (ie, folio_mapping == NULL), or it might
  * have userspace mappings, then we _must_ always clean + invalidate
  * the dcache entries associated with the kernel mapping.
  *

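Expressed as code, the rule this comment states might look like the sketch below. Here folio is the page-cache folio in question, mapping_mapped() is the existing pagemap helper, and __flush_dcache_folio() stands in for a hypothetical arch-specific flush routine; the real per-arch implementations differ in detail:

	struct address_space *mapping = folio_mapping(folio);

	/*
	 * No mapping (not in the page cache), or potentially mapped
	 * into userspace: we must clean + invalidate the dcache
	 * entries for the kernel mapping.
	 */
	if (!mapping || mapping_mapped(mapping))
		__flush_dcache_folio(folio);
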
@@ -117,7 +117,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  * flush_dcache_folio is used when the kernel has written to the page
  * cache page at virtual address page->virtual.
  *
- * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * If this page isn't mapped (ie, folio_mapping == NULL), or it might
  * have userspace mappings, then we _must_ always clean + invalidate
  * the dcache entries associated with the kernel mapping.
  *

@@ -112,7 +112,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
 	}

 	/*
-	 * We could delay the flush for the !page_mapping case too.  But that
+	 * We could delay the flush for the !folio_mapping case too.  But that
 	 * case is for exec env/arg pages and those are %99 certainly going to
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */

@@ -490,7 +490,7 @@ void flush_dcache_folio(struct folio *folio)
 		}
 		set_dcache_dirty(folio, this_cpu);
 	} else {
-		/* We could delay the flush for the !page_mapping
+		/* We could delay the flush for the !folio_mapping
 		 * case too.  But that case is for exec env/arg
 		 * pages and those are %99 certainly going to get
 		 * faulted into the tlb (and thus flushed) anyways.

@@ -53,7 +53,7 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
  * filesystem and block layers.  Nowadays the basic I/O unit
  * is the bio, and buffer_heads are used for extracting block
  * mappings (via a get_block_t call), for tracking state within
- * a page (via a page_mapping) and for wrapping bio submission
+ * a folio (via a folio_mapping) and for wrapping bio submission
  * for backward compatibility reasons (e.g. submit_bh).
  */
 struct buffer_head {

@@ -655,27 +655,28 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
 #endif

 /*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
+ * On an anonymous folio mapped into a user virtual memory area,
+ * folio->mapping points to its anon_vma, not to a struct address_space;
  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
  *
  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
- * bit; and then page->mapping points, not to an anon_vma, but to a private
+ * bit; and then folio->mapping points, not to an anon_vma, but to a private
  * structure which KSM associates with that merged page.  See ksm.h.
  *
  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points to a struct movable_operations.
+ * page and then folio->mapping points to a struct movable_operations.
  *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
+ * Please note that, confusingly, "folio_mapping" refers to the inode
+ * address_space which maps the folio from disk; whereas "folio_mapped"
+ * refers to user virtual address space into which the folio is mapped.
  *
  * For slab pages, since slab reuses the bits in struct page to store its
- * internal states, the page->mapping does not exist as such, nor do these
- * flags below.  So in order to avoid testing non-existent bits, please
- * make sure that PageSlab(page) actually evaluates to false before calling
- * the following functions (e.g., PageAnon).  See mm/slab.h.
+ * internal states, the folio->mapping does not exist as such, nor do
+ * these flags below.  So in order to avoid testing non-existent bits,
+ * please make sure that folio_test_slab(folio) actually evaluates to
+ * false before calling the following functions (e.g., folio_test_anon).
+ * See mm/slab.h.
  */
 #define PAGE_MAPPING_ANON	0x1
 #define PAGE_MAPPING_MOVABLE	0x2

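As an illustration of the encoding described above, here is a toy, self-contained sketch of how the two low tag bits of the mapping pointer distinguish the cases; struct folio is reduced to a stub, and the real accessors are folio_test_anon(), folio_test_ksm() and friends in this header:

#include <stdbool.h>

/* Tag bits, with the same values as in the hunk above. */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/* Toy stand-in for struct folio, just enough to show the tagging. */
struct folio_stub {
	void *mapping;	/* address_space, anon_vma, or KSM private data */
};

/* Anonymous: the PAGE_MAPPING_ANON bit is set in the pointer. */
static bool stub_test_anon(const struct folio_stub *folio)
{
	return (unsigned long)folio->mapping & PAGE_MAPPING_ANON;
}

/* KSM-merged: both tag bits are set (anon + movable). */
static bool stub_test_ksm(const struct folio_stub *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
	       PAGE_MAPPING_FLAGS;
}
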
@@ -426,7 +426,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
 #endif
 }

-struct address_space *page_mapping(struct page *);
 struct address_space *folio_mapping(struct folio *);
 struct address_space *swapcache_mapping(struct folio *);

@@ -10,12 +10,6 @@
 #include <linux/swap.h>
 #include "internal.h"

-struct address_space *page_mapping(struct page *page)
-{
-	return folio_mapping(page_folio(page));
-}
-EXPORT_SYMBOL(page_mapping);
-
 void unlock_page(struct page *page)
 {
 	return folio_unlock(page_folio(page));