mm: Handle per-folio private data

Add folio_get_private() which mirrors page_private() -- ie folio private
data is the same as page private data.  The only difference is that it
returns a void * instead of an unsigned long, which matches the majority
of users.
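
For illustration only (the helper below is hypothetical, not part of this
patch, and uses buffer_head merely as a typical private-data type), the
difference shows up at the call site:

  /* Hypothetical reader of previously attached private data. */
  static inline struct buffer_head *example_folio_private(struct folio *folio)
  {
          /* page_private() needs a cast from unsigned long, eg:
           *   return (struct buffer_head *)page_private(&folio->page);
           * folio_get_private() already returns void *, so no cast:
           */
          return folio_get_private(folio);
  }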

Turn attach_page_private() into folio_attach_private() and reimplement
attach_page_private() as a wrapper.  No filesystem which uses page private
data currently supports compound pages, so we're free to define the rules.
attach_page_private() may only be called on a head page; if you want
to add private data to a tail page, you can call set_page_private()
directly (and shouldn't increment the page refcount!  That should be
done when adding private data to the head page / folio).
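
As a sketch of the intended calling convention (the example_* names and
structure are made up for illustration; assumes <linux/pagemap.h> and
<linux/slab.h>):

  struct example_private {
          unsigned long state;
  };

  static void example_attach(struct folio *folio, struct example_private *priv)
  {
          /* Takes a folio reference; folio_detach_private() drops it. */
          folio_attach_private(folio, priv);
  }

  static void example_release(struct folio *folio)
  {
          struct example_private *priv = folio_detach_private(folio);

          /* kfree(NULL) is a no-op if nothing was attached. */
          kfree(priv);
  }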

This saves 813 bytes of text with the distro-derived config that I'm
testing, due to removing the calls to compound_head() in get_page() &
put_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
@@ -317,6 +317,12 @@ static inline atomic_t *compound_pincount_ptr(struct page *page)
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
+/*
+ * page_private can be used on tail pages.  However, PagePrivate is only
+ * checked by the VM on the head page.  So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (eg attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page)
+ */
 #define page_private(page)		((page)->private)
 
 static inline void set_page_private(struct page *page, unsigned long private)
@@ -324,6 +330,11 @@ static inline void set_page_private(struct page *page, unsigned long private)
 	page->private = private;
 }
 
+static inline void *folio_get_private(struct folio *folio)
+{
+	return folio->private;
+}
+
 struct page_frag_cache {
 	void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -184,42 +184,52 @@ static inline bool page_cache_get_speculative(struct page *page)
 }
 
 /**
- * attach_page_private - Attach private data to a page.
- * @page: Page to attach data to.
- * @data: Data to attach to page.
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
  *
- * Attaching private data to a page increments the page's reference count.
- * The data must be detached before the page will be freed.
+ * Attaching private data to a folio increments the page's reference count.
+ * The data must be detached before the folio will be freed.
  */
-static inline void attach_page_private(struct page *page, void *data)
+static inline void folio_attach_private(struct folio *folio, void *data)
 {
-	get_page(page);
-	set_page_private(page, (unsigned long)data);
-	SetPagePrivate(page);
+	folio_get(folio);
+	folio->private = data;
+	folio_set_private(folio);
 }
 
 /**
- * detach_page_private - Detach private data from a page.
- * @page: Page to detach data from.
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
  *
- * Removes the data that was previously attached to the page and decrements
+ * Removes the data that was previously attached to the folio and decrements
  * the refcount on the page.
  *
- * Return: Data that was attached to the page.
+ * Return: Data that was attached to the folio.
  */
-static inline void *detach_page_private(struct page *page)
+static inline void *folio_detach_private(struct folio *folio)
 {
-	void *data = (void *)page_private(page);
+	void *data = folio_get_private(folio);
 
-	if (!PagePrivate(page))
+	if (!folio_test_private(folio))
 		return NULL;
-	ClearPagePrivate(page);
-	set_page_private(page, 0);
-	put_page(page);
+	folio_clear_private(folio);
+	folio->private = NULL;
+	folio_put(folio);
 
 	return data;
 }
 
+static inline void attach_page_private(struct page *page, void *data)
+{
+	folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+	return folio_detach_private(page_folio(page));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *__page_cache_alloc(gfp_t gfp);
 #else