Commit 84429b67 authored by Matthew Wilcox (Oracle), committed by Christian Brauner

fs: Allow fine-grained control of folio sizes

We need filesystems to be able to communicate acceptable folio sizes
to the pagecache for a variety of uses (e.g. large block sizes).
Support a range of folio sizes between order-0 and order-31.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Co-developed-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Link: https://lore.kernel.org/r/20240822135018.1931258-2-kernel@pankajraghav.com
Tested-by: David Howells <dhowells@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Daniel Gomez <da.gomez@samsung.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 8400291e
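
For context on how a filesystem would consume this API: a filesystem whose block size exceeds the page size would pin the minimum folio order from its inode constructor. A minimal sketch follows (the examplefs name and the order-2 value are illustrative assumptions, not part of this patch), using only the helpers this commit introduces:

	/* Hypothetical caller, not part of this patch. */
	static void examplefs_init_mapping(struct inode *inode)
	{
		/*
		 * 16KiB filesystem blocks on 4KiB pages: every folio must be
		 * at least order 2 so a block is never split across folios.
		 */
		mapping_set_folio_order_range(inode->i_mapping, 2,
					      MAX_PAGECACHE_ORDER);
	}
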
include/linux/pagemap.h
@@ -204,14 +204,21 @@ enum mapping_flags {
 	AS_EXITING = 4,		/* final truncate in progress */
 	/* writeback related tags are not used */
 	AS_NO_WRITEBACK_TAGS = 5,
-	AS_LARGE_FOLIO_SUPPORT = 6,
-	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
-	AS_STABLE_WRITES,	/* must wait for writeback before modifying
+	AS_RELEASE_ALWAYS = 6,	/* Call ->release_folio(), even if no private data */
+	AS_STABLE_WRITES = 7,	/* must wait for writeback before modifying
 				   folio contents */
-	AS_INACCESSIBLE,	/* Do not attempt direct R/W access to the mapping,
-				   including to move the mapping */
+	AS_INACCESSIBLE = 8,	/* Do not attempt direct R/W access to the mapping */
+	/* Bits 16-25 are used for FOLIO_ORDER */
+	AS_FOLIO_ORDER_BITS = 5,
+	AS_FOLIO_ORDER_MIN = 16,
+	AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
 };
 
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
+
 /**
  * mapping_set_error - record a writeback error in the address_space
  * @mapping: the mapping in which an error should be set
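
The encoding above packs both orders into mapping->flags: bits 16-20 hold the minimum order and bits 21-25 the maximum (AS_FOLIO_ORDER_MAX = 16 + 5 = 21). A self-contained userspace sketch of the same arithmetic, for illustration only:

	#include <assert.h>

	#define AS_FOLIO_ORDER_BITS	5
	#define AS_FOLIO_ORDER_MIN	16
	#define AS_FOLIO_ORDER_MAX	(AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS)
	#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
	#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
	#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)

	int main(void)
	{
		unsigned long flags;

		/* Pack min = 2, max = 9 the same way the kernel macros do. */
		flags = (2ul << AS_FOLIO_ORDER_MIN) | (9ul << AS_FOLIO_ORDER_MAX);
		assert(((flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN) == 2);
		assert(((flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX) == 9);
		return 0;
	}

Five bits per field comfortably covers the order-0 to order-31 range advertised in the commit message.
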
@@ -367,9 +374,51 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
 #define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
 
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (ie there is
+ * something the core cannot know).
+ * Do not tune it based on, eg, i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+						 unsigned int min,
+						 unsigned int max)
+{
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return;
+
+	if (min > MAX_PAGECACHE_ORDER)
+		min = MAX_PAGECACHE_ORDER;
+
+	if (max > MAX_PAGECACHE_ORDER)
+		max = MAX_PAGECACHE_ORDER;
+
+	if (max < min)
+		max = min;
+
+	mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+		(min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+					       unsigned int min)
+{
+	mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
 /**
  * mapping_set_large_folios() - Indicate the file supports large folios.
- * @mapping: The file.
+ * @mapping: The address space of the file.
  *
  * The filesystem should call this function in its inode constructor to
  * indicate that the VFS can use large folios to cache the contents of
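
Note that mapping_set_folio_order_range() clamps rather than rejects out-of-range values. For example (the values here are assumed purely for illustration), on a build where MAX_PAGECACHE_ORDER evaluated to 8:

	/* Both arguments exceed the limit, so this stores min = 8, max = 8. */
	mapping_set_folio_order_range(mapping, 12, 20);

Together with the max < min fixup, the stored range is always self-consistent, which lets later readers use the extracted orders without revalidating them.
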
@@ -380,7 +429,23 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
  */
 static inline void mapping_set_large_folios(struct address_space *mapping)
 {
-	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+	mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return 0;
+	return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return 0;
+	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
 }
 
 /*
@@ -389,20 +454,17 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
  */
 static inline bool mapping_large_folio_support(struct address_space *mapping)
 {
-	/* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
 	VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
 			"Anonymous mapping always supports large folio");
 
-	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+	return mapping_max_folio_order(mapping) > 0;
 }
 
 /* Return the maximum folio size for this pagecache mapping, in bytes. */
-static inline size_t mapping_max_folio_size(struct address_space *mapping)
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
 {
-	if (mapping_large_folio_support(mapping))
-		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
-	return PAGE_SIZE;
+	return PAGE_SIZE << mapping_max_folio_order(mapping);
 }
 
 static inline int filemap_nr_thps(struct address_space *mapping)
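
With the single support bit replaced by an order range, the predicates above reduce to arithmetic on the stored maximum. A worked example, assuming 4KiB pages and CONFIG_TRANSPARENT_HUGEPAGE, and not part of the patch:

	mapping_set_folio_order_range(mapping, 2, 5);
	/* mapping_max_folio_order() == 5, so large folios are supported... */
	VM_BUG_ON(!mapping_large_folio_support(mapping));
	/* ...and the largest folio is PAGE_SIZE << 5 == 128KiB. */
	VM_BUG_ON(mapping_max_folio_size(mapping) != (PAGE_SIZE << 5));

A range of (0, 0) conversely yields mapping_max_folio_size() == PAGE_SIZE and disables large folios, matching the old behaviour when AS_LARGE_FOLIO_SUPPORT was clear.
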
mm/filemap.c
@@ -1933,10 +1933,8 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
 
-		if (!mapping_large_folio_support(mapping))
-			order = 0;
-		if (order > MAX_PAGECACHE_ORDER)
-			order = MAX_PAGECACHE_ORDER;
+		if (order > mapping_max_folio_order(mapping))
+			order = mapping_max_folio_order(mapping);
 		/* If we're not aligned, allocate a smaller folio */
 		if (index & ((1UL << order) - 1))
 			order = __ffs(index);
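
The clamp above replaces the old global MAX_PAGECACHE_ORDER cap with the per-mapping maximum; the alignment fallback that follows it is unchanged. As a worked example (not from the patch): with order == 4 requested at index == 20 (binary 10100), index & ((1UL << 4) - 1) == 4 is nonzero, so the folio would not be naturally aligned in the file; __ffs(20) == 2 drops it to an order-2 folio, which is aligned since 20 is a multiple of 4.
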
mm/readahead.c
@@ -449,10 +449,10 @@ void page_cache_ra_order(struct readahead_control *ractl,
 
 	limit = min(limit, index + ra->size - 1);
 
-	if (new_order < MAX_PAGECACHE_ORDER)
+	if (new_order < mapping_max_folio_order(mapping))
 		new_order += 2;
-	new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+	new_order = min(mapping_max_folio_order(mapping), new_order);
 	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
 
 	/* See comment in page_cache_ra_unbounded() */
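
The readahead ramp keeps its shape but is now bounded per mapping: each round may raise the order by two, capped first by mapping_max_folio_order() and then by ilog2(ra->size). As a worked example (values assumed for illustration): with mapping_max_folio_order() == 4, ra->size == 64 pages and new_order == 3, the sequence gives 3 + 2 = 5, then min(4, 5) = 4, then min(4, ilog2(64) = 6) = 4.
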