Commit e7a2ab7b authored by Baolin Wang, committed by Andrew Morton

mm: shmem: add mTHP support for anonymous shmem

Commit 19eaf449 added multi-size THP (mTHP) support for anonymous pages, allowing THP to be configured through the sysfs interface located at '/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/enabled'.

However, anonymous shmem ignores the anonymous mTHP rule configured through the sysfs interface and can only use PMD-mapped THP, which is not reasonable.  Users expect the mTHP rule to apply to all anonymous pages, including anonymous shmem, in order to enjoy the benefits of mTHP: for example, lower latency than PMD-mapped THP, less memory bloat than PMD-mapped THP, and contiguous PTEs on ARM architectures to reduce TLB misses.  In addition, the mTHP interfaces can be extended to support all shmem/tmpfs scenarios in the future, especially the shmem mmap() case.

The primary strategy is similar to the anonymous mTHP support.  Introduce a new interface, '/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/shmem_enabled', which accepts almost the same values as the top-level '/sys/kernel/mm/transparent_hugepage/shmem_enabled', adding a new "inherit" option and dropping the testing options 'force' and 'deny'.  By default all sizes are set to "never" except the PMD size, which is set to "inherit".  This preserves backward compatibility with the top-level anonymous shmem enabled setting, while also allowing independent control of anonymous shmem enablement for each mTHP size.
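As a rough illustration of the resulting knobs, the sketch below sets one mTHP size's anonymous shmem policy from userspace.  The 'hugepage-64kB' directory name and the chosen value are assumptions for the example: the available size directories depend on the orders the architecture supports, and the accepted values are those of the top-level shmem_enabled minus 'force' and 'deny', plus 'inherit'.

        /* Hypothetical example: enable "always" for the 64kB anonymous shmem mTHP size. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                /* Assumed size directory; adjust to a hugepage-XXkb entry present on your machine. */
                const char *knob =
                        "/sys/kernel/mm/transparent_hugepage/hugepage-64kB/shmem_enabled";
                const char *policy = "always";  /* or: inherit, within_size, advise, never */
                int fd = open(knob, O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (write(fd, policy, strlen(policy)) != (ssize_t)strlen(policy))
                        perror("write");
                close(fd);
                return 0;
        }

With the defaults described above (PMD size set to "inherit", all other sizes set to "never"), behaviour is unchanged unless a per-size knob is explicitly configured.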

Link: https://lkml.kernel.org/r/65796c1e72e51e15f3410195b5c2d5b6c160d411.1718090413.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4b989955
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -560,6 +560,16 @@ static inline bool thp_migration_supported(void)
 {
         return false;
 }
+
+static inline int highest_order(unsigned long orders)
+{
+        return 0;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+        return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 static inline int split_folio_to_list_to_order(struct folio *folio,
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1610,6 +1610,107 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
         return result;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static unsigned long shmem_allowable_huge_orders(struct inode *inode,
+                                struct vm_area_struct *vma, pgoff_t index,
+                                bool global_huge)
+{
+        unsigned long mask = READ_ONCE(huge_shmem_orders_always);
+        unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
+        unsigned long vm_flags = vma->vm_flags;
+        /*
+         * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
+         * are enabled for this vma.
+         */
+        unsigned long orders = BIT(PMD_ORDER + 1) - 1;
+        loff_t i_size;
+        int order;
+
+        if ((vm_flags & VM_NOHUGEPAGE) ||
+            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+                return 0;
+
+        /* If the hardware/firmware marked hugepage support disabled. */
+        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
+                return 0;
+
+        /*
+         * Following the 'deny' semantics of the top level, force the huge
+         * option off from all mounts.
+         */
+        if (shmem_huge == SHMEM_HUGE_DENY)
+                return 0;
+
+        /*
+         * Only allow inherit orders if the top-level value is 'force', which
+         * means non-PMD sized THP can not override 'huge' mount option now.
+         */
+        if (shmem_huge == SHMEM_HUGE_FORCE)
+                return READ_ONCE(huge_shmem_orders_inherit);
+
+        /* Allow mTHP that will be fully within i_size. */
+        order = highest_order(within_size_orders);
+        while (within_size_orders) {
+                index = round_up(index + 1, 1 << order);
+                i_size = round_up(i_size_read(inode), PAGE_SIZE);
+                if (i_size >> PAGE_SHIFT >= index) {
+                        mask |= within_size_orders;
+                        break;
+                }
+
+                order = next_order(&within_size_orders, order);
+        }
+
+        if (vm_flags & VM_HUGEPAGE)
+                mask |= READ_ONCE(huge_shmem_orders_madvise);
+
+        if (global_huge)
+                mask |= READ_ONCE(huge_shmem_orders_inherit);
+
+        return orders & mask;
+}
+
+static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
+                                           struct address_space *mapping, pgoff_t index,
+                                           unsigned long orders)
+{
+        struct vm_area_struct *vma = vmf->vma;
+        unsigned long pages;
+        int order;
+
+        orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+        if (!orders)
+                return 0;
+
+        /* Find the highest order that can add into the page cache */
+        order = highest_order(orders);
+        while (orders) {
+                pages = 1UL << order;
+                index = round_down(index, pages);
+                if (!xa_find(&mapping->i_pages, &index,
+                             index + pages - 1, XA_PRESENT))
+                        break;
+                order = next_order(&orders, order);
+        }
+
+        return orders;
+}
+#else
+static unsigned long shmem_allowable_huge_orders(struct inode *inode,
+                                struct vm_area_struct *vma, pgoff_t index,
+                                bool global_huge)
+{
+        return 0;
+}
+
+static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
+                                           struct address_space *mapping, pgoff_t index,
+                                           unsigned long orders)
+{
+        return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
                 struct shmem_inode_info *info, pgoff_t index)
 {
@@ -1624,21 +1725,28 @@ static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
         return folio;
 }
 
-static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
-                struct inode *inode, pgoff_t index,
-                struct mm_struct *fault_mm, bool huge)
+static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
+                gfp_t gfp, struct inode *inode, pgoff_t index,
+                struct mm_struct *fault_mm, unsigned long orders)
 {
         struct address_space *mapping = inode->i_mapping;
         struct shmem_inode_info *info = SHMEM_I(inode);
-        struct folio *folio;
+        struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
+        unsigned long suitable_orders = 0;
+        struct folio *folio = NULL;
         long pages;
-        int error;
+        int error, order;
 
         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
-                huge = false;
+                orders = 0;
 
-        if (huge) {
-                pages = HPAGE_PMD_NR;
-                index = round_down(index, HPAGE_PMD_NR);
+        if (orders > 0) {
+                if (vma && vma_is_anon_shmem(vma)) {
+                        suitable_orders = shmem_suitable_orders(inode, vmf,
+                                                        mapping, index, orders);
+                } else if (orders & BIT(HPAGE_PMD_ORDER)) {
+                        pages = HPAGE_PMD_NR;
+                        suitable_orders = BIT(HPAGE_PMD_ORDER);
+                        index = round_down(index, HPAGE_PMD_NR);
 
                 /*
@@ -1652,10 +1760,20 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
                 if (xa_find(&mapping->i_pages, &index,
                                 index + HPAGE_PMD_NR - 1, XA_PRESENT))
                         return ERR_PTR(-E2BIG);
+                }
 
-                folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index);
-                if (!folio && pages == HPAGE_PMD_NR)
-                        count_vm_event(THP_FILE_FALLBACK);
+                order = highest_order(suitable_orders);
+                while (suitable_orders) {
+                        pages = 1UL << order;
+                        index = round_down(index, pages);
+                        folio = shmem_alloc_folio(gfp, order, info, index);
+                        if (folio)
+                                goto allocated;
+
+                        if (pages == HPAGE_PMD_NR)
+                                count_vm_event(THP_FILE_FALLBACK);
+                        order = next_order(&suitable_orders, order);
+                }
         } else {
                 pages = 1;
                 folio = shmem_alloc_folio(gfp, 0, info, index);
@@ -1663,6 +1781,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 
         if (!folio)
                 return ERR_PTR(-ENOMEM);
 
+allocated:
         __folio_set_locked(folio);
         __folio_set_swapbacked(folio);
@@ -1957,7 +2076,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
         struct mm_struct *fault_mm;
         struct folio *folio;
         int error;
-        bool alloced;
+        bool alloced, huge;
+        unsigned long orders = 0;
 
         if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
                 return -EINVAL;
@@ -2029,14 +2149,21 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
                 return 0;
         }
 
-        if (shmem_is_huge(inode, index, false, fault_mm,
-                          vma ? vma->vm_flags : 0)) {
+        huge = shmem_is_huge(inode, index, false, fault_mm,
+                             vma ? vma->vm_flags : 0);
+        /* Find hugepage orders that are allowed for anonymous shmem. */
+        if (vma && vma_is_anon_shmem(vma))
+                orders = shmem_allowable_huge_orders(inode, vma, index, huge);
+        else if (huge)
+                orders = BIT(HPAGE_PMD_ORDER);
+
+        if (orders > 0) {
                 gfp_t huge_gfp;
 
                 huge_gfp = vma_thp_gfp_mask(vma);
                 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
-                folio = shmem_alloc_and_add_folio(huge_gfp,
-                                inode, index, fault_mm, true);
+                folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
+                                inode, index, fault_mm, orders);
                 if (!IS_ERR(folio)) {
                         if (folio_test_pmd_mappable(folio))
                                 count_vm_event(THP_FILE_ALLOC);
@@ -2046,7 +2173,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
                         goto repeat;
         }
 
-        folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
+        folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
         if (IS_ERR(folio)) {
                 error = PTR_ERR(folio);
                 if (error == -EEXIST)
@@ -2057,7 +2184,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 
 alloced:
         alloced = true;
-        if (folio_test_pmd_mappable(folio) &&
+        if (folio_test_large(folio) &&
             DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
                                         folio_next_index(folio) - 1) {
                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);