Commit a6b40850 authored by Muchun Song's avatar Muchun Song Committed by Linus Torvalds

mm: hugetlb: replace hugetlb_free_vmemmap_enabled with a static_key

The page_fixed_fake_head() is used throughout memory management, and its
conditional check requires reading a global variable. Although the
overhead of this check may be small, it increases when the memory cache
comes under pressure.  Also, the global variable will not be modified
after system boot, so it is very appropriate to use the static key mechanism.

Link: https://lkml.kernel.org/r/20211101031651.75851-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Barry Song <song.bao.hua@hisilicon.com>
Cc: Bodeddula Balasubramaniam <bodeddub@amazon.com>
Cc: Chen Huang <chenhuang5@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e7d32485
...@@ -1075,12 +1075,6 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr ...@@ -1075,12 +1075,6 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
} }
#endif /* CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled false
#endif
static inline spinlock_t *huge_pte_lock(struct hstate *h, static inline spinlock_t *huge_pte_lock(struct hstate *h,
struct mm_struct *mm, pte_t *pte) struct mm_struct *mm, pte_t *pte)
{ {
......
...@@ -191,7 +191,14 @@ enum pageflags { ...@@ -191,7 +191,14 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H #ifndef __GENERATING_BOUNDS_H
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled; DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
hugetlb_free_vmemmap_enabled_key);
/*
 * Test whether the HugeTLB "free vmemmap" optimization is active.
 *
 * Backed by a static key (hugetlb_free_vmemmap_enabled_key, declared above),
 * so on hot paths this compiles down to a runtime-patched branch rather than
 * a load of a global variable.  static_branch_maybe() selects the
 * likely/unlikely flavor from CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
 * matching the boot-time default of the feature.
 */
static __always_inline bool hugetlb_free_vmemmap_enabled(void)
{
return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
&hugetlb_free_vmemmap_enabled_key);
}
/* /*
* If the feature of freeing some vmemmap pages associated with each HugeTLB * If the feature of freeing some vmemmap pages associated with each HugeTLB
...@@ -211,7 +218,7 @@ extern bool hugetlb_free_vmemmap_enabled; ...@@ -211,7 +218,7 @@ extern bool hugetlb_free_vmemmap_enabled;
*/ */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page) static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{ {
if (!hugetlb_free_vmemmap_enabled) if (!hugetlb_free_vmemmap_enabled())
return page; return page;
/* /*
...@@ -239,6 +246,11 @@ static inline const struct page *page_fixed_fake_head(const struct page *page) ...@@ -239,6 +246,11 @@ static inline const struct page *page_fixed_fake_head(const struct page *page)
{ {
return page; return page;
} }
/*
 * Stub for CONFIG_HUGETLB_PAGE_FREE_VMEMMAP=n builds: the optimization can
 * never be enabled, so callers constant-fold the disabled path away.
 */
static inline bool hugetlb_free_vmemmap_enabled(void)
{
return false;
}
#endif #endif
static __always_inline int page_is_fake_head(struct page *page) static __always_inline int page_is_fake_head(struct page *page)
......
...@@ -188,9 +188,9 @@ ...@@ -188,9 +188,9 @@
#define RESERVE_VMEMMAP_NR 1U #define RESERVE_VMEMMAP_NR 1U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
bool hugetlb_free_vmemmap_enabled __read_mostly = DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON); hugetlb_free_vmemmap_enabled_key);
EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled); EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
static int __init early_hugetlb_free_vmemmap_param(char *buf) static int __init early_hugetlb_free_vmemmap_param(char *buf)
{ {
...@@ -204,9 +204,9 @@ static int __init early_hugetlb_free_vmemmap_param(char *buf) ...@@ -204,9 +204,9 @@ static int __init early_hugetlb_free_vmemmap_param(char *buf)
return -EINVAL; return -EINVAL;
if (!strcmp(buf, "on")) if (!strcmp(buf, "on"))
hugetlb_free_vmemmap_enabled = true; static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
else if (!strcmp(buf, "off")) else if (!strcmp(buf, "off"))
hugetlb_free_vmemmap_enabled = false; static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
else else
return -EINVAL; return -EINVAL;
...@@ -284,7 +284,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h) ...@@ -284,7 +284,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
BUILD_BUG_ON(__NR_USED_SUBPAGE >= BUILD_BUG_ON(__NR_USED_SUBPAGE >=
RESERVE_VMEMMAP_SIZE / sizeof(struct page)); RESERVE_VMEMMAP_SIZE / sizeof(struct page));
if (!hugetlb_free_vmemmap_enabled) if (!hugetlb_free_vmemmap_enabled())
return; return;
vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT; vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
......
...@@ -1327,7 +1327,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size) ...@@ -1327,7 +1327,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
* populate a single PMD. * populate a single PMD.
*/ */
return memmap_on_memory && return memmap_on_memory &&
!hugetlb_free_vmemmap_enabled && !hugetlb_free_vmemmap_enabled() &&
IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) && IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
size == memory_block_size_bytes() && size == memory_block_size_bytes() &&
IS_ALIGNED(vmemmap_size, PMD_SIZE) && IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment