Commit 0f2aa404 authored by Naoya Horiguchi, committed by Luis Henriques

mm/hugetlb: introduce minimum hugepage order

commit 641844f5 upstream.

Currently the initial value of order in dissolve_free_huge_pages() is 64 or
32, which leads to the following warning from a static checker:

  mm/hugetlb.c:1203 dissolve_free_huge_pages()
  warn: potential right shift more than type allows '9,18,64'

This risks an infinite loop, because 1 << order (== 0) is used as the step
in a for-loop like this:

  for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
      ...
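
To make the failure mode concrete, here is a minimal user-space sketch (an
illustration, not kernel code) of what the old initializer does. Shifting a
value by at least its own width is undefined behavior in C, and a step that
degenerates to 0 means pfn never advances:

  #include <stdio.h>

  int main(void)
  {
          /* The old initializer: 64 on a 64-bit build, 32 on a 32-bit one. */
          unsigned int order = 8 * sizeof(void *);

          /*
           * Shifting by >= the width of the type is undefined behavior;
           * in practice the step can come out as 0, so
           * "pfn += 1 << order" would never advance pfn.
           */
          if (order >= 8 * sizeof(unsigned long)) {
                  printf("1 << %u overflows: step degenerates to 0\n", order);
                  return 1;
          }
          printf("step = %lu pages\n", 1UL << order);
          return 0;
  }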

So this patch fixes it by using a global minimum_order calculated at boot time.
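
The shape of the fix, again as a user-space sketch (the hstate_orders table
and the init_minimum_order() helper are made-up stand-ins for the kernel
structures, not the kernel API): compute the smallest order once at init
time, then use it as a well-defined, non-zero scan step.

  #include <assert.h>
  #include <limits.h>
  #include <stdio.h>

  /* Stand-in for the kernel's hstate table: 2MB and 1GB pages on x86-64. */
  static const unsigned int hstate_orders[] = { 9, 18 };
  static unsigned int minimum_order = UINT_MAX;

  /* Mirrors hugetlb_init_hstates(): pick the smallest order once at boot. */
  static void init_minimum_order(void)
  {
          for (size_t i = 0; i < sizeof(hstate_orders) / sizeof(*hstate_orders); i++)
                  if (minimum_order > hstate_orders[i])
                          minimum_order = hstate_orders[i];
          assert(minimum_order != UINT_MAX);      /* like the VM_BUG_ON() */
  }

  int main(void)
  {
          init_minimum_order();
          /* 1 << minimum_order is now a non-zero, well-defined step. */
          printf("scan step = %lu pages\n", 1UL << minimum_order);
          return 0;
  }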

    text    data     bss     dec     hex filename
   28313     469   84236  113018   1b97a mm/hugetlb.o
   28256     473   84236  112965   1b945 mm/hugetlb.o (patched)

Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 911fb28e
@@ -41,6 +41,11 @@ unsigned long hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
@@ -1085,19 +1090,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
@@ -1524,10 +1523,14 @@ static void __init hugetlb_init_hstates(void)
 	struct hstate *h;
 
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)