Commit d9cc948f authored by Michal Hocko, committed by Linus Torvalds

mm, hugetlb: integrate giga hugetlb more naturally to the allocation path

Gigantic hugetlb pages were grafted onto the hugetlb code as an alien species
with a lot of special casing.  The allocation path is no exception, and
unnecessarily so.  It is true that the underlying allocator is different, but
that is an implementation detail.

This patch unifies the hugetlb allocation path that prepares fresh pool
pages.  alloc_fresh_gigantic_page basically copies the alloc_fresh_huge_page
logic, so we can move everything there.  This also simplifies
set_max_huge_pages, which no longer has to care about which kind of huge
page is being allocated.
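
For illustration only, a small user-space sketch of the resulting shape; all
names here (struct pool, alloc_fresh_pool_page, ...) are made-up stand-ins
rather than the kernel API.  The point it demonstrates is the one above: a
single fresh-page entry point picks the gigantic or the buddy-style allocator
internally, so the pool-resize loop never branches on the page size.

  /*
   * Illustrative stand-ins only, not kernel code.  One entry point decides
   * internally where a fresh pool page comes from.
   */
  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct pool {                 /* stand-in for struct hstate */
          unsigned int order;   /* page size is 2^order base pages */
          unsigned long nr_pages;       /* pages currently in the pool */
  };

  static bool pool_is_gigantic(const struct pool *p)
  {
          return p->order > 10; /* mirrors "order > MAX_ORDER" in spirit */
  }

  static void *alloc_gigantic(unsigned int order)
  {
          /* stand-in for carving out a huge contiguous range (CMA-like) */
          return calloc(1UL << order, 1);
  }

  static void *alloc_buddy(unsigned int order)
  {
          /* stand-in for a regular page-allocator call */
          return calloc(1UL << order, 1);
  }

  /* Single entry point: callers no longer care which kind of page this is. */
  static int alloc_fresh_pool_page(struct pool *p)
  {
          void *page = pool_is_gigantic(p) ? alloc_gigantic(p->order)
                                           : alloc_buddy(p->order);
          if (!page)
                  return 0;
          free(page);           /* the demo only counts the page ... */
          p->nr_pages++;        /* ... and "frees it into the pool" */
          return 1;
  }

  int main(void)
  {
          struct pool huge = { .order = 9,  .nr_pages = 0 };    /* 2MB-like */
          struct pool giga = { .order = 18, .nr_pages = 0 };    /* 1GB-like */

          /* The resize loop is identical for both pool kinds. */
          while (huge.nr_pages < 4 && alloc_fresh_pool_page(&huge))
                  ;
          while (giga.nr_pages < 2 && alloc_fresh_pool_page(&giga))
                  ;
          printf("huge pool: %lu pages, giga pool: %lu pages\n",
                 huge.nr_pages, giga.nr_pages);
          return 0;
  }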

Link: http://lkml.kernel.org/r/20180103093213.26329-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Reale <ar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent af0fb9df
@@ -1106,7 +1106,8 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, struct hstate *h)
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+		int nid, nodemask_t *nodemask)
 {
 	unsigned int order = huge_page_order(h);
 	unsigned long nr_pages = 1 << order;
@@ -1114,11 +1115,9 @@ static struct page *alloc_gigantic_page(int nid, struct hstate *h)
 	struct zonelist *zonelist;
 	struct zone *zone;
 	struct zoneref *z;
-	gfp_t gfp_mask;
 
-	gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 	zonelist = node_zonelist(nid, gfp_mask);
-	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
 		spin_lock_irqsave(&zone->lock, flags);
 
 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
@@ -1149,42 +1148,13 @@ static struct page *alloc_gigantic_page(int nid, struct hstate *h)
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
-static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
-{
-	struct page *page;
-
-	page = alloc_gigantic_page(nid, h);
-	if (page) {
-		prep_compound_gigantic_page(page, huge_page_order(h));
-		prep_new_huge_page(h, page, nid);
-		put_page(page); /* free it into the hugepage allocator */
-	}
-
-	return page;
-}
-
-static int alloc_fresh_gigantic_page(struct hstate *h,
-				nodemask_t *nodes_allowed)
-{
-	struct page *page = NULL;
-	int nr_nodes, node;
-
-	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-		page = alloc_fresh_gigantic_page_node(h, node);
-		if (page)
-			return 1;
-	}
-
-	return 0;
-}
-
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
 static inline bool gigantic_page_supported(void) { return false; }
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+		int nid, nodemask_t *nodemask) { return NULL; }
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
 						unsigned int order) { }
-static inline int alloc_fresh_gigantic_page(struct hstate *h,
-					nodemask_t *nodes_allowed) { return 0; }
 #endif
 
 static void update_and_free_page(struct hstate *h, struct page *page)
@@ -1410,8 +1380,12 @@ static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-		page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
-				node, nodes_allowed);
+		if (hstate_is_gigantic(h))
+			page = alloc_gigantic_page(h, gfp_mask,
+					node, nodes_allowed);
+		else
+			page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
+					node, nodes_allowed);
 		if (page)
 			break;
 	}
@@ -1420,6 +1394,8 @@ static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 	if (!page)
 		return 0;
 
+	if (hstate_is_gigantic(h))
+		prep_compound_gigantic_page(page, huge_page_order(h));
 	prep_new_huge_page(h, page, page_to_nid(page));
 	put_page(page); /* free it into the hugepage allocator */
 
@@ -2307,10 +2283,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 		/* yield cpu to avoid soft lockup */
 		cond_resched();
 
-		if (hstate_is_gigantic(h))
-			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
-		else
-			ret = alloc_fresh_huge_page(h, nodes_allowed);
+		ret = alloc_fresh_huge_page(h, nodes_allowed);
 		spin_lock(&hugetlb_lock);
 		if (!ret)
 			goto out;