Commit b03641af authored by Dan Williams's avatar Dan Williams Committed by Linus Torvalds

mm: move buddy list manipulations into helpers

In preparation for runtime randomization of the zone lists, take all
(well, most of) the list_*() functions in the buddy allocator and put
them in helper functions.  Provide a common control point for injecting
additional behavior when freeing pages.

[dan.j.williams@intel.com: fix buddy list helpers]
  Link: http://lkml.kernel.org/r/155033679702.1773410.13041474192173212653.stgit@dwillia2-desk3.amr.corp.intel.com
[vbabka@suse.cz: remove del_page_from_free_area() migratetype parameter]
  Link: http://lkml.kernel.org/r/4672701b-6775-6efd-0797-b6242591419e@suse.cz
Link: http://lkml.kernel.org/r/154899812264.3165233.5219320056406926223.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Robert Elliott <elliott@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e900a918
...@@ -536,9 +536,6 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) ...@@ -536,9 +536,6 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
struct mmu_gather; struct mmu_gather;
struct inode; struct inode;
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) #if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd) static inline int pmd_devmap(pmd_t pmd)
{ {
......
...@@ -220,6 +220,9 @@ struct page { ...@@ -220,6 +220,9 @@ struct page {
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
struct page_frag_cache { struct page_frag_cache {
void * va; void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/pageblock-flags.h> #include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h> #include <linux/page-flags-layout.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h> #include <asm/page.h>
/* Free memory management - zoned buddy allocator. */ /* Free memory management - zoned buddy allocator. */
...@@ -98,6 +100,50 @@ struct free_area { ...@@ -98,6 +100,50 @@ struct free_area {
unsigned long nr_free; unsigned long nr_free;
}; };
/*
 * Link a page at the head of the free list for @migratetype and account
 * for it.  The page must not currently be on any list.
 */
static inline void add_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	struct list_head *list = &area->free_list[migratetype];

	area->nr_free++;
	list_add(&page->lru, list);
}
/*
 * Link a page at the tail of the free list for @migratetype and account
 * for it.  The page must not currently be on any list.
 */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
				  int migratetype)
{
	struct list_head *list = &area->free_list[migratetype];

	area->nr_free++;
	list_add_tail(&page->lru, list);
}
/*
 * Relocate a page that is already on some list onto the free list for
 * @migratetype.  nr_free is not touched: the page is assumed to already
 * be accounted for in @area.
 */
static inline void move_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	struct list_head *list = &area->free_list[migratetype];

	list_move(&page->lru, list);
}
/*
 * Return the first page on the free list for @migratetype, or NULL if
 * that list is empty.  The page is not removed from the list.
 */
static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	struct list_head *list = &area->free_list[migratetype];

	return list_first_entry_or_null(list, struct page, lru);
}
/*
 * Unlink a page from whichever free list it sits on, drop its buddy
 * state (PageBuddy flag and the order stashed in page->private), and
 * account for the removal.
 */
static inline void del_page_from_free_area(struct page *page,
		struct free_area *area)
{
	area->nr_free--;
	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}
/* True when @area holds no free pages of the given @migratetype. */
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	struct list_head *list = &area->free_list[migratetype];

	return list_empty(list);
}
struct pglist_data; struct pglist_data;
/* /*
......
...@@ -1888,13 +1888,13 @@ static enum compact_result __compact_finished(struct compact_control *cc) ...@@ -1888,13 +1888,13 @@ static enum compact_result __compact_finished(struct compact_control *cc)
bool can_steal; bool can_steal;
/* Job done if page is free of the right migratetype */ /* Job done if page is free of the right migratetype */
if (!list_empty(&area->free_list[migratetype])) if (!free_area_empty(area, migratetype))
return COMPACT_SUCCESS; return COMPACT_SUCCESS;
#ifdef CONFIG_CMA #ifdef CONFIG_CMA
/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
if (migratetype == MIGRATE_MOVABLE && if (migratetype == MIGRATE_MOVABLE &&
!list_empty(&area->free_list[MIGRATE_CMA])) !free_area_empty(area, MIGRATE_CMA))
return COMPACT_SUCCESS; return COMPACT_SUCCESS;
#endif #endif
/* /*
......
...@@ -756,12 +756,6 @@ static inline void set_page_order(struct page *page, unsigned int order) ...@@ -756,12 +756,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
__SetPageBuddy(page); __SetPageBuddy(page);
} }
static inline void rmv_page_order(struct page *page)
{
__ClearPageBuddy(page);
set_page_private(page, 0);
}
/* /*
* This function checks whether a page is free && is the buddy * This function checks whether a page is free && is the buddy
* we can coalesce a page and its buddy if * we can coalesce a page and its buddy if
...@@ -919,13 +913,10 @@ static inline void __free_one_page(struct page *page, ...@@ -919,13 +913,10 @@ static inline void __free_one_page(struct page *page,
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
* merge with it and move up one order. * merge with it and move up one order.
*/ */
if (page_is_guard(buddy)) { if (page_is_guard(buddy))
clear_page_guard(zone, buddy, order, migratetype); clear_page_guard(zone, buddy, order, migratetype);
} else { else
list_del(&buddy->lru); del_page_from_free_area(buddy, &zone->free_area[order]);
zone->free_area[order].nr_free--;
rmv_page_order(buddy);
}
combined_pfn = buddy_pfn & pfn; combined_pfn = buddy_pfn & pfn;
page = page + (combined_pfn - pfn); page = page + (combined_pfn - pfn);
pfn = combined_pfn; pfn = combined_pfn;
...@@ -975,15 +966,13 @@ static inline void __free_one_page(struct page *page, ...@@ -975,15 +966,13 @@ static inline void __free_one_page(struct page *page,
higher_buddy = higher_page + (buddy_pfn - combined_pfn); higher_buddy = higher_page + (buddy_pfn - combined_pfn);
if (pfn_valid_within(buddy_pfn) && if (pfn_valid_within(buddy_pfn) &&
page_is_buddy(higher_page, higher_buddy, order + 1)) { page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru, add_to_free_area_tail(page, &zone->free_area[order],
&zone->free_area[order].free_list[migratetype]); migratetype);
goto out; return;
} }
} }
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); add_to_free_area(page, &zone->free_area[order], migratetype);
out:
zone->free_area[order].nr_free++;
} }
/* /*
...@@ -1974,8 +1963,7 @@ static inline void expand(struct zone *zone, struct page *page, ...@@ -1974,8 +1963,7 @@ static inline void expand(struct zone *zone, struct page *page,
if (set_page_guard(zone, &page[size], high, migratetype)) if (set_page_guard(zone, &page[size], high, migratetype))
continue; continue;
list_add(&page[size].lru, &area->free_list[migratetype]); add_to_free_area(&page[size], area, migratetype);
area->nr_free++;
set_page_order(&page[size], high); set_page_order(&page[size], high);
} }
} }
...@@ -2117,13 +2105,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, ...@@ -2117,13 +2105,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
/* Find a page of the appropriate size in the preferred list */ /* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) { for (current_order = order; current_order < MAX_ORDER; ++current_order) {
area = &(zone->free_area[current_order]); area = &(zone->free_area[current_order]);
page = list_first_entry_or_null(&area->free_list[migratetype], page = get_page_from_free_area(area, migratetype);
struct page, lru);
if (!page) if (!page)
continue; continue;
list_del(&page->lru); del_page_from_free_area(page, area);
rmv_page_order(page);
area->nr_free--;
expand(zone, page, order, current_order, area, migratetype); expand(zone, page, order, current_order, area, migratetype);
set_pcppage_migratetype(page, migratetype); set_pcppage_migratetype(page, migratetype);
return page; return page;
...@@ -2209,8 +2194,7 @@ static int move_freepages(struct zone *zone, ...@@ -2209,8 +2194,7 @@ static int move_freepages(struct zone *zone,
} }
order = page_order(page); order = page_order(page);
list_move(&page->lru, move_to_free_area(page, &zone->free_area[order], migratetype);
&zone->free_area[order].free_list[migratetype]);
page += 1 << order; page += 1 << order;
pages_moved += 1 << order; pages_moved += 1 << order;
} }
...@@ -2398,7 +2382,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, ...@@ -2398,7 +2382,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
single_page: single_page:
area = &zone->free_area[current_order]; area = &zone->free_area[current_order];
list_move(&page->lru, &area->free_list[start_type]); move_to_free_area(page, area, start_type);
} }
/* /*
...@@ -2422,7 +2406,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, ...@@ -2422,7 +2406,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
if (fallback_mt == MIGRATE_TYPES) if (fallback_mt == MIGRATE_TYPES)
break; break;
if (list_empty(&area->free_list[fallback_mt])) if (free_area_empty(area, fallback_mt))
continue; continue;
if (can_steal_fallback(order, migratetype)) if (can_steal_fallback(order, migratetype))
...@@ -2509,9 +2493,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, ...@@ -2509,9 +2493,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order < MAX_ORDER; order++) {
struct free_area *area = &(zone->free_area[order]); struct free_area *area = &(zone->free_area[order]);
page = list_first_entry_or_null( page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
&area->free_list[MIGRATE_HIGHATOMIC],
struct page, lru);
if (!page) if (!page)
continue; continue;
...@@ -2634,8 +2616,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, ...@@ -2634,8 +2616,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
VM_BUG_ON(current_order == MAX_ORDER); VM_BUG_ON(current_order == MAX_ORDER);
do_steal: do_steal:
page = list_first_entry(&area->free_list[fallback_mt], page = get_page_from_free_area(area, fallback_mt);
struct page, lru);
steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
can_steal); can_steal);
...@@ -3072,6 +3053,7 @@ EXPORT_SYMBOL_GPL(split_page); ...@@ -3072,6 +3053,7 @@ EXPORT_SYMBOL_GPL(split_page);
int __isolate_free_page(struct page *page, unsigned int order) int __isolate_free_page(struct page *page, unsigned int order)
{ {
struct free_area *area = &page_zone(page)->free_area[order];
unsigned long watermark; unsigned long watermark;
struct zone *zone; struct zone *zone;
int mt; int mt;
...@@ -3096,9 +3078,8 @@ int __isolate_free_page(struct page *page, unsigned int order) ...@@ -3096,9 +3078,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
} }
/* Remove page from free list */ /* Remove page from free list */
list_del(&page->lru);
zone->free_area[order].nr_free--; del_page_from_free_area(page, area);
rmv_page_order(page);
/* /*
* Set the pageblock if the isolated page is at least half of a * Set the pageblock if the isolated page is at least half of a
...@@ -3395,13 +3376,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, ...@@ -3395,13 +3376,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
continue; continue;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
if (!list_empty(&area->free_list[mt])) if (!free_area_empty(area, mt))
return true; return true;
} }
#ifdef CONFIG_CMA #ifdef CONFIG_CMA
if ((alloc_flags & ALLOC_CMA) && if ((alloc_flags & ALLOC_CMA) &&
!list_empty(&area->free_list[MIGRATE_CMA])) { !free_area_empty(area, MIGRATE_CMA)) {
return true; return true;
} }
#endif #endif
...@@ -5328,7 +5309,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) ...@@ -5328,7 +5309,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
types[order] = 0; types[order] = 0;
for (type = 0; type < MIGRATE_TYPES; type++) { for (type = 0; type < MIGRATE_TYPES; type++) {
if (!list_empty(&area->free_list[type])) if (!free_area_empty(area, type))
types[order] |= 1 << type; types[order] |= 1 << type;
} }
} }
...@@ -8501,9 +8482,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -8501,9 +8482,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
pr_info("remove from free list %lx %d %lx\n", pr_info("remove from free list %lx %d %lx\n",
pfn, 1 << order, end_pfn); pfn, 1 << order, end_pfn);
#endif #endif
list_del(&page->lru); del_page_from_free_area(page, &zone->free_area[order]);
rmv_page_order(page);
zone->free_area[order].nr_free--;
for (i = 0; i < (1 << order); i++) for (i = 0; i < (1 << order); i++)
SetPageReserved((page+i)); SetPageReserved((page+i));
pfn += (1 << order); pfn += (1 << order);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment