Commit a6ffdc07 authored by Xishi Qiu's avatar Xishi Qiu Committed by Linus Torvalds

mm: use is_migrate_highatomic() to simplify the code

Introduce two helpers, is_migrate_highatomic() and is_migrate_highatomic_page().

Simplify the code, no functional changes.

[akpm@linux-foundation.org: use static inlines rather than macros, per mhocko]
Link: http://lkml.kernel.org/r/58B94F15.6060606@huawei.com
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 322b8afe
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
*/ */
#define PAGE_ALLOC_COSTLY_ORDER 3 #define PAGE_ALLOC_COSTLY_ORDER 3
enum { enum migratetype {
MIGRATE_UNMOVABLE, MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE, MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE, MIGRATE_RECLAIMABLE,
......
...@@ -510,4 +510,14 @@ extern const struct trace_print_flags pageflag_names[]; ...@@ -510,4 +510,14 @@ extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[]; extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[]; extern const struct trace_print_flags gfpflag_names[];
static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
return migratetype == MIGRATE_HIGHATOMIC;
}
static inline bool is_migrate_highatomic_page(struct page *page)
{
return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}
#endif /* __MM_INTERNAL_H */ #endif /* __MM_INTERNAL_H */
...@@ -2036,8 +2036,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, ...@@ -2036,8 +2036,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
/* Yoink! */ /* Yoink! */
mt = get_pageblock_migratetype(page); mt = get_pageblock_migratetype(page);
if (mt != MIGRATE_HIGHATOMIC && if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
!is_migrate_isolate(mt) && !is_migrate_cma(mt)) { && !is_migrate_cma(mt)) {
zone->nr_reserved_highatomic += pageblock_nr_pages; zone->nr_reserved_highatomic += pageblock_nr_pages;
set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
...@@ -2094,8 +2094,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, ...@@ -2094,8 +2094,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* from highatomic to ac->migratetype. So we should * from highatomic to ac->migratetype. So we should
* adjust the count once. * adjust the count once.
*/ */
if (get_pageblock_migratetype(page) == if (is_migrate_highatomic_page(page)) {
MIGRATE_HIGHATOMIC) {
/* /*
* It should never happen but changes to * It should never happen but changes to
* locking could inadvertently allow a per-cpu * locking could inadvertently allow a per-cpu
...@@ -2152,8 +2151,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) ...@@ -2152,8 +2151,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
page = list_first_entry(&area->free_list[fallback_mt], page = list_first_entry(&area->free_list[fallback_mt],
struct page, lru); struct page, lru);
if (can_steal && if (can_steal && !is_migrate_highatomic_page(page))
get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
steal_suitable_fallback(zone, page, start_migratetype); steal_suitable_fallback(zone, page, start_migratetype);
/* Remove the page from the freelists */ /* Remove the page from the freelists */
...@@ -2493,7 +2491,7 @@ void free_hot_cold_page(struct page *page, bool cold) ...@@ -2493,7 +2491,7 @@ void free_hot_cold_page(struct page *page, bool cold)
/* /*
* We only track unmovable, reclaimable and movable on pcp lists. * We only track unmovable, reclaimable and movable on pcp lists.
* Free ISOLATE pages back to the allocator because they are being * Free ISOLATE pages back to the allocator because they are being
* offlined but treat RESERVE as movable pages so we can get those * offlined but treat HIGHATOMIC as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free * areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator * excessively into the page allocator
*/ */
...@@ -2603,7 +2601,7 @@ int __isolate_free_page(struct page *page, unsigned int order) ...@@ -2603,7 +2601,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
for (; page < endpage; page += pageblock_nr_pages) { for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page); int mt = get_pageblock_migratetype(page);
if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
&& mt != MIGRATE_HIGHATOMIC) && !is_migrate_highatomic(mt))
set_pageblock_migratetype(page, set_pageblock_migratetype(page,
MIGRATE_MOVABLE); MIGRATE_MOVABLE);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment