Commit 5f7fa13f authored by Kefeng Wang's avatar Kefeng Wang Committed by Andrew Morton

mm: add pageblock_align() macro

Add pageblock_align() macro and use it to simplify code.

Link: https://lkml.kernel.org/r/20220907060844.126891-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4f9bc69a
...@@ -53,6 +53,7 @@ extern unsigned int pageblock_order; ...@@ -53,6 +53,7 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order) #define pageblock_nr_pages (1UL << pageblock_order)
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages) #define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages) #define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
......
...@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void) ...@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside * presume that there are no holes in the memory map inside
* a pageblock * a pageblock
*/ */
prev_end = ALIGN(end, pageblock_nr_pages); prev_end = pageblock_align(end);
} }
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
prev_end = ALIGN(end, pageblock_nr_pages); prev_end = pageblock_align(end);
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
} }
#endif #endif
......
...@@ -533,7 +533,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -533,7 +533,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
struct page *page; struct page *page;
/* isolation is done at page block granularity */ /* isolation is done at page block granularity */
unsigned long isolate_start = pageblock_start_pfn(start_pfn); unsigned long isolate_start = pageblock_start_pfn(start_pfn);
unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages); unsigned long isolate_end = pageblock_align(end_pfn);
int ret; int ret;
bool skip_isolation = false; bool skip_isolation = false;
...@@ -580,7 +580,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -580,7 +580,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn; unsigned long pfn;
struct page *page; struct page *page;
unsigned long isolate_start = pageblock_start_pfn(start_pfn); unsigned long isolate_start = pageblock_start_pfn(start_pfn);
unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages); unsigned long isolate_end = pageblock_align(end_pfn);
for (pfn = isolate_start; for (pfn = isolate_start;
pfn < isolate_end; pfn < isolate_end;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment