Commit ee0913c4 authored by Kefeng Wang, committed by Andrew Morton

mm: add pageblock_aligned() macro

Add pageblock_aligned() and use it to simplify code.
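
For illustration only (not part of this patch): a minimal, compilable
userspace sketch showing that pageblock_aligned() is just the IS_ALIGNED()
power-of-two mask test that the open-coded
"(pfn & (pageblock_nr_pages - 1)) == 0" checks below implement by hand.
The pageblock_order value is hardcoded here purely for the demo, and
IS_ALIGNED() is a simplified stand-in with the same shape as the kernel
helper:

  #include <assert.h>
  #include <stdio.h>

  /* Demo value only; the kernel sets pageblock_order itself. */
  #define pageblock_order		9
  #define pageblock_nr_pages	(1UL << pageblock_order)

  /* Simplified stand-in with the same shape as the kernel's IS_ALIGNED(). */
  #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
  #define pageblock_aligned(pfn)	IS_ALIGNED((pfn), pageblock_nr_pages)

  int main(void)
  {
  	unsigned long pfn;

  	for (pfn = 0; pfn < 4 * pageblock_nr_pages; pfn++) {
  		/* The new macro matches the old open-coded mask test. */
  		assert(pageblock_aligned(pfn) ==
  		       ((pfn & (pageblock_nr_pages - 1)) == 0));
  	}
  	printf("pageblock_aligned() == open-coded mask test\n");
  	return 0;
  }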

Link: https://lkml.kernel.org/r/20220907060844.126891-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5f7fa13f
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -54,6 +54,7 @@ extern unsigned int pageblock_order;
 
 #define pageblock_nr_pages	(1UL << pageblock_order)
 #define pageblock_align(pfn)	ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn)	IS_ALIGNED((pfn), pageblock_nr_pages)
 #define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
 #define pageblock_end_pfn(pfn)	ALIGN((pfn) + 1, pageblock_nr_pages)
 
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -402,7 +402,7 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 	if (cc->ignore_skip_hint)
 		return false;
 
-	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
+	if (!pageblock_aligned(pfn))
 		return false;
 
 	skip = get_pageblock_skip(page);
@@ -884,7 +884,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	 * COMPACT_CLUSTER_MAX at a time so the second call must
 	 * not falsely conclude that the block should be skipped.
 	 */
-	if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
+	if (!valid_page && pageblock_aligned(low_pfn)) {
 		if (!isolation_suitable(cc, page)) {
 			low_pfn = end_pfn;
 			page = NULL;
@@ -1937,7 +1937,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 		 * before making it "skip" so other compaction instances do
 		 * not scan the same block.
 		 */
-		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
+		if (pageblock_aligned(low_pfn) &&
 		    !fast_find_block && !isolation_suitable(cc, page))
 			continue;
@@ -2123,7 +2123,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 	 * migration source is unmovable/reclaimable but it's not worth
 	 * special casing.
 	 */
-	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+	if (!pageblock_aligned(cc->migrate_pfn))
 		return COMPACT_CONTINUE;
 
 	/* Direct compactor: Is a suitable page free? */
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1085,8 +1085,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
 	 */
-	if (WARN_ON_ONCE(!nr_pages ||
-			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
+	if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
 			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
@@ -1806,8 +1805,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
 	 */
-	if (WARN_ON_ONCE(!nr_pages ||
-			 !IS_ALIGNED(start_pfn, pageblock_nr_pages) ||
+	if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
 			 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
...@@ -1892,15 +1892,14 @@ static void __init deferred_free_range(unsigned long pfn, ...@@ -1892,15 +1892,14 @@ static void __init deferred_free_range(unsigned long pfn,
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
/* Free a large naturally-aligned chunk if possible */ /* Free a large naturally-aligned chunk if possible */
if (nr_pages == pageblock_nr_pages && if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
(pfn & (pageblock_nr_pages - 1)) == 0) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE); set_pageblock_migratetype(page, MIGRATE_MOVABLE);
__free_pages_core(page, pageblock_order); __free_pages_core(page, pageblock_order);
return; return;
} }
for (i = 0; i < nr_pages; i++, page++, pfn++) { for (i = 0; i < nr_pages; i++, page++, pfn++) {
if ((pfn & (pageblock_nr_pages - 1)) == 0) if (pageblock_aligned(pfn))
set_pageblock_migratetype(page, MIGRATE_MOVABLE); set_pageblock_migratetype(page, MIGRATE_MOVABLE);
__free_pages_core(page, 0); __free_pages_core(page, 0);
} }
...@@ -1928,7 +1927,7 @@ static inline void __init pgdat_init_report_one_done(void) ...@@ -1928,7 +1927,7 @@ static inline void __init pgdat_init_report_one_done(void)
*/ */
static inline bool __init deferred_pfn_valid(unsigned long pfn) static inline bool __init deferred_pfn_valid(unsigned long pfn)
{ {
if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn)) if (pageblock_aligned(pfn) && !pfn_valid(pfn))
return false; return false;
return true; return true;
} }
...@@ -1940,14 +1939,13 @@ static inline bool __init deferred_pfn_valid(unsigned long pfn) ...@@ -1940,14 +1939,13 @@ static inline bool __init deferred_pfn_valid(unsigned long pfn)
static void __init deferred_free_pages(unsigned long pfn, static void __init deferred_free_pages(unsigned long pfn,
unsigned long end_pfn) unsigned long end_pfn)
{ {
unsigned long nr_pgmask = pageblock_nr_pages - 1;
unsigned long nr_free = 0; unsigned long nr_free = 0;
for (; pfn < end_pfn; pfn++) { for (; pfn < end_pfn; pfn++) {
if (!deferred_pfn_valid(pfn)) { if (!deferred_pfn_valid(pfn)) {
deferred_free_range(pfn - nr_free, nr_free); deferred_free_range(pfn - nr_free, nr_free);
nr_free = 0; nr_free = 0;
} else if (!(pfn & nr_pgmask)) { } else if (pageblock_aligned(pfn)) {
deferred_free_range(pfn - nr_free, nr_free); deferred_free_range(pfn - nr_free, nr_free);
nr_free = 1; nr_free = 1;
} else { } else {
...@@ -1967,7 +1965,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone, ...@@ -1967,7 +1965,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
unsigned long pfn, unsigned long pfn,
unsigned long end_pfn) unsigned long end_pfn)
{ {
unsigned long nr_pgmask = pageblock_nr_pages - 1;
int nid = zone_to_nid(zone); int nid = zone_to_nid(zone);
unsigned long nr_pages = 0; unsigned long nr_pages = 0;
int zid = zone_idx(zone); int zid = zone_idx(zone);
...@@ -1977,7 +1974,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone, ...@@ -1977,7 +1974,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
if (!deferred_pfn_valid(pfn)) { if (!deferred_pfn_valid(pfn)) {
page = NULL; page = NULL;
continue; continue;
} else if (!page || !(pfn & nr_pgmask)) { } else if (!page || pageblock_aligned(pfn)) {
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
} else { } else {
page++; page++;
...@@ -6759,7 +6756,7 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone ...@@ -6759,7 +6756,7 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
* such that unmovable allocations won't be scattered all * such that unmovable allocations won't be scattered all
* over the place during system boot. * over the place during system boot.
*/ */
if (IS_ALIGNED(pfn, pageblock_nr_pages)) { if (pageblock_aligned(pfn)) {
set_pageblock_migratetype(page, migratetype); set_pageblock_migratetype(page, migratetype);
cond_resched(); cond_resched();
} }
...@@ -6802,7 +6799,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, ...@@ -6802,7 +6799,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
* Please note that MEMINIT_HOTPLUG path doesn't clear memmap * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
* because this is done early in section_activate() * because this is done early in section_activate()
*/ */
if (IS_ALIGNED(pfn, pageblock_nr_pages)) { if (pageblock_aligned(pfn)) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE); set_pageblock_migratetype(page, MIGRATE_MOVABLE);
cond_resched(); cond_resched();
} }
......
...@@ -312,7 +312,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, ...@@ -312,7 +312,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
struct zone *zone; struct zone *zone;
int ret; int ret;
VM_BUG_ON(!IS_ALIGNED(boundary_pfn, pageblock_nr_pages)); VM_BUG_ON(!pageblock_aligned(boundary_pfn));
if (isolate_before) if (isolate_before)
isolate_pageblock = boundary_pfn - pageblock_nr_pages; isolate_pageblock = boundary_pfn - pageblock_nr_pages;
......