Commit 29943248 authored by Wei Yang, committed by Andrew Morton

mm: improve code consistency with zonelist_* helper functions

Replace direct access to zoneref->zone, zoneref->zone_idx, or
zone_to_nid(zoneref->zone) with the corresponding zonelist_* helper
functions for consistency.

No functional change.

Link: https://lkml.kernel.org/r/20240729091717.464-1-shivankg@amd.com
Co-developed-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9325b8b5
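For reference, the zonelist_* accessors this patch standardizes on are thin wrappers around the struct zoneref fields. A minimal sketch, roughly as they are defined in include/linux/mmzone.h (trimmed to the three helpers the diff uses):

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	/* The zone this zonelist entry points at (NULL past the end). */
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	/* Cached zone index; avoids recomputing zone_idx(zone). */
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	/* NUMA node id of the referenced zone. */
	return zone_to_nid(zoneref->zone);
}

Because the helpers are inline wrappers over the same fields, switching callers to them is purely cosmetic, which is why the log can claim "No functional change."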
@@ -1688,7 +1688,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone; \
+	for (zone = zonelist_zone(z); \
 		zone; \
 		z = next_zones_zonelist(++z, highidx, nodemask), \
 			zone = zonelist_zone(z))
@@ -1724,7 +1724,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2809,7 +2809,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
 			z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
@@ -3350,7 +3350,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
@@ -3358,7 +3358,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
@@ -3411,7 +3411,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3433,7 +3433,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4202,7 +4202,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4214,7 +4214,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
@@ -4571,8 +4571,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
@@ -4631,7 +4631,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4689,7 +4689,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5294,7 +5294,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zonelist_node_idx(z);
 }
 #endif