Commit b92ca18e authored by Mel Gorman, committed by Linus Torvalds

mm/page_alloc: disassociate the pcp->high from pcp->batch

The pcp high watermark is based on the batch size but there is no
relationship between them other than it is convenient to use early in
boot.

This patch takes the first step and bases pcp->high on the zone low
watermark split across the number of CPUs local to a zone while the batch
size remains the same to avoid increasing allocation latencies.  The
intent behind the default pcp->high is "set the number of PCP pages such
that if they are all full that background reclaim is not started
prematurely".
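
As a rough sketch of that default calculation (illustrative only; the
authoritative version is the new zone_highsize() helper in the diff below,
and the standalone function name here is invented for the example):

    /*
     * Illustration only: split the zone low watermark across the CPUs
     * local to the zone, but never go below the historical batch*4
     * relationship. Hypothetical standalone helper, not the kernel's
     * zone_highsize().
     */
    static int pcp_high_sketch(unsigned long zone_low_wmark,
                               unsigned int nr_local_cpus, int batch)
    {
            int high;

            /* Early in boot the zone's CPUs may not be online yet. */
            if (nr_local_cpus == 0)
                    nr_local_cpus = 1;

            high = zone_low_wmark / nr_local_cpus;

            /* Keep at least the historical high:batch ratio of 4. */
            if (high < batch * 4)
                    high = batch * 4;

            return high;
    }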

Note that in this patch the pcp->high values are adjusted after memory
hotplug events, min_free_kbytes adjustments and watermark scale factor
adjustments but not CPU hotplug events which is handled later in the
series.

On a test KVM instance:

Before grep -E "high:|batch" /proc/zoneinfo | tail -2
              high:  378
              batch: 63

After grep -E "high:|batch" /proc/zoneinfo | tail -2
              high:  649
              batch: 63
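
For comparison, the old default was high = 6 * batch, which accounts for the
"Before" figure (6 * 63 = 378). The new figure of 649 is instead derived from
the zone low watermark split across the CPUs local to the zone (clamped to at
least batch * 4) by the new zone_highsize() helper in the diff below, so it
depends on the instance's watermarks and CPU topology rather than on the
batch size.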

[mgorman@techsingularity.net: fix __setup_per_zone_wmarks for parallel memory hotplug]
  Link: https://lkml.kernel.org/r/20210528105925.GN30378@techsingularity.net

Link: https://lkml.kernel.org/r/20210525080119.5455-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bbbecb35
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -961,7 +961,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 	node_states_set_node(nid, &arg);
 	if (need_zonelists_rebuild)
 		build_all_zonelists(NULL);
-	zone_pcp_update(zone);
 
 	/* Basic onlining is complete, allow allocation of onlined pages. */
 	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
@@ -974,6 +973,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 	 */
 	shuffle_zone(zone);
 
+	/* reinitialise watermarks and update pcp limits */
 	init_per_zone_wmark_min();
 
 	kswapd_run(nid);
@@ -1829,13 +1829,13 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
 	adjust_present_page_count(zone, -nr_pages);
 
+	/* reinitialise watermarks and update pcp limits */
 	init_per_zone_wmark_min();
 
 	if (!populated_zone(zone)) {
 		zone_pcp_reset(zone);
 		build_all_zonelists(NULL);
-	} else
-		zone_pcp_update(zone);
+	}
 
 	node_states_clear_node(node, &arg);
 	if (arg.status_change_nid >= 0) {
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2174,14 +2174,6 @@ void __init page_alloc_init_late(void)
 	/* Block until all are initialised */
 	wait_for_completion(&pgdat_init_all_done_comp);
 
-	/*
-	 * The number of managed pages has changed due to the initialisation
-	 * so the pcpu batch and high limits needs to be updated or the limits
-	 * will be artificially small.
-	 */
-	for_each_populated_zone(zone)
-		zone_pcp_update(zone);
-
 	/*
 	 * We initialized the rest of the deferred pages. Permanently disable
 	 * on-demand struct page initialization.
@@ -6633,13 +6625,12 @@ static int zone_batchsize(struct zone *zone)
 	int batch;
 
 	/*
-	 * The per-cpu-pages pools are set to around 1000th of the
-	 * size of the zone.
+	 * The number of pages to batch allocate is either ~0.1%
+	 * of the zone or 1MB, whichever is smaller. The batch
+	 * size is striking a balance between allocation latency
+	 * and zone lock contention.
 	 */
-	batch = zone_managed_pages(zone) / 1024;
-	/* But no more than a meg. */
-	if (batch * PAGE_SIZE > 1024 * 1024)
-		batch = (1024 * 1024) / PAGE_SIZE;
+	batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
 	batch /= 4;		/* We effectively *= 4 below */
 	if (batch < 1)
 		batch = 1;
@@ -6676,6 +6667,34 @@ static int zone_batchsize(struct zone *zone)
 #endif
 }
 
+static int zone_highsize(struct zone *zone, int batch)
+{
+#ifdef CONFIG_MMU
+	int high;
+	int nr_local_cpus;
+
+	/*
+	 * The high value of the pcp is based on the zone low watermark
+	 * so that if they are full then background reclaim will not be
+	 * started prematurely. The value is split across all online CPUs
+	 * local to the zone. Note that early in boot that CPUs may not be
+	 * online yet.
+	 */
+	nr_local_cpus = max(1U, cpumask_weight(cpumask_of_node(zone_to_nid(zone))));
+	high = low_wmark_pages(zone) / nr_local_cpus;
+
+	/*
+	 * Ensure high is at least batch*4. The multiple is based on the
+	 * historical relationship between high and batch.
+	 */
+	high = max(high, batch << 2);
+
+	return high;
+#else
+	return 0;
+#endif
+}
+
 /*
  * pcp->high and pcp->batch values are related and generally batch is lower
  * than high. They are also related to pcp->count such that count is lower
@@ -6737,11 +6756,10 @@ static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long h
  */
 static void zone_set_pageset_high_and_batch(struct zone *zone)
 {
-	unsigned long new_high, new_batch;
+	int new_high, new_batch;
 
-	new_batch = zone_batchsize(zone);
-	new_high = 6 * new_batch;
-	new_batch = max(1UL, 1 * new_batch);
+	new_batch = max(1, zone_batchsize(zone));
+	new_high = zone_highsize(zone, new_batch);
 
 	if (zone->pageset_high == new_high &&
 	    zone->pageset_batch == new_batch)
@@ -8222,11 +8240,19 @@ static void __setup_per_zone_wmarks(void)
  */
 void setup_per_zone_wmarks(void)
 {
+	struct zone *zone;
 	static DEFINE_SPINLOCK(lock);
 
 	spin_lock(&lock);
 	__setup_per_zone_wmarks();
 	spin_unlock(&lock);
+
+	/*
+	 * The watermark size have changed so update the pcpu batch
+	 * and high limits or the limits may be inappropriate.
+	 */
+	for_each_zone(zone)
+		zone_pcp_update(zone);
 }
 
 /*