Commit d34c0a75 authored by Nitin Gupta, committed by Linus Torvalds

mm: use unsigned types for fragmentation score

Proactive compaction uses a per-node/zone "fragmentation score" which is
always in the range [0, 100], so use an unsigned type for these scores as
well as for the related constants.
Signed-off-by: Nitin Gupta <nigupta@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Iurii Zaikin <yzaikin@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200618010319.13159-1-nigupta@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 25788738
...@@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) ...@@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order)
#ifdef CONFIG_COMPACTION #ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory; extern int sysctl_compact_memory;
extern int sysctl_compaction_proactiveness; extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write, extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos); void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold; extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed; extern int sysctl_compact_unevictable_allowed;
extern int extfrag_for_order(struct zone *zone, unsigned int order); extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags, unsigned int order, unsigned int alloc_flags,
......
...@@ -2854,7 +2854,7 @@ static struct ctl_table vm_table[] = { ...@@ -2854,7 +2854,7 @@ static struct ctl_table vm_table[] = {
{ {
.procname = "compaction_proactiveness", .procname = "compaction_proactiveness",
.data = &sysctl_compaction_proactiveness, .data = &sysctl_compaction_proactiveness,
.maxlen = sizeof(int), .maxlen = sizeof(sysctl_compaction_proactiveness),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec_minmax, .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO, .extra1 = SYSCTL_ZERO,
......
...@@ -53,7 +53,7 @@ static inline void count_compact_events(enum vm_event_item item, long delta) ...@@ -53,7 +53,7 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
/* /*
* Fragmentation score check interval for proactive compaction purposes. * Fragmentation score check interval for proactive compaction purposes.
*/ */
static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
/* /*
* Page order with-respect-to which proactive compaction * Page order with-respect-to which proactive compaction
...@@ -1890,7 +1890,7 @@ static bool kswapd_is_running(pg_data_t *pgdat) ...@@ -1890,7 +1890,7 @@ static bool kswapd_is_running(pg_data_t *pgdat)
* ZONE_DMA32. For smaller zones, the score value remains close to zero, * ZONE_DMA32. For smaller zones, the score value remains close to zero,
* and thus never exceeds the high threshold for proactive compaction. * and thus never exceeds the high threshold for proactive compaction.
*/ */
static int fragmentation_score_zone(struct zone *zone) static unsigned int fragmentation_score_zone(struct zone *zone)
{ {
unsigned long score; unsigned long score;
...@@ -1906,9 +1906,9 @@ static int fragmentation_score_zone(struct zone *zone) ...@@ -1906,9 +1906,9 @@ static int fragmentation_score_zone(struct zone *zone)
* the node's score falls below the low threshold, or one of the back-off * the node's score falls below the low threshold, or one of the back-off
* conditions is met. * conditions is met.
*/ */
static int fragmentation_score_node(pg_data_t *pgdat) static unsigned int fragmentation_score_node(pg_data_t *pgdat)
{ {
unsigned long score = 0; unsigned int score = 0;
int zoneid; int zoneid;
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
...@@ -1921,17 +1921,17 @@ static int fragmentation_score_node(pg_data_t *pgdat) ...@@ -1921,17 +1921,17 @@ static int fragmentation_score_node(pg_data_t *pgdat)
return score; return score;
} }
static int fragmentation_score_wmark(pg_data_t *pgdat, bool low) static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
{ {
int wmark_low; unsigned int wmark_low;
/* /*
* Cap the low watermak to avoid excessive compaction * Cap the low watermak to avoid excessive compaction
* activity in case a user sets the proactivess tunable * activity in case a user sets the proactivess tunable
* close to 100 (maximum). * close to 100 (maximum).
*/ */
wmark_low = max(100 - sysctl_compaction_proactiveness, 5); wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
return low ? wmark_low : min(wmark_low + 10, 100); return low ? wmark_low : min(wmark_low + 10, 100U);
} }
static bool should_proactive_compact_node(pg_data_t *pgdat) static bool should_proactive_compact_node(pg_data_t *pgdat)
...@@ -2615,7 +2615,7 @@ int sysctl_compact_memory; ...@@ -2615,7 +2615,7 @@ int sysctl_compact_memory;
* aggressively the kernel should compact memory in the * aggressively the kernel should compact memory in the
* background. It takes values in the range [0, 100]. * background. It takes values in the range [0, 100].
*/ */
int __read_mostly sysctl_compaction_proactiveness = 20; unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
/* /*
* This is the entry point for compacting all nodes via * This is the entry point for compacting all nodes via
......
...@@ -1101,7 +1101,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in ...@@ -1101,7 +1101,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
* It is defined as the percentage of pages found in blocks of size * It is defined as the percentage of pages found in blocks of size
* less than 1 << order. It returns values in range [0, 100]. * less than 1 << order. It returns values in range [0, 100].
*/ */
int extfrag_for_order(struct zone *zone, unsigned int order) unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{ {
struct contig_page_info info; struct contig_page_info info;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment