Commit c603844b authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: convert alloc_flags to unsigned

alloc_flags is a bitmask of flags, but it is signed, which does not
necessarily generate the best code depending on the compiler.  Even
without an impact, it makes more sense for it to be unsigned.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f75fb889
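
As a quick illustration of the convention the patch adopts (flag bitmasks declared unsigned, with truth tests collapsed to bool), here is a minimal, self-contained sketch. The ALLOC_* values below are made-up stand-ins for this example, not the kernel's real definitions.

#include <stdio.h>

/* Illustrative flag bits in the style of the kernel's ALLOC_* flags;
 * the values are invented for this sketch. */
#define ALLOC_WMARK_MIN 0x01u
#define ALLOC_HARDER    0x10u
#define ALLOC_HIGH      0x20u

/* An unsigned bitmask matches the semantics of the value: every bit is
 * a value bit, so the compiler never has to account for sign extension
 * when shifting or comparing the flags. */
static int wants_harder(unsigned int alloc_flags)
{
	/* A flag test reduced to a truth value, mirroring the patch's
	 * "const bool alloc_harder = (alloc_flags & ALLOC_HARDER);" */
	return (alloc_flags & ALLOC_HARDER) != 0;
}

int main(void)
{
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_HARDER;

	printf("ALLOC_HARDER set? %d\n", wants_harder(alloc_flags));
	return 0;
}

Compiled and run, this prints "ALLOC_HARDER set? 1".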
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -39,12 +39,12 @@ extern int sysctl_compact_unevictable_allowed;
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
-			int alloc_flags, const struct alloc_context *ac,
+			unsigned int alloc_flags, const struct alloc_context *ac,
 			enum migrate_mode mode, int *contended);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order,
-		int alloc_flags, int classzone_idx);
+		unsigned int alloc_flags, int classzone_idx);
 extern void defer_compaction(struct zone *zone, int order);
 extern bool compaction_deferred(struct zone *zone, int order);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -747,7 +747,8 @@ extern struct mutex zonelists_mutex;
 void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool zone_watermark_ok(struct zone *z, unsigned int order,
-		unsigned long mark, int classzone_idx, int alloc_flags);
+		unsigned long mark, int classzone_idx,
+		unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx);
 enum memmap_context {
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1313,7 +1313,8 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
  * COMPACT_CONTINUE - If compaction should run now
  */
 static unsigned long __compaction_suitable(struct zone *zone, int order,
-					int alloc_flags, int classzone_idx)
+					unsigned int alloc_flags,
+					int classzone_idx)
 {
 	int fragindex;
 	unsigned long watermark;
@@ -1358,7 +1359,8 @@ static unsigned long __compaction_suitable(struct zone *zone, int order,
 }
 
 unsigned long compaction_suitable(struct zone *zone, int order,
-					int alloc_flags, int classzone_idx)
+					unsigned int alloc_flags,
+					int classzone_idx)
 {
 	unsigned long ret;
 
@@ -1530,7 +1532,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 static unsigned long compact_zone_order(struct zone *zone, int order,
 		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
-		int alloc_flags, int classzone_idx)
+		unsigned int alloc_flags, int classzone_idx)
 {
 	unsigned long ret;
 	struct compact_control cc = {
@@ -1571,8 +1573,8 @@ int sysctl_extfrag_threshold = 500;
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
-			int alloc_flags, const struct alloc_context *ac,
+			unsigned int alloc_flags, const struct alloc_context *ac,
 			enum migrate_mode mode, int *contended)
 {
 	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -175,7 +175,7 @@ struct compact_control {
 	bool direct_compaction;		/* False from kcompactd or /proc/... */
 	int order;			/* order a direct compactor needs */
 	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
-	const int alloc_flags;		/* alloc flags of a direct compactor */
+	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
 	const int classzone_idx;	/* zone index of a direct compactor */
 	struct zone *zone;
 	int contended;			/* Signal need_sched() or lock
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1525,7 +1525,7 @@ static inline bool free_pages_prezeroed(bool poisoned)
 }
 
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-							int alloc_flags)
+							unsigned int alloc_flags)
 {
 	int i;
 	bool poisoned = true;
@@ -2391,7 +2391,8 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
 static inline
 struct page *buffered_rmqueue(struct zone *preferred_zone,
 			struct zone *zone, unsigned int order,
-			gfp_t gfp_flags, int alloc_flags, int migratetype)
+			gfp_t gfp_flags, unsigned int alloc_flags,
+			int migratetype)
 {
 	unsigned long flags;
 	struct page *page;
@@ -2545,12 +2546,13 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * to check in the allocation paths if no pages are free.
  */
 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
-			unsigned long mark, int classzone_idx, int alloc_flags,
+			unsigned long mark, int classzone_idx,
+			unsigned int alloc_flags,
 			long free_pages)
 {
 	long min = mark;
 	int o;
-	const int alloc_harder = (alloc_flags & ALLOC_HARDER);
+	const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
 
 	/* free_pages may go negative - that's OK */
 	free_pages -= (1 << order) - 1;
@@ -2613,7 +2615,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 }
 
 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+		      int classzone_idx, unsigned int alloc_flags)
 {
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 					zone_page_state(z, NR_FREE_PAGES));
@@ -2957,7 +2959,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 /* Try memory compaction for high-order allocations before reclaim */
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-		int alloc_flags, const struct alloc_context *ac,
+		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum migrate_mode mode, int *contended_compaction,
 		bool *deferred_compaction)
 {
@@ -3013,7 +3015,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-		int alloc_flags, const struct alloc_context *ac,
+		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum migrate_mode mode, int *contended_compaction,
 		bool *deferred_compaction)
 {
@@ -3053,7 +3055,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-		int alloc_flags, const struct alloc_context *ac,
+		unsigned int alloc_flags, const struct alloc_context *ac,
 		unsigned long *did_some_progress)
 {
 	struct page *page = NULL;
@@ -3092,10 +3094,10 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
 }
 
-static inline int
+static inline unsigned int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 
 	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -3156,7 +3158,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 {
 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
 	struct page *page = NULL;
-	int alloc_flags;
+	unsigned int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
@@ -3348,7 +3350,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct zoneref *preferred_zoneref;
 	struct page *page = NULL;
 	unsigned int cpuset_mems_cookie;
-	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
+	unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),