Commit 3f913fc5 authored by Qi Zheng, committed by akpm

mm: fix missing handler for __GFP_NOWARN

We expect no warnings to be issued when __GFP_NOWARN is specified, but paths
such as alloc_pages() and kmalloc() currently still print some warnings. Fix
that.
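
As a rough sketch of the kind of caller this is about (the helper below is
invented for illustration, not taken from this patch): an opportunistic
allocation passes __GFP_NOWARN because it has a fallback and wants a quiet
NULL instead of an allocation-failure splat.

	/* Illustrative only: made-up helper, not part of this patch. */
	#include <linux/slab.h>

	static void *alloc_scratch(size_t big, size_t small)
	{
		/* Try the large buffer first; __GFP_NOWARN asks the allocator
		 * not to warn if this request cannot be satisfied. */
		void *buf = kmalloc(big, GFP_KERNEL | __GFP_NOWARN);

		if (!buf)
			buf = kmalloc(small, GFP_KERNEL);	/* fallback may warn */
		return buf;
	}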

Warnings that report usage problems, however, are deliberately left untouched:
if such a warning is printed, the caller's usage should be fixed instead. One
example is the following check:

	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
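
By contrast, a call like the invented one below has a usage problem
(__GFP_NOFAIL with order > 1), so that warning is expected to fire even though
__GFP_NOWARN is also set; the caller, not the warning, is what needs fixing.

	/* Illustrative only: a caller bug -- __GFP_NOFAIL with order > 1.
	 * The WARN_ON_ONCE() above still triggers despite __GFP_NOWARN. */
	#include <linux/gfp.h>

	static struct page *bad_nofail_alloc(void)
	{
		return alloc_pages(GFP_KERNEL | __GFP_NOFAIL | __GFP_NOWARN, 2);
	}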

[zhengqi.arch@bytedance.com: v2]
 Link: https://lkml.kernel.org/r/20220511061951.1114-1-zhengqi.arch@bytedance.com
Link: https://lkml.kernel.org/r/20220510113809.80626-1-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jiri Slaby <jirislaby@kernel.org>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 10e0f753
@@ -20,6 +20,7 @@ struct fault_attr {
 	atomic_t space;
 	unsigned long verbose;
 	bool task_filter;
+	bool no_warn;
 	unsigned long stacktrace_depth;
 	unsigned long require_start;
 	unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
 		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
 		.verbose = 2,						\
 		.dname = NULL,						\
+		.no_warn = false,					\
 	}
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
...
@@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 static void fail_dump(struct fault_attr *attr)
 {
+	if (attr->no_warn)
+		return;
+
 	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
 		       "name %pd, interval %lu, probability %lu, "
...
@@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
 		return false;
 
+	if (gfpflags & __GFP_NOWARN)
+		failslab.attr.no_warn = true;
+
 	return should_fail(&failslab.attr, s->object_size);
 }
...
@@ -35,6 +35,21 @@ struct folio_batch;
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+/*
+ * Different from WARN_ON_ONCE(), no warning will be issued
+ * when we specify __GFP_NOWARN.
+ */
+#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
+
 void page_writeback_init(void);
 
 static inline void *folio_raw_mapping(struct folio *folio)
...
@@ -3786,6 +3786,9 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 			(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
+	if (gfp_mask & __GFP_NOWARN)
+		fail_page_alloc.attr.no_warn = true;
+
 	return should_fail(&fail_page_alloc.attr, 1 << order);
 }
@@ -4334,7 +4337,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	 */
 
 	/* Exhausted what can be done so it's blame time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+	if (out_of_memory(&oc) ||
+	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
 		*did_some_progress = 1;
 
 		/*
@@ -5108,7 +5112,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
 		 * of any new users that actually require GFP_NOWAIT
 		 */
-		if (WARN_ON_ONCE(!can_direct_reclaim))
+		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
 			goto fail;
 
 		/*
@@ -5116,7 +5120,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * because we cannot reclaim anything and only can loop waiting
 		 * for somebody to do a work for us
 		 */
-		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
 
 		/*
 		 * non failing costly orders are a hard requirement which we
@@ -5124,7 +5128,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * so that we can identify them and convert them to something
 		 * else.
 		 */
-		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
 
 		/*
 		 * Help non-failing allocations by giving them access to memory
@@ -5370,10 +5374,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	 * There are several places where we assume that the order value is sane
 	 * so bail out early if the request is out of bound.
 	 */
-	if (unlikely(order >= MAX_ORDER)) {
-		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
+	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 		return NULL;
-	}
 
 	gfp &= gfp_allowed_mask;
 	/*
@@ -9025,7 +9027,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
 	lru_cache_enable();
 	if (ret < 0) {
-		if (ret == -EBUSY)
+		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
 			alloc_contig_dump_pages(&cc->migratepages);
 		putback_movable_pages(&cc->migratepages);
 		return ret;
...