XArray: Add calls to might_alloc()

Catch bogus GFP flags deterministically, instead of occasionally
when we actually have to allocate memory.
Reported-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
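For context, might_alloc() lives in include/linux/sched/mm.h. A minimal sketch of what it does in the kernels this patch targets (the exact body may differ between versions):

static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);
	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

It exercises the fs_reclaim lockdep machinery and triggers might_sleep() whenever the flags allow blocking, so a GFP mismatch is reported on every call rather than only on the rare call that actually has to allocate an XArray node.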
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -16,6 +16,7 @@
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -586,6 +587,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -612,6 +614,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -687,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock(xa);
@@ -714,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_bh(xa);
@@ -741,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_irq(xa);
@@ -770,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock(xa);
@@ -799,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -828,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -857,6 +866,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock(xa);
@@ -886,6 +896,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_bh(xa);
@@ -915,6 +926,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_irq(xa);
@@ -948,6 +960,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock(xa);
@@ -981,6 +994,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_bh(xa);
@@ -1014,6 +1028,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_irq(xa);
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,5 @@
 #ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
 #define _TOOLS_PERF_LINUX_SCHED_MM_H
 
+#define might_alloc(gfp) do { } while (0)
 #endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
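To illustrate the effect, consider a hypothetical buggy caller that takes its own spinlock and then passes a sleeping GFP to the XArray; my_lock, my_xa, item, and buggy_insert below are made-up names, not part of this patch. With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the new might_alloc() call reports this on every invocation, not only on the rare one that has to allocate a node:

#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical caller state */
static DEFINE_XARRAY(my_xa);

static int buggy_insert(unsigned long index, void *item)
{
	int err;

	spin_lock(&my_lock);	/* atomic context from here on */
	/* GFP_KERNEL allows blocking: might_alloc() now warns here
	 * deterministically, even when no node allocation occurs. */
	err = xa_insert(&my_xa, index, item, GFP_KERNEL);
	spin_unlock(&my_lock);
	return err;
}

Before this patch, the same call would only be flagged when __xa_insert() happened to need a new node, so such bugs could survive testing unnoticed.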