5.15-stable review patch. If anyone has any objections, please let me know.
------------------
From: Matthew Wilcox (Oracle) <willy@infradead.org>
[ Upstream commit 1dd685c414a7b9fdb3d23aca3aedae84f0b998ae ]
Catch bogus GFP flags deterministically, instead of occasionally when we actually have to allocate memory.
Reported-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Stable-dep-of: 99765233ab42 ("NFS: Fixup allocation flags for nfsiod's __GFP_NORETRY")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/xarray.h         | 15 +++++++++++++++
 tools/include/linux/sched/mm.h |  2 ++
 2 files changed, 17 insertions(+)
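[ Reviewer note, not part of the patch: a minimal sketch of the bug class
  this annotation catches; the caller, xarray, and lock names below are
  hypothetical. Passing a sleeping GFP mask from atomic context previously
  only warned when the XArray actually had to allocate a node; with
  might_alloc() it warns on every call under CONFIG_DEBUG_ATOMIC_SLEEP. ]

	static DEFINE_XARRAY(my_xa);		/* hypothetical */
	static DEFINE_SPINLOCK(my_lock);	/* hypothetical */

	static int buggy_insert(void *ptr)
	{
		int err;

		spin_lock(&my_lock);	/* atomic context */
		/*
		 * GFP_KERNEL may sleep: a latent bug that used to warn only
		 * when a node allocation actually happened, and now warns
		 * deterministically via might_alloc().
		 */
		err = xa_insert(&my_xa, 0, ptr, GFP_KERNEL);
		spin_unlock(&my_lock);
		return err;
	}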
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index a91e3d90df8a..549f222a2a6f 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -15,6 +15,7 @@
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>

@@ -585,6 +586,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;

+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -611,6 +613,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;

+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -686,6 +689,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 {
 	void *curr;

+	might_alloc(gfp);
 	xa_lock(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock(xa);
@@ -713,6 +717,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;

+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_bh(xa);
@@ -740,6 +745,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;

+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_irq(xa);
@@ -769,6 +775,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock(xa);
@@ -798,6 +805,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -827,6 +835,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -856,6 +865,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock(xa);
@@ -885,6 +895,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_bh(xa);
@@ -914,6 +925,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_irq(xa);
@@ -947,6 +959,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock(xa);
@@ -980,6 +993,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_bh(xa);
@@ -1013,6 +1027,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;

+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_irq(xa);
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
index c8d9f19c1f35..967294b8edcf 100644
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,6 @@
 #ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
 #define _TOOLS_PERF_LINUX_SCHED_MM_H

+#define might_alloc(gfp) do { } while (0)
+
 #endif	/* _TOOLS_PERF_LINUX_SCHED_MM_H */
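[ For reference: the tools/ header only needs the no-op stub above because
  the userspace radix-tree/XArray test suite has no debug machinery. In the
  kernel proper, include/linux/sched/mm.h defines might_alloc() roughly as
  below (quoted from memory; consult the tree for the authoritative
  version), so bogus GFP flags are flagged even when no allocation occurs:

	static inline void might_alloc(gfp_t gfp_mask)
	{
		fs_reclaim_acquire(gfp_mask);
		fs_reclaim_release(gfp_mask);

		might_sleep_if(gfpflags_allow_blocking(gfp_mask));
	}

  The fs_reclaim acquire/release pair exercises the reclaim lockdep map,
  and might_sleep_if() fires for any blocking mask in atomic context. ]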