The patch titled
     Subject: slub: track number of slabs irrespective of CONFIG_SLUB_DEBUG
has been added to the -mm tree.  Its filename is
     slub-track-number-of-slabs-irrespective-of-config_slub_debug.patch
This patch should soon appear at
     http://ozlabs.org/~akpm/mmots/broken-out/slub-track-number-of-slabs-irrespec...
and later at
     http://ozlabs.org/~akpm/mmotm/broken-out/slub-track-number-of-slabs-irrespec...
Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated there every 3-4 working days
------------------------------------------------------
From: Shakeel Butt <shakeelb@google.com>
Subject: slub: track number of slabs irrespective of CONFIG_SLUB_DEBUG
For !CONFIG_SLUB_DEBUG, SLUB does not maintain the number of slabs allocated per node for a kmem_cache. Thus, slabs_node() as called from __kmem_cache_empty(), __kmem_cache_shrink() and __kmem_cache_destroy() will always return 0 for such configs. This is wrong and can cause problems for all users of these functions.
In fact, in [1] Jason reported a system crash while using SLUB without CONFIG_SLUB_DEBUG. The cause was the use of slabs_node() by __kmem_cache_empty().
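For reference, these are the !CONFIG_SLUB_DEBUG stub definitions in mm/slub.c (quoted from the hunk the patch below rewrites) that make every caller see zero slabs:

	/* !CONFIG_SLUB_DEBUG variants before this patch: reads always
	 * return 0 and the inc/dec hooks are no-ops.
	 */
	static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
	static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
	static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
	static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}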
The right solution is to make slabs_node() work even for !CONFIG_SLUB_DEBUG. Commit 0f389ec63077 ("slub: No need for per node slab counters if !SLUB_DEBUG") put the per-node slab counter under CONFIG_SLUB_DEBUG because it was only read through the sysfs API, and that API is disabled for !CONFIG_SLUB_DEBUG. However, the in-kernel users of the per-node slab counter assumed it would work in the absence of CONFIG_SLUB_DEBUG. So, make the counter work for !CONFIG_SLUB_DEBUG.
Please note that f9e13c0a5a33 ("slab, slub: skip unnecessary kasan_cache_shutdown()") exposed this issue, but it was present even before that commit.
[1] http://lkml.kernel.org/r/CAHmME9rtoPwxUSnktxzKso14iuVCWT7BE_-_8PAC=pGw1iJnQg...
Link: http://lkml.kernel.org/r/20180620224147.23777-1-shakeelb@google.com
Fixes: f9e13c0a5a33 ("slab, slub: skip unnecessary kasan_cache_shutdown()")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Suggested-by: David Rientjes <rientjes@google.com>
Reported-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/slab.h |    2 -
 mm/slub.c |   80 ++++++++++++++++++++++++----------------------
 2 files changed, 38 insertions(+), 44 deletions(-)
diff -puN mm/slab.h~slub-track-number-of-slabs-irrespective-of-config_slub_debug mm/slab.h
--- a/mm/slab.h~slub-track-number-of-slabs-irrespective-of-config_slub_debug
+++ a/mm/slab.h
@@ -473,8 +473,8 @@ struct kmem_cache_node {
 #ifdef CONFIG_SLUB
 	unsigned long nr_partial;
 	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
+#ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t total_objects;
 	struct list_head full;
 #endif
diff -puN mm/slub.c~slub-track-number-of-slabs-irrespective-of-config_slub_debug mm/slub.c
--- a/mm/slub.c~slub-track-number-of-slabs-irrespective-of-config_slub_debug
+++ a/mm/slub.c
@@ -1030,42 +1030,6 @@ static void remove_full(struct kmem_cach
 	list_del(&page->lru);
 }
 
-/* Tracking of the number of slabs for debugging purposes */
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	return atomic_long_read(&n->nr_slabs);
-}
-
-static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
-{
-	return atomic_long_read(&n->nr_slabs);
-}
-
-static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (likely(n)) {
-		atomic_long_inc(&n->nr_slabs);
-		atomic_long_add(objects, &n->total_objects);
-	}
-}
-static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	atomic_long_dec(&n->nr_slabs);
-	atomic_long_sub(objects, &n->total_objects);
-}
-
 /* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 								void *object)
@@ -1321,16 +1285,46 @@ slab_flags_t kmem_cache_flags(unsigned i
 
 #define disable_higher_order_debug 0
 
+#endif /* CONFIG_SLUB_DEBUG */
+
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
-							{ return 0; }
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
-							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node,
-							int objects) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node,
-							int objects) {}
+{
+	return atomic_long_read(&n->nr_slabs);
+}
 
-#endif /* CONFIG_SLUB_DEBUG */
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	/*
+	 * May be called early in order to allocate a slab for the
+	 * kmem_cache_node structure. Solve the chicken-egg
+	 * dilemma by deferring the increment of the count during
+	 * bootstrap (see early_kmem_cache_node_alloc).
+	 */
+	if (likely(n)) {
+		atomic_long_inc(&n->nr_slabs);
+#ifdef CONFIG_SLUB_DEBUG
+		atomic_long_add(objects, &n->total_objects);
+#endif
+	}
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_dec(&n->nr_slabs);
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_sub(objects, &n->total_objects);
+#endif
+}
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
_
Patches currently in -mm which might be from shakeelb@google.com are
slub-track-number-of-slabs-irrespective-of-config_slub_debug.patch
slub-fix-__kmem_cache_empty-for-config_slub_debug.patch
NAK. It's easier to simply not allow !CONFIG_SLUB_DEBUG for cgroup-based configs, because in that case you certainly have enough memory to include the runtime debug code as well as the extended counters.
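A minimal sketch of the kind of Kconfig dependency being suggested here (the placement under MEMCG and the exact wording are hypothetical, not something proposed verbatim in this thread):

	config MEMCG
		bool "Memory controller"
		...
		# hypothetical: cgroup-based configs always carry the
		# runtime debug code and the extended counters
		select SLUB_DEBUG if SLUB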
On Thu, Jun 21, 2018 at 3:20 AM Christopher Lameter <cl@linux.com> wrote:
> NAK. It's easier to simply not allow !CONFIG_SLUB_DEBUG for cgroup-based
> configs, because in that case you certainly have enough memory to include
> the runtime debug code as well as the extended counters.
FWIW, I ran into issues with the combination of KASAN and CONFIG_SLUB without CONFIG_SLUB_DEBUG, because KASAN was using functions that are broken without CONFIG_SLUB_DEBUG. So while you're at it with creating dependencies, you might want to also say KASAN+CONFIG_SLUB ==> CONFIG_SLUB_DEBUG.
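Expressed as a Kconfig dependency, that would look something like the following (a sketch against lib/Kconfig.kasan; not a patch posted in this thread):

	config KASAN
		bool "KASan: runtime memory debugger"
		...
		# sketch: KASAN calls into helpers that are stubbed out
		# when SLUB_DEBUG is off, so force it on for SLUB builds
		select SLUB_DEBUG if SLUB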