On Thu, Dec 04, 2025 at 02:05:07PM -0800, Suren Baghdasaryan wrote:
On Tue, Dec 2, 2025 at 2:16 AM Harry Yoo <harry.yoo@oracle.com> wrote:
Currently, kvfree_rcu_barrier() flushes RCU sheaves across all slab caches when a cache is destroyed. This is unnecessary; only the RCU sheaves belonging to the cache being destroyed need to be flushed.
As suggested by Vlastimil Babka, introduce a weaker form of kvfree_rcu_barrier() that operates on a specific slab cache.
Factor flush_rcu_sheaves_on_cache() out of flush_all_rcu_sheaves(), and call it from both flush_all_rcu_sheaves() and kvfree_rcu_barrier_on_cache().
Call kvfree_rcu_barrier_on_cache() instead of kvfree_rcu_barrier() on cache destruction.
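To make the effect concrete, here is a minimal, hypothetical module-exit sketch (struct foo, foo_cachep and foo_exit() are invented for illustration and are not part of this patch). Objects handed to kfree_rcu() may still be in flight when their cache is destroyed; after this change, kmem_cache_destroy() waits only for the cache being torn down rather than for every cache in the system:

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical cache-backed object freed via kfree_rcu(). */
struct foo {
        int payload;
        struct rcu_head rcu;
};

static struct kmem_cache *foo_cachep;

static void foo_free(struct foo *f)
{
        /* May still be in flight when foo_exit() runs. */
        kfree_rcu(f, rcu);
}

static void __exit foo_exit(void)
{
        /*
         * Internally this now does kvfree_rcu_barrier_on_cache(foo_cachep)
         * instead of the system-wide kvfree_rcu_barrier().
         */
        kmem_cache_destroy(foo_cachep);
}
module_exit(foo_exit);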
The performance benefit is evaluated on a 12-core, 24-thread AMD Ryzen 5900X machine (1 socket), by loading the slub_kunit module.
Before:
  Total calls:          19
  Average latency (us): 18127
  Total time (us):      344414

After:
  Total calls:          19
  Average latency (us): 10066
  Total time (us):      191264
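In both metrics this is roughly a 44% reduction (average latency 18127 -> 10066 us, total time 344414 -> 191264 us).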
Two performance regressions have been reported:
- stress module loader test's runtime increases by 50-60% (Daniel)
- internal graphics test's runtime on Tegra23 increases by 35% (Jon)
They are fixed by this change.
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Fixes: ec66e0d59952 ("slab: add sheaf support for batching kfree_rcu() operations")
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/linux-mm/1bda09da-93be-4737-aef0-d47f8c5c9301@suse.c...
Reported-and-tested-by: Daniel Gomez <da.gomez@samsung.com>
Closes: https://lore.kernel.org/linux-mm/0406562e-2066-4cf8-9902-b2b0616dd742@kernel...
Reported-and-tested-by: Jon Hunter <jonathanh@nvidia.com>
Closes: https://lore.kernel.org/linux-mm/e988eff6-1287-425e-a06c-805af5bbf262@nvidia...
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
No code change; added proper tags and updated changelog.
 include/linux/slab.h |  5 ++++
 mm/slab.h            |  1 +
 mm/slab_common.c     | 52 +++++++++++++++++++++++++++++------------
 mm/slub.c            | 55 ++++++++++++++++++++++++--------------------
 4 files changed, 73 insertions(+), 40 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index cf443f064a66..937c93d44e8c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1149,6 +1149,10 @@ static inline void kvfree_rcu_barrier(void)
 {
         rcu_barrier();
 }
+static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+        rcu_barrier();
+}
 
 static inline void kfree_rcu_scheduler_running(void) { }
 #else
@@ -1156,6 +1160,7 @@ void kvfree_rcu_barrier(void);
 
 void kfree_rcu_scheduler_running(void);
 #endif
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
Should the above line be before the #endif? I was expecting something like this:
#ifndef CONFIG_KVFREE_RCU_BATCHED
...
static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
{
        rcu_barrier();
}
#else
...
void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
#endif
but when I apply this patch on mm-new I get this:
#ifndef CONFIG_KVFREE_RCU_BATCHED
...
static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
{
        rcu_barrier();
}
#else
...
#endif
void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
Oops, nice catch! Interestingly this didn't break CONFIG_KVFREE_RCU_BATCHED=n builds...
I'll send V3 shortly.
Other than that LGTM
Thanks!
 
 /**
  * kmalloc_size_roundup - Report allocation bucket size for the given size
diff --git a/mm/slab.h b/mm/slab.h
index f730e012553c..e767aa7e91b0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 
 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
 void flush_all_rcu_sheaves(void);
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
 
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                          SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84dfff4f7b1f..dd8a49d6f9cc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
                 return;
         /* in-flight kfree_rcu()'s may include objects from our cache */
-        kvfree_rcu_barrier();
+        kvfree_rcu_barrier_on_cache(s);
 
         if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
             (s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
 {
         struct kfree_rcu_cpu_work *krwp;
         struct kfree_rcu_cpu *krcp;
         bool queued;
         int i, cpu;
 
-        flush_all_rcu_sheaves();
-
         /*
          * Firstly we detach objects and queue them over an RCU-batch
          * for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
                 }
         }
 }
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+        flush_all_rcu_sheaves();
+        __kvfree_rcu_barrier();
+}
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
+
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ *                               specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+        if (s->cpu_sheaves)
+                flush_rcu_sheaves_on_cache(s);
+
+        /*
+         * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+         * on a specific slab cache.
+         */
+        __kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
 }
 
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-
diff --git a/mm/slub.c b/mm/slub.c
index 785e25a14999..7cec2220712b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
 
 /* needed for kvfree_rcu_barrier() */
-void flush_all_rcu_sheaves(void)
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
 {
         struct slub_flush_work *sfw;
-        struct kmem_cache *s;
         unsigned int cpu;
 
-        cpus_read_lock();
-        mutex_lock(&slab_mutex);
+        mutex_lock(&flush_lock);
 
-        list_for_each_entry(s, &slab_caches, list) {
-                if (!s->cpu_sheaves)
-                        continue;
+        for_each_online_cpu(cpu) {
+                sfw = &per_cpu(slub_flush, cpu);
 
-                mutex_lock(&flush_lock);
+                /*
+                 * we don't check if rcu_free sheaf exists - racing
+                 * __kfree_rcu_sheaf() might have just removed it.
+                 * by executing flush_rcu_sheaf() on the cpu we make
+                 * sure the __kfree_rcu_sheaf() finished its call_rcu()
+                 */
 
-                for_each_online_cpu(cpu) {
-                        sfw = &per_cpu(slub_flush, cpu);
+                INIT_WORK(&sfw->work, flush_rcu_sheaf);
+                sfw->s = s;
+                queue_work_on(cpu, flushwq, &sfw->work);
+        }
 
-                        /*
-                         * we don't check if rcu_free sheaf exists - racing
-                         * __kfree_rcu_sheaf() might have just removed it.
-                         * by executing flush_rcu_sheaf() on the cpu we make
-                         * sure the __kfree_rcu_sheaf() finished its call_rcu()
-                         */
+        for_each_online_cpu(cpu) {
+                sfw = &per_cpu(slub_flush, cpu);
+                flush_work(&sfw->work);
+        }
 
-                        INIT_WORK(&sfw->work, flush_rcu_sheaf);
-                        sfw->s = s;
-                        queue_work_on(cpu, flushwq, &sfw->work);
-                }
+        mutex_unlock(&flush_lock);
+}
 
-                for_each_online_cpu(cpu) {
-                        sfw = &per_cpu(slub_flush, cpu);
-                        flush_work(&sfw->work);
-                }
+void flush_all_rcu_sheaves(void)
+{
+        struct kmem_cache *s;
+
+        cpus_read_lock();
+        mutex_lock(&slab_mutex);
 
-                mutex_unlock(&flush_lock);
+        list_for_each_entry(s, &slab_caches, list) {
+                if (!s->cpu_sheaves)
+                        continue;
+
+                flush_rcu_sheaves_on_cache(s);
         }
 
         mutex_unlock(&slab_mutex);
-- 
2.43.0