Running the test case liburing/accept-reuse.t with CONFIG_KASAN=y and CONFIG_KASAN_EXTRA_INFO=y leads to the following crash:
Unable to handle kernel paging request at virtual address 00000c6455008008
...
pc : __kasan_mempool_unpoison_object+0x38/0x170
lr : io_netmsg_cache_free+0x8c/0x180
...
Call trace:
 __kasan_mempool_unpoison_object+0x38/0x170 (P)
 io_netmsg_cache_free+0x8c/0x180
 io_ring_exit_work+0xd4c/0x13a0
 process_one_work+0x52c/0x1000
 worker_thread+0x830/0xdc0
 kthread+0x2bc/0x348
 ret_from_fork+0x10/0x20
Since commit b556a462eb8d ("kasan: save free stack traces for slab mempools"), kasan_mempool_poison_object() stores some info inside the object. The expectation is that the object gets reinitialized after the kasan_mempool_unpoison_object() call, and this is what happens in most use cases.
However, the io_uring code expects that io_alloc_cache_put()/get() do not modify the object, so kasan_mempool_poison_object() ends up corrupting it, leading to a crash later.
Add a @notrack argument to kasan_mempool_poison_object() to tell KASAN to avoid storing info inside the object, and use it for the io_uring caches.
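For clarity, the resulting call pattern looks roughly like the sketch below (illustrative only, simplified from the hunks that follow; the real call sites are in io_uring/alloc_cache.h, mm/mempool.c and net/core/skbuff.c):

	/* io_uring alloc caches: the cached object (and its cached iovec)
	 * must come back from the cache unmodified, so skip storing the
	 * free stack trace in it. */
	kasan_mempool_poison_object(entry, true);

	/* mempool and the napi skb cache: keep the current behaviour and
	 * keep saving a free stack trace inside the object. */
	kasan_mempool_poison_object(element, false);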
Reported-by: lizetao <lizetao1@huawei.com>
Closes: https://lkml.kernel.org/r/ec2a6ca08c614c10853fbb1270296ac4@huawei.com
Fixes: b556a462eb8d ("kasan: save free stack traces for slab mempools")
Cc: stable@vger.kernel.org
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Simon Horman <horms@kernel.org>
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
---
 include/linux/kasan.h  | 13 +++++++------
 io_uring/alloc_cache.h |  2 +-
 io_uring/net.c         |  2 +-
 io_uring/rw.c          |  2 +-
 mm/kasan/common.c      | 11 ++++++-----
 mm/mempool.c           |  2 +-
 net/core/skbuff.c      |  2 +-
 7 files changed, 18 insertions(+), 16 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b..4d0bf4af399d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -328,18 +328,19 @@ static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
 	__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
 }
-bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+bool __kasan_mempool_poison_object(void *ptr, bool notrack, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
  * @ptr: Pointer to the slab allocation.
+ * @notrack: Don't record stack trace of this call in the object.
  *
  * This function is intended for kernel subsystems that cache slab allocations
  * to reuse them instead of freeing them back to the slab allocator (e.g.
  * mempool).
  *
  * This function poisons a slab allocation and saves a free stack trace for it
- * without initializing the allocation's memory and without putting it into the
- * quarantine (for the Generic mode).
+ * (if @notrack == false) without initializing the allocation's memory and
+ * without putting it into the quarantine (for the Generic mode).
  *
  * This function also performs checks to detect double-free and invalid-free
  * bugs and reports them. The caller can use the return value of this function
@@ -354,10 +355,10 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
  *
  * Return: true if the allocation can be safely reused; false otherwise.
  */
-static __always_inline bool kasan_mempool_poison_object(void *ptr)
+static __always_inline bool kasan_mempool_poison_object(void *ptr, bool notrack)
 {
 	if (kasan_enabled())
-		return __kasan_mempool_poison_object(ptr, _RET_IP_);
+		return __kasan_mempool_poison_object(ptr, notrack, _RET_IP_);
 	return true;
 }
@@ -456,7 +457,7 @@ static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int or
 	return true;
 }
 static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
-static inline bool kasan_mempool_poison_object(void *ptr)
+static inline bool kasan_mempool_poison_object(void *ptr, bool notrack)
 {
 	return true;
 }
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index a3a8cfec32ce..dd508dddea33 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -10,7 +10,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 				      void *entry)
 {
 	if (cache->nr_cached < cache->max_cached) {
-		if (!kasan_mempool_poison_object(entry))
+		if (!kasan_mempool_poison_object(entry, true))
 			return false;
 		cache->entries[cache->nr_cached++] = entry;
 		return true;
diff --git a/io_uring/net.c b/io_uring/net.c
index 85f55fbc25c9..a954e37c7fd3 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -149,7 +149,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 	iov = hdr->free_iov;
 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
 		if (iov)
-			kasan_mempool_poison_object(iov);
+			kasan_mempool_poison_object(iov, true);
 		req->async_data = NULL;
 		req->flags &= ~REQ_F_ASYNC_DATA;
 	}
diff --git a/io_uring/rw.c b/io_uring/rw.c
index a9a2733be842..cba475003ba7 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -167,7 +167,7 @@ static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
 	iov = rw->free_iovec;
 	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
 		if (iov)
-			kasan_mempool_poison_object(iov);
+			kasan_mempool_poison_object(iov, true);
 		req->async_data = NULL;
 		req->flags &= ~REQ_F_ASYNC_DATA;
 	}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ed4873e18c75..e7b54aa9494e 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -230,7 +230,8 @@ static bool check_slab_allocation(struct kmem_cache *cache, void *object,
 }
 static inline void poison_slab_object(struct kmem_cache *cache, void *object,
-				       bool init, bool still_accessible)
+				       bool init, bool still_accessible,
+				       bool notrack)
 {
 	void *tagged_object = object;
@@ -243,7 +244,7 @@ static inline void poison_slab_object(struct kmem_cache *cache, void *object,
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
 			KASAN_SLAB_FREE, init);
-	if (kasan_stack_collection_enabled())
+	if (kasan_stack_collection_enabled() && !notrack)
 		kasan_save_free_info(cache, tagged_object);
 }
@@ -261,7 +262,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 	if (!kasan_arch_is_ready() || is_kfence_address(object))
 		return false;
-	poison_slab_object(cache, object, init, still_accessible);
+	poison_slab_object(cache, object, init, still_accessible, false);
 	/*
 	 * If the object is put into quarantine, do not let slab put the object
@@ -495,7 +496,7 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
 	__kasan_unpoison_pages(page, order, false);
 }
-bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+bool __kasan_mempool_poison_object(void *ptr, bool notrack, unsigned long ip)
 {
 	struct folio *folio = virt_to_folio(ptr);
 	struct slab *slab;
@@ -519,7 +520,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 	if (check_slab_allocation(slab->slab_cache, ptr, ip))
 		return false;
-	poison_slab_object(slab->slab_cache, ptr, false, false);
+	poison_slab_object(slab->slab_cache, ptr, false, false, notrack);
 	return true;
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 3223337135d0..283df5d2b995 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -115,7 +115,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		return kasan_mempool_poison_object(element);
+		return kasan_mempool_poison_object(element, false);
 	else if (pool->alloc == mempool_alloc_pages)
 		return kasan_mempool_poison_pages(element,
 						(unsigned long)pool->pool_data);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a441613a1e6c..c9f58a698bb7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1457,7 +1457,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	u32 i;
-	if (!kasan_mempool_poison_object(skb))
+	if (!kasan_mempool_poison_object(skb, false))
 		return;
 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);