When encrypt_resp() fails in the send path, we only set
STATUS_DATA_ERROR but leave the transform buffer allocated (work->tr_buf
in this tree). Repeatedly hitting this path leaks kernel memory and can
lead to OOM (a DoS) when encryption is required.
Reproduced on: Linux v6.18-rc2 (self-built test kernel)
Fix this by freeing the transform buffer and forcing a plaintext error reply.
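For illustration, a minimal self-contained sketch of the fixed send path
follows. All sketch_* names are hypothetical stand-ins, not the ksmbd
code; in the tree the encrypt step is conn->ops->encrypt_resp() and the
buffer is work->tr_buf:

  #include <linux/slab.h>     /* kvzalloc(), kvfree() */
  #include <linux/types.h>

  struct sketch_work {
      void *tr_buf;           /* transform buffer owned by the work item */
      bool encrypted;         /* reply should go out encrypted */
  };

  /* Stand-in for conn->ops->encrypt_resp(): allocates the transform
   * buffer and may still fail afterwards, e.g. on a crypto error. */
  static int sketch_encrypt_resp(struct sketch_work *work)
  {
      work->tr_buf = kvzalloc(PAGE_SIZE, GFP_KERNEL);
      if (!work->tr_buf)
          return -ENOMEM;
      /* ... encryption work that may fail ... */
      return -EIO;            /* take the failure path in this sketch */
  }

  static void sketch_send(struct sketch_work *work)
  {
      if (work->encrypted && sketch_encrypt_resp(work) < 0) {
          /* Previously only the response status was set here and
           * tr_buf stayed allocated: one leak per failed request.
           * Free it and fall back to a plaintext error reply. */
          work->encrypted = false;
          kvfree(work->tr_buf);       /* kvfree(NULL) is a no-op */
          work->tr_buf = NULL;
      }
      /* ... transmit the (now plaintext) error response ... */
  }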
Reported-by: Qianchang Zhao <pioooooooooip(a)gmail.com>
Reported-by: Zhitong Liu <liuzhitong1993(a)gmail.com>
Cc: stable(a)vger.kernel.org
Signed-off-by: Qianchang Zhao <pioooooooooip(a)gmail.com>
---
fs/smb/server/server.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 40420544c..15dd13e76 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -244,8 +244,14 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
if (work->sess && work->sess->enc && work->encrypted &&
conn->ops->encrypt_resp) {
rc = conn->ops->encrypt_resp(work);
- if (rc < 0)
+ if (rc < 0) {
conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+ work->encrypted = false;
+ if (work->tr_buf) {
+ kvfree(work->tr_buf);
+ work->tr_buf = NULL;
+ }
+ }
}
if (work->sess)
ksmbd_user_session_put(work->sess);
--
2.34.1
Since commit a166563e7ec3 ("arm64: mm: support large block mapping when rodata=full"),
__change_memory_common() has a real chance of failing due to a split failure.
Even before that commit, this call (introduced in c55191e96caa) could
fail if it needed to allocate pagetable memory in apply_to_page_range(),
although that has never been observed in practice.
In general, we should always propagate the return value to the caller.
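As a hedged illustration of what callers now see (simplified from this
file; the pgprot arguments mirror what set_memory_ro() passes, but treat
the snippet as a sketch rather than the exact tree code):

  /* change_memory_common() now returns the first error from the
   * linear-alias loop instead of swallowing it, so a failed split
   * or pagetable allocation is visible to set_memory_ro() callers. */
  int set_memory_ro(unsigned long addr, int numpages)
  {
      return change_memory_common(addr, numpages,
                                  __pgprot(PTE_RDONLY),   /* set_mask */
                                  __pgprot(PTE_WRITE));   /* clear_mask */
  }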
Cc: stable(a)vger.kernel.org
Fixes: c55191e96caa ("arm64: mm: apply r/o permissions of VM areas to its linear alias as well")
Signed-off-by: Dev Jain <dev.jain(a)arm.com>
---
Based on Linux 6.18-rc4.
arch/arm64/mm/pageattr.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 5135f2d66958..b4ea86cd3a71 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -148,6 +148,7 @@ static int change_memory_common(unsigned long addr, int numpages,
unsigned long size = PAGE_SIZE * numpages;
unsigned long end = start + size;
struct vm_struct *area;
+ int ret;
int i;
if (!PAGE_ALIGNED(addr)) {
@@ -185,8 +186,10 @@ static int change_memory_common(unsigned long addr, int numpages,
if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
- __change_memory_common((u64)page_address(area->pages[i]),
+ ret = __change_memory_common((u64)page_address(area->pages[i]),
PAGE_SIZE, set_mask, clear_mask);
+ if (ret)
+ return ret;
}
}
--
2.30.2
From: Owen Gu <guhuinan(a)xiaomi.com>
[ Upstream commit cfd6f1a7b42f ("usb: gadget: f_fs: Fix epfile null
pointer access after ep enable.") ]
A race condition occurs when ffs_func_eps_enable() runs concurrently
with ffs_data_reset(). The ffs_data_clear() called in ffs_data_reset()
sets ffs->epfiles to NULL before resetting ffs->eps_count to 0, leading
to a NULL pointer dereference when accessing epfile->ep in
ffs_func_eps_enable() after successful usb_ep_enable().
The ffs->epfiles pointer is set to NULL in both ffs_data_clear() and
ffs_data_close(), and its modification is protected by the ffs->eps_lock
spinlock. The whole ffs_func_eps_enable() function also runs under
ffs->eps_lock.
Thus, add NULL pointer handling for ffs->epfiles in
ffs_func_eps_enable() to fix the issue.
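The pattern, reduced to a self-contained sketch (sketch_* names are
hypothetical; the real code lives in ffs_data_clear() and
ffs_func_eps_enable()): writer and reader serialize on the same
spinlock, so one NULL check at the top of the reader closes the race.

  #include <linux/errno.h>
  #include <linux/spinlock.h>

  struct sketch_ffs {
      spinlock_t eps_lock;
      void *epfiles;          /* cleared by the teardown path */
      unsigned int eps_count;
  };

  /* Teardown side, as in ffs_data_clear(): clear under the lock. */
  static void sketch_clear(struct sketch_ffs *ffs)
  {
      unsigned long flags;

      spin_lock_irqsave(&ffs->eps_lock, flags);
      ffs->epfiles = NULL;
      ffs->eps_count = 0;
      spin_unlock_irqrestore(&ffs->eps_lock, flags);
  }

  /* Enable side, as in ffs_func_eps_enable(): bail out early when the
   * endpoint files are already gone instead of dereferencing NULL. */
  static int sketch_eps_enable(struct sketch_ffs *ffs)
  {
      unsigned long flags;
      int ret = 0;

      spin_lock_irqsave(&ffs->eps_lock, flags);
      if (!ffs->epfiles) {
          ret = -ENOMEM;
          goto done;
      }
      /* ... walk ffs->eps_count endpoints and enable them ... */
  done:
      spin_unlock_irqrestore(&ffs->eps_lock, flags);
      return ret;
  }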
Signed-off-by: Owen Gu <guhuinan(a)xiaomi.com>
Link: https://lore.kernel.org/r/20250915092907.17802-1-guhuinan@xiaomi.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/usb/gadget/function/f_fs.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 08a251df20c4..04058261cdd0 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2407,7 +2407,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ep = func->eps;
epfile = ffs->epfiles;
count = ffs->eps_count;
- while(count--) {
+ if (!epfile) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ while (count--) {
ep->ep->driver_data = ep;
ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
@@ -2431,6 +2436,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
}
wake_up_interruptible(&ffs->wait);
+done:
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
--
2.43.0
During the SSR data collection period, the processing of hw_error events
must wait until the SSR data is collected or the timeout expires before
it can proceed.
Add wake_up_bit() so that hw_error events can be processed as soon as
collection finishes, instead of only after the timeout.
Also pass the timeout through msecs_to_jiffies(): MEMDUMP_TIMEOUT_MS is
in milliseconds, while wait_on_bit_timeout() expects jiffies.
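A minimal sketch of the pairing (the sketch_* names and values are
illustrative, not the driver's):

  #include <linux/bitops.h>   /* clear_bit() */
  #include <linux/jiffies.h>  /* msecs_to_jiffies() */
  #include <linux/sched.h>    /* TASK_UNINTERRUPTIBLE */
  #include <linux/wait_bit.h> /* wait_on_bit_timeout(), wake_up_bit() */

  #define SKETCH_MEMDUMP_COLLECTION   0       /* hypothetical bit index */
  #define SKETCH_TIMEOUT_MS           8000    /* illustrative value */

  static unsigned long sketch_flags;

  /* Waiter: wait_on_bit_timeout() takes jiffies, so a millisecond
   * constant must go through msecs_to_jiffies() first. */
  static void sketch_wait_for_dump(void)
  {
      wait_on_bit_timeout(&sketch_flags, SKETCH_MEMDUMP_COLLECTION,
                          TASK_UNINTERRUPTIBLE,
                          msecs_to_jiffies(SKETCH_TIMEOUT_MS));
  }

  /* Collector: clearing the bit alone does not wake the sleeper;
   * wake_up_bit() must follow so the waiter returns as soon as the
   * dump is collected instead of sleeping out the full timeout. */
  static void sketch_dump_collected(void)
  {
      clear_bit(SKETCH_MEMDUMP_COLLECTION, &sketch_flags);
      wake_up_bit(&sketch_flags, SKETCH_MEMDUMP_COLLECTION);
  }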
Cc: stable(a)vger.kernel.org
Signed-off-by: Shuai Zhang <quic_shuaz(a)quicinc.com>
---
drivers/bluetooth/hci_qca.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 888176b0f..a2e3c97a8 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1105,6 +1105,7 @@ static void qca_controller_memdump(struct work_struct *work)
cancel_delayed_work(&qca->ctrl_memdump_timeout);
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
clear_bit(QCA_IBS_DISABLED, &qca->flags);
+ wake_up_bit(&qca->flags, QCA_MEMDUMP_COLLECTION);
mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1182,6 +1183,7 @@ static void qca_controller_memdump(struct work_struct *work)
qca->qca_memdump = NULL;
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ wake_up_bit(&qca->flags, QCA_MEMDUMP_COLLECTION);
}
mutex_unlock(&qca->hci_memdump_lock);
@@ -1602,7 +1604,7 @@ static void qca_wait_for_dump_collection(struct hci_dev *hdev)
struct qca_data *qca = hu->priv;
wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
- TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
+ TASK_UNINTERRUPTIBLE, msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
}
--
2.34.1
The quilt patch titled
Subject: kasan: unpoison vms[area] addresses with a common tag
has been removed from the -mm tree. Its filename was
kasan-unpoison-vms-addresses-with-a-common-tag.patch
This patch was dropped because an updated version will be issued
------------------------------------------------------
From: Maciej Wieczor-Retman <maciej.wieczor-retman(a)intel.com>
Subject: kasan: unpoison vms[area] addresses with a common tag
Date: Tue, 04 Nov 2025 14:49:48 +0000
A KASAN tag mismatch, possibly causing a kernel panic, can be observed on
systems with a tag-based KASAN enabled and with multiple NUMA nodes. It
was reported on arm64 and reproduced on x86. It can be explained in the
following points:
1. There can be more than one virtual memory chunk.
2. Chunk's base address has a tag.
3. The base address points at the first chunk and thus inherits
the tag of the first chunk.
4. The subsequent chunks will be accessed with the tag from the
first chunk.
5. Thus, the subsequent chunks need to have their tag set to
match that of the first chunk.
Unpoison all vm_structs after allocating them for the percpu allocator.
Use the same tag to resolve the pcpu chunk address mismatch.
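In effect, the change amounts to the following sketch (hedged: it
assumes arch_kasan_set_tag() yields the retagged pointer, as the
KASAN-internal tag helpers do, and that kasan_poison() writes the given
tag into the area's shadow):

  /* Sketch only: vms[]/nr_vms as passed from pcpu_get_vm_areas(). */
  void sketch_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms)
  {
      u8 tag = arch_kasan_get_tag(vms[0]->addr);  /* common tag */
      int area;

      for (area = 0; area < nr_vms; area++) {
          /* Mark the whole area accessible under the common tag. */
          kasan_poison(vms[area]->addr, vms[area]->size, tag, false);
          /* Retag the pointer itself so pointer and shadow agree. */
          vms[area]->addr = arch_kasan_set_tag(vms[area]->addr, tag);
      }
  }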
Link: https://lkml.kernel.org/r/cf8fe0ffcdbf54e06d9df26c8473b123c4065f02.17622670…
Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman(a)intel.com>
Tested-by: Baoquan He <bhe(a)redhat.com>
Cc: Alexander Potapenko <glider(a)google.com>
Cc: Andrey Konovalov <andreyknvl(a)gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a(a)gmail.com>
Cc: Dmitriy Vyukov <dvyukov(a)google.com>
Cc: Marco Elver <elver(a)google.com>
Cc: "Uladzislau Rezki (Sony)" <urezki(a)gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino(a)arm.com>
Cc: <stable(a)vger.kernel.org> [6.1+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/kasan/common.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
--- a/mm/kasan/common.c~kasan-unpoison-vms-addresses-with-a-common-tag
+++ a/mm/kasan/common.c
@@ -584,12 +584,20 @@ bool __kasan_check_byte(const void *addr
return true;
}
+/*
+ * A tag mismatch happens when calculating per-cpu chunk addresses, because
+ * they all inherit the tag from vms[0]->addr, even when nr_vms is bigger
+ * than 1. This is a problem because all the vms[]->addr come from separate
+ * allocations and have different tags so while the calculated address is
+ * correct the tag isn't.
+ */
void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms)
{
int area;
for (area = 0 ; area < nr_vms ; area++) {
kasan_poison(vms[area]->addr, vms[area]->size,
- arch_kasan_get_tag(vms[area]->addr), false);
+ arch_kasan_get_tag(vms[0]->addr), false);
+ arch_kasan_set_tag(vms[area]->addr, arch_kasan_get_tag(vms[0]->addr));
}
}
_
Patches currently in -mm which might be from maciej.wieczor-retman(a)intel.com are
The quilt patch titled
Subject: kasan: unpoison pcpu chunks with base address tag
has been removed from the -mm tree. Its filename was
kasan-unpoison-pcpu-chunks-with-base-address-tag.patch
This patch was dropped because an updated version will be issued
------------------------------------------------------
From: Maciej Wieczor-Retman <maciej.wieczor-retman(a)intel.com>
Subject: kasan: unpoison pcpu chunks with base address tag
Date: Tue, 04 Nov 2025 14:49:08 +0000
Patch series "kasan: vmalloc: Fix incorrect tag assignment with multiple
vm_structs".
A KASAN tag mismatch, possibly resulting in a kernel panic, can be
observed on systems with a tag-based KASAN enabled and with multiple NUMA
nodes. Initially it was only noticed on x86 [1] but later a similar issue
was also reported on arm64 [2].
Specifically, the problem is related to how vm_structs interact with
pcpu_chunks: both when they are allocated and assigned, and when
pcpu_chunk addresses are derived.
When vm_structs are allocated, they are tagged if vmalloc support is
enabled alongside the KASAN mode. Later, when the first pcpu chunk is
allocated, its 'base_addr' field is set to the address of the first
allocated vm_struct, so it inherits that vm_struct's tag.
When pcpu_chunk addresses are later derived (by pcpu_chunk_addr(), for
example in pcpu_alloc_noprof()), offsets are added to the base_addr
field. With more than one chunk, some of those offsets point into memory
allocated with a different vm_struct: the lower address bits are derived
correctly, but the tag bits in the top of the pointer won't match the
shadow memory contents.
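A small illustrative program (plain C, not kernel code; the addresses,
tag values, and TAG_SHIFT placement are made up to mimic arm64 SW_TAGS)
shows why the derived pointer's tag disagrees with the shadow:

  #include <stdint.h>
  #include <stdio.h>

  #define TAG_SHIFT 56    /* SW_TAGS keeps the tag in the top byte */

  static uint64_t set_tag(uint64_t addr, uint8_t tag)
  {
      return (addr & ~(0xffULL << TAG_SHIFT)) |
             ((uint64_t)tag << TAG_SHIFT);
  }

  int main(void)
  {
      /* Two chunks from separate allocations: adjacent layout,
       * different (made-up) random tags. */
      uint64_t chunk0 = set_tag(0x1000, 0xab);  /* becomes base_addr */
      uint64_t chunk1 = set_tag(0x2000, 0xcd);

      /* pcpu_chunk_addr()-style math: base + offset.  The tag bits
       * ride along, so the result keeps chunk0's tag even though it
       * now points into chunk1's memory. */
      uint64_t derived = chunk0 + 0x1000;

      printf("derived tag 0x%02x, shadow expects 0x%02x\n",
             (unsigned)(derived >> TAG_SHIFT),
             (unsigned)(chunk1 >> TAG_SHIFT));
      return 0;
  }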
The solution (proposed in v2 of the x86 KASAN series [3]) is to give the
vm_structs the same tag when allocating them for the per-cpu allocator
(in pcpu_get_vm_areas()).
Originally these patches were part of the x86 KASAN series [4].
This patch (of 2):
A KASAN tag mismatch, possibly causing a kernel panic, can be observed on
systems with a tag-based KASAN enabled and with multiple NUMA nodes. It
was reported on arm64 and reproduced on x86. It can be explained in the
following points:
1. There can be more than one virtual memory chunk.
2. Chunk's base address has a tag.
3. The base address points at the first chunk and thus inherits
the tag of the first chunk.
4. The subsequent chunks will be accessed with the tag from the
first chunk.
5. Thus, the subsequent chunks need to have their tag set to
match that of the first chunk.
Refactor code by moving it into a helper in preparation for the actual
fix.
Link: https://lkml.kernel.org/r/821677dd824d003cc5b7a77891db4723e23518ea.17622670…
Link: https://lore.kernel.org/all/e7e04692866d02e6d3b32bb43b998e5d17092ba4.173868… [1]
Link: https://lore.kernel.org/all/aMUrW1Znp1GEj7St@MiWiFi-R3L-srv/ [2]
Link: https://lore.kernel.org/all/CAPAsAGxDRv_uFeMYu9TwhBVWHCCtkSxoWY4xmFB_vowMbi… [3]
Link: https://lore.kernel.org/all/cover.1761763681.git.m.wieczorretman@pm.me/ [4]
Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman(a)intel.com>
Tested-by: Baoquan He <bhe(a)redhat.com>
Cc: Alexander Potapenko <glider(a)google.com>
Cc: Andrey Konovalov <andreyknvl(a)gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a(a)gmail.com>
Cc: Dmitriy Vyukov <dvyukov(a)google.com>
Cc: Marco Elver <elver(a)google.com>
Cc: "Uladzislau Rezki (Sony)" <urezki(a)gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino(a)arm.com>
Cc: <stable(a)vger.kernel.org> [6.1+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
include/linux/kasan.h | 10 ++++++++++
mm/kasan/common.c | 11 +++++++++++
mm/vmalloc.c | 4 +---
3 files changed, 22 insertions(+), 3 deletions(-)
--- a/include/linux/kasan.h~kasan-unpoison-pcpu-chunks-with-base-address-tag
+++ a/include/linux/kasan.h
@@ -614,6 +614,13 @@ static __always_inline void kasan_poison
__kasan_poison_vmalloc(start, size);
}
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms);
+static __always_inline void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_vmap_areas(vms, nr_vms);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -638,6 +645,9 @@ static inline void *kasan_unpoison_vmall
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
+static inline void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms)
+{ }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
--- a/mm/kasan/common.c~kasan-unpoison-pcpu-chunks-with-base-address-tag
+++ a/mm/kasan/common.c
@@ -28,6 +28,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
+#include <linux/vmalloc.h>
#include "kasan.h"
#include "../slab.h"
@@ -582,3 +583,13 @@ bool __kasan_check_byte(const void *addr
}
return true;
}
+
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms)
+{
+ int area;
+
+ for (area = 0 ; area < nr_vms ; area++) {
+ kasan_poison(vms[area]->addr, vms[area]->size,
+ arch_kasan_get_tag(vms[area]->addr), false);
+ }
+}
--- a/mm/vmalloc.c~kasan-unpoison-pcpu-chunks-with-base-address-tag
+++ a/mm/vmalloc.c
@@ -4870,9 +4870,7 @@ retry:
* With hardware tag-based KASAN, marking is skipped for
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
- for (area = 0; area < nr_vms; area++)
- vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+ kasan_unpoison_vmap_areas(vms, nr_vms);
kfree(vas);
return vms;
_
Patches currently in -mm which might be from maciej.wieczor-retman(a)intel.com are
kasan-unpoison-vms-addresses-with-a-common-tag.patch