When a VM boots with one virtio-crypto PCI device and the builtin backend,
and an openssl benchmark is run with multiple processes, such as

openssl speed -evp aes-128-cbc -engine afalg -seconds 10 -multi 32

the openssl processes hang and an error like this is reported:

virtio_crypto virtio0: dataq.0:id 3 is not a head!
It seems that the data virtqueue needs protection while it is handled in
the virtio done-notification path. With spinlock protection added in
virtcrypto_done_task(), the openssl benchmark with multiple processes
works well.
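
For context, the submission side already serializes on the same lock. Below
is a simplified sketch of how requests are queued (a hypothetical helper,
modeled on the driver's request-submission paths, not the actual code):

/*
 * Requests are added to the data virtqueue under data_vq->lock, so the
 * completion tasklet must hold the same lock around virtqueue_get_buf();
 * otherwise concurrent vring accesses corrupt the descriptor bookkeeping
 * and produce the "is not a head!" error above.
 */
static int virtcrypto_submit_sketch(struct data_queue *data_vq,
				    struct scatterlist **sgs,
				    unsigned int num_out, unsigned int num_in,
				    struct virtio_crypto_request *vc_req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in,
				vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);

	return err;
}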
Fixes: fed93fb62e05 ("crypto: virtio - Handle dataq logic with tasklet")
Cc: stable@vger.kernel.org
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
drivers/crypto/virtio/virtio_crypto_core.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 3d241446099c..ccc6b5c1b24b 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -75,15 +75,20 @@ static void virtcrypto_done_task(unsigned long data)
struct data_queue *data_vq = (struct data_queue *)data;
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
+ unsigned long flags;
unsigned int len;
+ spin_lock_irqsave(&data_vq->lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_unlock_irqrestore(&data_vq->lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
+ spin_lock_irqsave(&data_vq->lock, flags);
}
} while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&data_vq->lock, flags);
}
static void virtcrypto_dataq_callback(struct virtqueue *vq)
--
2.39.3
When the SLAB_STORE_USER debug flag is used, any metadata placed after
the original kmalloc request size (orig_size) is not properly aligned
on 64-bit architectures because its type is unsigned int. When both KASAN
and SLAB_STORE_USER are enabled, kasan_alloc_meta is misaligned.
Note that 64-bit architectures without HAVE_EFFICIENT_UNALIGNED_ACCESS
are assumed to require 64-bit accesses to be 64-bit aligned.
See HAVE_64BIT_ALIGNED_ACCESS and commit adab66b71abf ("Revert:
"ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS"") for more details.
Because not all architectures support unaligned memory accesses,
ensure that all metadata (track, orig_size, kasan_{alloc,free}_meta)
in a slab object is word-aligned. struct track and kasan_{alloc,free}_meta
are aligned by adding __aligned(__alignof__(unsigned long)).
For orig_size, use ALIGN(sizeof(unsigned int), __alignof__(unsigned long))
to make clear that its size remains unsigned int while it must be aligned
to a word boundary. On 64-bit architectures, this reserves 8 bytes for
orig_size, which is acceptable since kmalloc's original request size
tracking is intended for debugging rather than production use.
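
For illustration, here is a small userspace sketch (ALIGN() reimplemented in
its power-of-two form, equivalent to the kernel macro) of the space now
reserved for orig_size on a 64-bit build:

#include <stdio.h>
#include <stddef.h>

/* Equivalent to the kernel's ALIGN() for power-of-two alignment. */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	size_t plain  = sizeof(unsigned int);			/* 4 */
	size_t padded = ALIGN(sizeof(unsigned int),
			      __alignof__(unsigned long));	/* 8 on 64-bit */

	printf("orig_size without padding: %zu bytes\n", plain);
	printf("orig_size with padding:    %zu bytes\n", padded);
	return 0;
}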
Cc: stable@vger.kernel.org
Fixes: 6edf2576a6cc ("mm/slub: enable debugging memory wasting of kmalloc")
Acked-by: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
v1 -> v2:
- Added Andrey's Acked-by.
- Added references to HAVE_64BIT_ALIGNED_ACCESS and the commit that
resurrected it.
- Used __alignof__() instead of sizeof(), as suggested by Pedro (off-list).
Note: either __alignof__() or sizeof() produces exactly the same mm/slub.o
file, so there is no functional difference.
Thanks!
mm/kasan/kasan.h | 4 ++--
mm/slub.c | 16 +++++++++++-----
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 129178be5e64..b86b6e9f456a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -265,7 +265,7 @@ struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
depot_stack_handle_t aux_stack[2];
-};
+} __aligned(__alignof__(unsigned long));
struct qlist_node {
struct qlist_node *next;
@@ -289,7 +289,7 @@ struct qlist_node {
struct kasan_free_meta {
struct qlist_node quarantine_link;
struct kasan_track free_track;
-};
+} __aligned(__alignof__(unsigned long));
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/mm/slub.c b/mm/slub.c
index a585d0ac45d4..462a39d57b3a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -344,7 +344,7 @@ struct track {
int cpu; /* Was running on cpu */
int pid; /* Pid context */
unsigned long when; /* When did the operation occur */
-};
+} __aligned(__alignof__(unsigned long));
enum track_item { TRACK_ALLOC, TRACK_FREE };
@@ -1196,7 +1196,7 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
off += 2 * sizeof(struct track);
if (slub_debug_orig_size(s))
- off += sizeof(unsigned int);
+ off += ALIGN(sizeof(unsigned int), __alignof__(unsigned long));
off += kasan_metadata_size(s, false);
@@ -1392,7 +1392,8 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
off += 2 * sizeof(struct track);
if (s->flags & SLAB_KMALLOC)
- off += sizeof(unsigned int);
+ off += ALIGN(sizeof(unsigned int),
+ __alignof__(unsigned long));
}
off += kasan_metadata_size(s, false);
@@ -7820,9 +7821,14 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
*/
size += 2 * sizeof(struct track);
- /* Save the original kmalloc request size */
+ /*
+ * Save the original kmalloc request size.
+ * Although the request size is an unsigned int,
+ * make sure that it is aligned to a word boundary.
+ */
if (flags & SLAB_KMALLOC)
- size += sizeof(unsigned int);
+ size += ALIGN(sizeof(unsigned int),
+ __alignof__(unsigned long));
}
#endif
--
2.43.0
Hi,
I would like to request backporting commit b441cf3f8c4b ("xfrm: delete
x->tunnel as we delete x") to all LTS kernels.
This patch actually fixes a use-after-free issue, but it has not been
backported to any of the LTS versions, which are still affected.
As the patch describes, a specific trigger scenario could be:
Suppose a tunnel packet is received (e.g., in ip_local_deliver()) whose
outer layer is the IPComp protocol and whose inner layer is a fragmented
packet. During outer-packet processing, the packet goes through
xfrm_input(), which takes a reference on the IPComp xfrm_state. The packet
is then re-injected into the network stack via gro_cells_receive() and
placed in the reassembly queue. When the netns exits and cleanup_net()
runs, ipv4_frags_exit_net() is called before xfrm_net_exit(), but due to
asynchronous scheduling fqdir_free_work() may execute after
xfrm_state_fini().
In xfrm_state_fini(), xfrm_state_flush() puts and deletes the xfrm_state
for IPPROTO_COMP, but does not delete the xfrm_state for IPPROTO_IPIP.
Meanwhile, the skb in the reassembly queue holds the last reference to the
IPPROTO_COMP xfrm_state, so it isn't destroyed yet. Only when the skb in
the reassembly queue is destroyed does the IPPROTO_COMP xfrm_state get
fully destroyed, which calls ipcomp_destroy() to delete the IPPROTO_IPIP
xfrm_state. However, by this time, the hash tables (net->xfrm.state_byxxx)
have already been kfreed in xfrm_state_fini(), leading to a use-after-free
during the deletion.
The bug has existed since kernel v2.6.29, so the patch should be
backported to all LTS kernels.
thanks,
Slavin Liu
`build_assert` relies on the compiler to optimize out its error path;
otherwise the build fails with the dreaded error:
ERROR: modpost: "rust_build_error" [path/to/module.ko] undefined!
It has been observed that very trivial code performing I/O accesses
(sometimes even using an immediate value) would seemingly randomly fail
with this error whenever `CLIPPY=1` was set. The same behavior was also
observed under different, very similar conditions [1][2].
The cause, as pointed out by Gary Guo [3], appears to be that the
failing function is eventually using `build_assert` with its argument,
but is only annotated with `#[inline]`. This gives the compiler freedom
to not inline the function, which it notably did when Clippy was active,
triggering the error.
The fix is to annotate functions that pass their argument to
`build_assert` with `#[inline(always)]`, telling the compiler to be as
aggressive as possible about inlining them. This is also the right thing
to do, since inlining is mandatory for correctness in these cases.
This series fixes all possible points of failure in the kernel crate,
and adds documentation to `build_assert` explaining how to properly
inline functions for which this behavior may arise.
[1] https://lore.kernel.org/all/DEEUYUOAEZU3.1J1HM2YQ10EX1@nvidia.com/
[2] https://lore.kernel.org/all/A1A280D4-836E-4D75-863E-30B1C276C80C@collabora.…
[3] https://lore.kernel.org/all/20251121143008.2f5acc33.gary@garyguo.net/
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
---
Changes in v3:
- Add "Fixes:" tags.
- CC stable on fixup patches.
- Link to v2: https://patch.msgid.link/20251128-io-build-assert-v2-0-a9ea9ce7d45d@nvidia.…
Changes in v2:
- Turn into a series and address other similar cases in the kernel crate.
- Link to v1: https://patch.msgid.link/20251127-io-build-assert-v1-1-04237f2e5850@nvidia.…
---
Alexandre Courbot (7):
rust: build_assert: add instructions for use with function arguments
rust: io: always inline functions using build_assert with arguments
rust: cpufreq: always inline functions using build_assert with arguments
rust: bits: always inline functions using build_assert with arguments
rust: sync: refcount: always inline functions using build_assert with arguments
rust: irq: always inline functions using build_assert with arguments
rust: num: bounded: add missing comment for always inlined function
rust/kernel/bits.rs | 6 ++++--
rust/kernel/build_assert.rs | 7 ++++++-
rust/kernel/cpufreq.rs | 2 ++
rust/kernel/io.rs | 9 ++++++---
rust/kernel/io/resource.rs | 2 ++
rust/kernel/irq/flags.rs | 2 ++
rust/kernel/num/bounded.rs | 1 +
rust/kernel/sync/refcount.rs | 3 ++-
8 files changed, 25 insertions(+), 7 deletions(-)
---
base-commit: ba65a4e7120a616d9c592750d9147f6dcafedffa
change-id: 20251127-io-build-assert-3579a5bfb81c
Best regards,
--
Alexandre Courbot <acourbot@nvidia.com>
From: Namjae Jeon <linkinjeon@kernel.org>
[ Upstream commit b39a1833cc4a2755b02603eec3a71a85e9dff926 ]
Under high concurrency, a tree-connection object (tcon) can be freed on
the disconnect path while another path still holds a reference and later
performs a *_put()/write on it.
Reported-by: Qianchang Zhao <pioooooooooip@gmail.com>
Reported-by: Zhitong Liu <liuzhitong1993@gmail.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
LLM Generated explanations, may be completely bogus:
### 3. CLASSIFICATION
This is a **security/stability bug fix**:
- **Use-after-free (UAF)** is a serious memory safety issue
- ksmbd is **network-facing code** (SMB server) - security-sensitive
- Can lead to kernel crashes, data corruption, or potential remote
exploitation
### 4. SCOPE AND RISK ASSESSMENT
**Size**: ~30 lines changed across 3 files
**Files affected**:
- `fs/smb/server/mgmt/tree_connect.c` - core reference counting logic
- `fs/smb/server/mgmt/tree_connect.h` - struct definition
- `fs/smb/server/smb2pdu.c` - disconnect path
**Technical mechanism of the bug**:
The previous fix (commit 33b235a6e6ebe) introduced a waitqueue-based
refcount mechanism:
1. `ksmbd_tree_conn_disconnect()` would decrement refcount, wait for it
to hit 0, then always call `kfree()`
2. `ksmbd_tree_connect_put()` would decrement refcount and wake up
waiters
**The race condition**:
- Thread A: In disconnect, waits for refcount to hit 0, then runs more
code before `kfree()`
- Thread B: Drops last reference via `_put()`, refcount hits 0
- Thread A: Wakes up, but Thread B might still be executing code that
accesses `tcon`
- Thread A: Frees `tcon`
- Thread B: UAF when accessing `tcon` after `_put()` returns
**The fix**: Changes to standard "kref-style" reference counting:
- Whoever drops refcount to 0 immediately calls `kfree()`
- No window between refcount hitting 0 and free
- Removes the buggy waitqueue mechanism entirely
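
For comparison, a sketch of the generic kref form of the pattern the fix
adopts (the diff below open-codes it with atomic_dec_and_test(); the names
here are illustrative, and kref_init()/kref_get() on the acquisition side
are omitted):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct tcon {
	struct kref refcount;
	/* ... payload ... */
};

static void tcon_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcon, refcount));
}

static void tcon_put(struct tcon *tcon)
{
	/* Whoever drops the last reference frees the object: there is
	 * no window between the count reaching zero and the kfree(). */
	kref_put(&tcon->refcount, tcon_release);
}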
**Risk**: LOW
- The new pattern (free on last put) is the standard kernel pattern
(kref)
- Simpler code is easier to verify correct
- Self-contained within tree_connect subsystem
### 5. USER IMPACT
- **Affected users**: Anyone running ksmbd (kernel SMB server)
- **Trigger**: High concurrency - realistic for file servers
- **Severity**: HIGH - kernel crash or potential security exploitation
- **Real-world occurrence**: Two Reported-by tags confirm users hit this
### 6. STABILITY INDICATORS
- Signed-off by ksmbd maintainer (Namjae Jeon) and CIFS/SMB maintainer
(Steve French)
- Two independent reporters indicate real bug
- The buggy code was introduced in commit 33b235a6e6ebe (v6.6-rc1)
### 7. DEPENDENCY CHECK
- The fix is self-contained
- Depends only on commit 33b235a6e6ebe being present (which introduced
the bug)
- Affects: v6.6 and all later versions
- Should apply cleanly - only 2 minor unrelated commits to
tree_connect.c since the buggy commit
### Summary
| Criterion | Assessment |
|-----------|------------|
| Fixes real bug | ✅ UAF in network-facing code |
| Security impact | ✅ High - potential remote exploitation |
| Small and contained | ✅ ~30 lines, 3 files, single subsystem |
| No new features | ✅ Pure bug fix |
| User-reported | ✅ Two Reported-by tags |
| Clean backport | ✅ Self-contained fix |
This commit fixes a use-after-free vulnerability in ksmbd, the in-kernel
SMB server. The bug exists in the reference counting mechanism for tree
connections and can be triggered under concurrent access - a realistic
scenario for network file servers. UAF bugs in network-facing kernel
code are serious security issues. The fix is small, uses a well-
established kernel pattern (kref-style refcounting), and is self-
contained. It should be backported to all stable kernels containing
commit 33b235a6e6ebe (v6.6+).
**YES**
fs/smb/server/mgmt/tree_connect.c | 18 ++++--------------
fs/smb/server/mgmt/tree_connect.h | 1 -
fs/smb/server/smb2pdu.c | 3 ---
3 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index ecfc575086712..d3483d9c757c7 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -78,7 +78,6 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
tree_conn->t_state = TREE_NEW;
status.tree_conn = tree_conn;
atomic_set(&tree_conn->refcount, 1);
- init_waitqueue_head(&tree_conn->refcount_q);
ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
KSMBD_DEFAULT_GFP));
@@ -100,14 +99,8 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
{
- /*
- * Checking waitqueue to releasing tree connect on
- * tree disconnect. waitqueue_active is safe because it
- * uses atomic operation for condition.
- */
- if (!atomic_dec_return(&tcon->refcount) &&
- waitqueue_active(&tcon->refcount_q))
- wake_up(&tcon->refcount_q);
+ if (atomic_dec_and_test(&tcon->refcount))
+ kfree(tcon);
}
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
@@ -119,14 +112,11 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
xa_erase(&sess->tree_conns, tree_conn->id);
write_unlock(&sess->tree_conns_lock);
- if (!atomic_dec_and_test(&tree_conn->refcount))
- wait_event(tree_conn->refcount_q,
- atomic_read(&tree_conn->refcount) == 0);
-
ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
ksmbd_release_tree_conn_id(sess, tree_conn->id);
ksmbd_share_config_put(tree_conn->share_conf);
- kfree(tree_conn);
+ if (atomic_dec_and_test(&tree_conn->refcount))
+ kfree(tree_conn);
return ret;
}
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index a42cdd0510411..f0023d86716f2 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -33,7 +33,6 @@ struct ksmbd_tree_connect {
int maximal_access;
bool posix_extensions;
atomic_t refcount;
- wait_queue_head_t refcount_q;
unsigned int t_state;
};
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 447e76da44409..aae42d2abf7bc 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -2200,7 +2200,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
goto err_out;
}
- WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
tcon->t_state = TREE_DISCONNECTED;
write_unlock(&sess->tree_conns_lock);
@@ -2210,8 +2209,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
goto err_out;
}
- work->tcon = NULL;
-
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp,
sizeof(struct smb2_tree_disconnect_rsp));
--
2.51.0