From: Kairui Song <kasong@tencent.com>
The helper for shmem swap freeing does not handle the order of swap entries correctly. It uses xa_cmpxchg_irq() to erase the swap entry, but it reads the entry order beforehand using xa_get_order() without lock protection. As a result, the order could be a stale value if the entry is split after the xa_get_order() and before the xa_cmpxchg_irq(). In fact, there are more ways for other races to occur during that time window.
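For illustration, the racy sequence before this patch looks roughly like this (an annotated copy of the current code, shown here only to mark the window):

  int order = xa_get_order(&mapping->i_pages, index);  /* no lock held */
  void *old;

  /*
   * The large swap entry may be split by someone else right here, so
   * "order" can already be stale before the cmpxchg below erases anything.
   */
  old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
  if (old != radswap)
          return 0;
  /* Puts 1 << order entries based on the possibly stale order */
  swap_put_entries_direct(radix_to_swp_entry(radswap), 1 << order);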
To fix that, open code the XArray cmpxchg and put the order retrieval and the value check in the same critical section. Also ensure the order won't exceed the truncation boundary.
I observed random swapoff hangs and swap entry leaks when stress testing ZSWAP with shmem. After applying this patch, the problem is resolved.
Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
Cc: stable@vger.kernel.org
Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/shmem.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0b4c8c70d017..e160da0cd30f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -961,18 +961,28 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
  * the number of pages being freed. 0 means entry not found in XArray (0 pages
  * being freed).
  */
-static long shmem_free_swap(struct address_space *mapping,
-                            pgoff_t index, void *radswap)
+static long shmem_free_swap(struct address_space *mapping, pgoff_t index,
+                            unsigned int max_nr, void *radswap)
 {
-        int order = xa_get_order(&mapping->i_pages, index);
-        void *old;
+        XA_STATE(xas, &mapping->i_pages, index);
+        unsigned int nr_pages = 0;
+        void *entry;
 
-        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
-        if (old != radswap)
-                return 0;
-        swap_put_entries_direct(radix_to_swp_entry(radswap), 1 << order);
+        xas_lock_irq(&xas);
+        entry = xas_load(&xas);
+        if (entry == radswap) {
+                nr_pages = 1 << xas_get_order(&xas);
+                if (index == round_down(xas.xa_index, nr_pages) && nr_pages < max_nr)
+                        xas_store(&xas, NULL);
+                else
+                        nr_pages = 0;
+        }
+        xas_unlock_irq(&xas);
+
+        if (nr_pages)
+                swap_put_entries_direct(radix_to_swp_entry(radswap), nr_pages);
 
-        return 1 << order;
+        return nr_pages;
 }
 
 /*
@@ -1124,8 +1134,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                         if (xa_is_value(folio)) {
                                 if (unfalloc)
                                         continue;
-                                nr_swaps_freed += shmem_free_swap(mapping,
-                                                        indices[i], folio);
+                                nr_swaps_freed += shmem_free_swap(mapping, indices[i],
+                                                        end - indices[i], folio);
                                 continue;
                         }
 
@@ -1195,7 +1205,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 
                                 if (unfalloc)
                                         continue;
-                                swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+                                swaps_freed = shmem_free_swap(mapping, indices[i],
+                                                        end - indices[i], folio);
                                 if (!swaps_freed) {
                                         /* Swap was replaced by page: retry */
                                         index = indices[i];
---
base-commit: ab3d40bdac831c67e130fda12f3011505556500f
change-id: 20260111-shmem-swap-fix-8d0e20a14b5d
Best regards,
On 1/12/26 1:53 AM, Kairui Song wrote:
Thanks for the analysis, and it makes sense to me. Would the following implementation be simpler and also address your issue (we will not release the lock in __xa_cmpxchg() since gfp = 0)?
static long shmem_free_swap(struct address_space *mapping,
                            pgoff_t index, void *radswap)
{
        XA_STATE(xas, &mapping->i_pages, index);
        int order;
        void *old;

        xas_lock_irq(&xas);
        order = xas_get_order(&xas);
        old = __xa_cmpxchg(xas.xa, index, radswap, NULL, 0);
        if (old != radswap) {
                xas_unlock_irq(&xas);
                return 0;
        }
        xas_unlock_irq(&xas);

        swap_put_entries_direct(radix_to_swp_entry(radswap), 1 << order);

        return 1 << order;
}