On 2025/1/8 12:46, Nhat Pham wrote:
> On Wed, Jan 8, 2025 at 9:34 AM Yosry Ahmed <yosryahmed@google.com> wrote:
>> Actually, using the mutex to protect against CPU hotunplug is not too
>> complicated. The following diff is one way to do it (lightly tested).
>> Johannes, Nhat, any preferences between this patch (disabling
>> migration) and the following diff?
> I mean, if this works, I'd take this over migration disabling any day? :)
>> diff --git a/mm/zswap.c b/mm/zswap.c
>> index f6316b66fb236..4d6817c679a54 100644
>> --- a/mm/zswap.c
>> +++ b/mm/zswap.c
>> @@ -869,17 +869,40 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
>>  	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
>>  	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
>>
>> +	mutex_lock(&acomp_ctx->mutex);
>>  	if (!IS_ERR_OR_NULL(acomp_ctx)) {
>>  		if (!IS_ERR_OR_NULL(acomp_ctx->req))
>>  			acomp_request_free(acomp_ctx->req);
>> +		acomp_ctx->req = NULL;
>>  		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
>>  			crypto_free_acomp(acomp_ctx->acomp);
>>  		kfree(acomp_ctx->buffer);
>>  	}
>> +	mutex_unlock(&acomp_ctx->mutex);
>>
>>  	return 0;
>>  }
>>
>> +static struct crypto_acomp_ctx *acomp_ctx_get_cpu_locked(
>> +		struct crypto_acomp_ctx __percpu *acomp_ctx)
>> +{
>> +	struct crypto_acomp_ctx *ctx;
>> +
>> +	for (;;) {
>> +		ctx = raw_cpu_ptr(acomp_ctx);
>> +		mutex_lock(&ctx->mutex);
> I'm a bit confused. IIUC, ctx is per-CPU, right? What's protecting this
> CPU-local data (including the mutex) from being invalidated under us
> while we're sleeping and waiting for the mutex?
Yeah, it's not safe as-is. We would have to grab the per-CPU pointer with preemption disabled (get_cpu_ptr()), so that CPU offline can't kick in, and take a refcount on the ctx before re-enabling preemption, since we can't mutex_lock() in a preempt-disabled section; something like the sketch below.
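A rough, untested sketch of that idea (the "ref" refcount_t field in struct crypto_acomp_ctx and the acomp_ctx_free_resources() helper are made up here for illustration; they are not in the diff above):

static struct crypto_acomp_ctx *acomp_ctx_get_cpu(
		struct crypto_acomp_ctx __percpu *percpu_ctx)
{
	struct crypto_acomp_ctx *ctx;

	/* get_cpu_ptr() disables preemption, so CPU offline can't run here */
	ctx = get_cpu_ptr(percpu_ctx);
	refcount_inc(&ctx->ref);	/* pin the ctx resources */
	put_cpu_ptr(percpu_ctx);	/* preemption back on, sleeping is OK now */

	mutex_lock(&ctx->mutex);
	return ctx;
}

static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *ctx)
{
	mutex_unlock(&ctx->mutex);
	/* whoever drops the last reference (us or CPU offline) frees */
	if (refcount_dec_and_test(&ctx->ref))
		acomp_ctx_free_resources(ctx);
}

This assumes zswap_cpu_comp_prepare() initializes ctx->ref to 1, and zswap_cpu_comp_dead() just drops that initial reference instead of freeing the resources directly.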
Thanks.
> If it is somehow protected, then yeah this seems quite elegant :)
>> +		if (likely(ctx->req))
>> +			return ctx;
>> +		/* Raced with zswap_cpu_comp_dead() on CPU hotunplug */
>> +		mutex_unlock(&ctx->mutex);
>> +	}
>> +}
>> +
>> +static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *ctx)
>> +{
>> +	mutex_unlock(&ctx->mutex);
>> +}
>> +
>>  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>>  			   struct zswap_pool *pool)
>>  {
>> @@ -893,10 +916,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>>  	gfp_t gfp;
>>  	u8 *dst;
>>
>> -	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
>> -
>> -	mutex_lock(&acomp_ctx->mutex);
>> -
>> +	acomp_ctx = acomp_ctx_get_cpu_locked(pool->acomp_ctx);
>>  	dst = acomp_ctx->buffer;
>>  	sg_init_table(&input, 1);
>>  	sg_set_page(&input, page, PAGE_SIZE, 0);
>> @@ -949,7 +969,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>>  	else if (alloc_ret)
>>  		zswap_reject_alloc_fail++;
>>
>> -	mutex_unlock(&acomp_ctx->mutex);
>> +	acomp_ctx_put_unlock(acomp_ctx);
>>  	return comp_ret == 0 && alloc_ret == 0;
>>  }
>>
>> @@ -960,9 +980,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>>  	struct crypto_acomp_ctx *acomp_ctx;
>>  	u8 *src;
>>
>> -	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
>> -
>> -	mutex_lock(&acomp_ctx->mutex);
>> +	acomp_ctx = acomp_ctx_get_cpu_locked(entry->pool->acomp_ctx);
>>  	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
>>
>>  	/*