On Wed, Jan 8, 2025 at 5:46 PM Nhat Pham <nphamcs@gmail.com> wrote:
On Wed, Jan 8, 2025 at 9:34 AM Yosry Ahmed <yosryahmed@google.com> wrote:
Actually, using the mutex to protect against CPU hotunplug is not too complicated. The following diff is one way to do it (lightly tested). Johannes, Nhat, any preferences between this patch (disabling migration) and the following diff?
I mean if this works, this over migration disabling any day? :)
diff --git a/mm/zswap.c b/mm/zswap.c
index f6316b66fb236..4d6817c679a54 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -869,17 +869,40 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

+	mutex_lock(&acomp_ctx->mutex);
 	if (!IS_ERR_OR_NULL(acomp_ctx)) {
 		if (!IS_ERR_OR_NULL(acomp_ctx->req))
 			acomp_request_free(acomp_ctx->req);
+		acomp_ctx->req = NULL;
 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 			crypto_free_acomp(acomp_ctx->acomp);
 		kfree(acomp_ctx->buffer);
 	}
+	mutex_unlock(&acomp_ctx->mutex);

 	return 0;
 }
+static struct crypto_acomp_ctx *acomp_ctx_get_cpu_locked(
+		struct crypto_acomp_ctx __percpu *acomp_ctx)
+{
+	struct crypto_acomp_ctx *ctx;
+
+	for (;;) {
+		ctx = raw_cpu_ptr(acomp_ctx);
+		mutex_lock(&ctx->mutex);
I'm a bit confused. IIUC, ctx is per-cpu right? What's protecting this cpu-local data (including the mutex) from being invalidated under us while we're sleeping and waiting for the mutex?
If it is somehow protected, then yeah this seems quite elegant :)
I thought about this again. Could it be the following?
bool cpus_is_read_locked(void)
{
	return percpu_is_read_locked(&cpu_hotplug_lock);
}

in zswap:

	bool locked = cpus_is_read_locked();

	if (!locked)
		cpus_read_lock();

	.... // do our job

	if (!locked)
		cpus_read_unlock();
This seems to resolve all three problems:
1. if our context already holds the read lock, we won't take it again;
2. if another context holds the write lock, we wait for CPU hotplug to complete by acquiring the read lock;
3. if our context doesn't hold a read lock, we take it.
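For example, in zswap_compress() the guard could look roughly like the sketch below. Completely untested; cpus_is_read_locked() is only the helper proposed above (not an existing API), hotplug_locked is an illustrative local name, and the compression body itself is elided:

static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	bool hotplug_locked;
	bool ret;

	/*
	 * Hold cpu_hotplug_lock for read unless this context already holds
	 * it, so zswap_cpu_comp_dead() cannot free the per-CPU req/buffer
	 * while we sleep on the mutex. cpus_is_read_locked() is the helper
	 * sketched above.
	 */
	hotplug_locked = cpus_is_read_locked();
	if (!hotplug_locked)
		cpus_read_lock();

	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	/* ... existing compression path, unchanged ... */
	ret = true;	/* stands in for the existing return value */

	mutex_unlock(&acomp_ctx->mutex);
	if (!hotplug_locked)
		cpus_read_unlock();

	return ret;
}

zswap_decompress() would need the same guard, since it uses the same per-CPU acomp_ctx.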
+		if (likely(ctx->req))
+			return ctx;
+		/* Raced with zswap_cpu_comp_dead() on CPU hotunplug */
+		mutex_unlock(&ctx->mutex);
+	}
+}
+
+static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *ctx)
+{
+	mutex_unlock(&ctx->mutex);
+}
 static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 			   struct zswap_pool *pool)
 {
@@ -893,10 +916,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	gfp_t gfp;
 	u8 *dst;
-	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
-	mutex_lock(&acomp_ctx->mutex);
+	acomp_ctx = acomp_ctx_get_cpu_locked(pool->acomp_ctx);
 	dst = acomp_ctx->buffer;
 	sg_init_table(&input, 1);
 	sg_set_page(&input, page, PAGE_SIZE, 0);
@@ -949,7 +969,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	else if (alloc_ret)
 		zswap_reject_alloc_fail++;
-	mutex_unlock(&acomp_ctx->mutex);
+	acomp_ctx_put_unlock(acomp_ctx);
 	return comp_ret == 0 && alloc_ret == 0;
 }
@@ -960,9 +980,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	struct crypto_acomp_ctx *acomp_ctx;
 	u8 *src;
-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-	mutex_lock(&acomp_ctx->mutex);
+	acomp_ctx = acomp_ctx_get_cpu_locked(entry->pool->acomp_ctx);
 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
 	/*
Thanks
Barry