On Mon, Nov 26, 2018 at 11:40:20AM +0100, gregkh@linuxfoundation.org wrote:
The patch below does not apply to the 4.19-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
The fix for 4.19 is rather more complex because we don't have the right information in the right places. Dan, does this look right to you?
diff --git a/fs/dax.c b/fs/dax.c
index 0fb270f0a0ef6..b8dd66f1951a6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -227,7 +227,9 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
  * Must be called with the i_pages lock held.
  */
 static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+		pgoff_t index, void ***slotp,
+		bool (*wait_fn)(struct address_space *mapping,
+				pgoff_t index, void *entry))
 {
 	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
@@ -253,7 +255,7 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
-		revalidate = wait_fn();
+		revalidate = wait_fn(mapping, index, entry);
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
 		if (revalidate)
@@ -261,7 +263,8 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 	}
 }
-static bool entry_wait(void)
+static bool entry_wait(struct address_space *mapping, unsigned long index,
+		void *entry)
 {
 	schedule();
 	/*
@@ -393,12 +396,18 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
-static bool entry_wait_revalidate(void)
+static bool entry_wait_revalidate(struct address_space *mapping,
+		unsigned long index, void *entry)
 {
 	rcu_read_unlock();
 	schedule();
 	rcu_read_lock();
+	/*
+	 * We're not going to do anything with this entry; wake the next
+	 * task in line
+	 */
+	put_unlocked_mapping_entry(mapping, index, entry);
 	/*
 	 * Tell __get_unlocked_mapping_entry() to take a break, we need
 	 * to revalidate page->mapping after dropping locks
------------------ original commit in Linus's tree ------------------
From 25bbe21bf427a81b8e3ccd480ea0e1d940256156 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <willy@infradead.org>
Date: Fri, 16 Nov 2018 15:50:02 -0500
Subject: [PATCH] dax: Avoid losing wakeup in dax_lock_mapping_entry
After calling get_unlocked_entry(), you have to call put_unlocked_entry() to avoid subsequent waiters losing wakeups.
Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
diff --git a/fs/dax.c b/fs/dax.c
index cf2394e2bf4b..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -391,6 +391,7 @@ bool dax_lock_mapping_entry(struct page *page)
 			rcu_read_unlock();
 			entry = get_unlocked_entry(&xas);
 			xas_unlock_irq(&xas);
+			put_unlocked_entry(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}