The patch below does not apply to the 4.19-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 25bbe21bf427a81b8e3ccd480ea0e1d940256156 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <willy@infradead.org>
Date: Fri, 16 Nov 2018 15:50:02 -0500
Subject: [PATCH] dax: Avoid losing wakeup in dax_lock_mapping_entry
After calling get_unlocked_entry(), you have to call put_unlocked_entry() to avoid subsequent waiters losing wakeups.
Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
diff --git a/fs/dax.c b/fs/dax.c
index cf2394e2bf4b..9bcce89ea18e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -391,6 +391,7 @@ bool dax_lock_mapping_entry(struct page *page)
 			rcu_read_unlock();
 			entry = get_unlocked_entry(&xas);
 			xas_unlock_irq(&xas);
+			put_unlocked_entry(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}
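For readers unfamiliar with this locking scheme, the hazard is easiest to see in a plain userspace analogue. The sketch below is hypothetical illustration only, not the kernel API; the slot, get_unlocked_slot(), put_unlocked_slot(), unlock_slot() and example_slot names are made up. The point it shows is the contract from the commit message: the DAX entry wait queue uses exclusive waits, so an unlock reaches only one waiter, and a woken task that decides not to take the entry must pass the wakeup along or everyone still queued stays asleep.

/*
 * Hypothetical userspace analogue of the lost wakeup fixed above.
 * A signal wakes (at least) one waiter, analogous to an exclusive wakeup.
 */
#include <pthread.h>
#include <stdbool.h>

struct slot {
	pthread_mutex_t lock;
	pthread_cond_t wq;	/* signalled, not broadcast: one waiter wakes */
	bool locked;		/* analogue of the DAX entry lock bit */
};

static struct slot example_slot = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wq = PTHREAD_COND_INITIALIZER,
	.locked = true,
};

/* Release the slot and wake one waiter. */
static void unlock_slot(struct slot *s)
{
	pthread_mutex_lock(&s->lock);
	s->locked = false;
	pthread_cond_signal(&s->wq);
	pthread_mutex_unlock(&s->lock);
}

/* Sleep until the slot is unlocked; returns with s->lock held. */
static void get_unlocked_slot(struct slot *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->locked)
		pthread_cond_wait(&s->wq, &s->lock);
}

/* Caller decided not to take the slot: hand the wakeup to the next waiter. */
static void put_unlocked_slot(struct slot *s)
{
	pthread_cond_signal(&s->wq);
	pthread_mutex_unlock(&s->lock);
}

/*
 * The buggy shape the patch removes: bail out (say, to revalidate and
 * retry) without the "put".  The single wakeup sent by unlock_slot() was
 * consumed here and never passed on, so the remaining waiters are never
 * signalled again.
 */
static void bail_out_and_lose_wakeup(struct slot *s)
{
	get_unlocked_slot(s);
	pthread_mutex_unlock(&s->lock);	/* missing put_unlocked_slot(s) */
}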
On Mon, Nov 26, 2018 at 11:40:20AM +0100, gregkh@linuxfoundation.org wrote:
> The patch below does not apply to the 4.19-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
The fix for 4.19 is rather more complex because we don't have the right information in the right places. Dan, does this look right to you?
diff --git a/fs/dax.c b/fs/dax.c
index 0fb270f0a0ef6..b8dd66f1951a6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -227,7 +227,9 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
  * Must be called with the i_pages lock held.
  */
 static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+		pgoff_t index, void ***slotp,
+		bool (*wait_fn)(struct address_space *mapping,
+				pgoff_t index, void *entry))
 {
 	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
@@ -253,7 +255,7 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
-		revalidate = wait_fn();
+		revalidate = wait_fn(mapping, index, entry);
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
 		if (revalidate)
@@ -261,7 +263,8 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 	}
 }
 
-static bool entry_wait(void)
+static bool entry_wait(struct address_space *mapping, unsigned long index,
+		void *entry)
 {
 	schedule();
 	/*
@@ -393,12 +396,18 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
+static bool entry_wait_revalidate(struct address_space *mapping,
+		unsigned long index, void *entry)
 {
 	rcu_read_unlock();
 	schedule();
 	rcu_read_lock();
 
+	/*
+	 * We're not going to do anything with this entry; wake the next
+	 * task in line
+	 */
+	put_unlocked_mapping_entry(mapping, index, entry);
 	/*
 	 * Tell __get_unlocked_mapping_entry() to take a break, we need
 	 * to revalidate page->mapping after dropping locks
On Mon, Nov 26, 2018 at 04:22:23AM -0800, Matthew Wilcox wrote:
> On Mon, Nov 26, 2018 at 11:40:20AM +0100, gregkh@linuxfoundation.org wrote:
> > The patch below does not apply to the 4.19-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
> 
> The fix for 4.19 is rather more complex because we don't have the right information in the right places. Dan, does this look right to you?
Wait, we can do it like this which should be simpler:
diff --git a/fs/dax.c b/fs/dax.c
index 0fb270f0a0ef6..b0cd1364c68fa 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -217,6 +217,9 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 	return (void *)entry;
 }
 
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+		pgoff_t index, void *entry);
+
 /*
  * Lookup entry in radix tree, wait for it to become unlocked if it is
  * exceptional entry and return it. The caller must call
@@ -256,8 +259,10 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		revalidate = wait_fn();
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
-		if (revalidate)
+		if (revalidate) {
+			put_unlocked_mapping_entry(mapping, index, entry);
 			return ERR_PTR(-EAGAIN);
+		}
 	}
 }
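Compared with the earlier attempt, this leaves the wait_fn() callbacks untouched: instead of threading (mapping, index, entry) through entry_wait() and entry_wait_revalidate(), the wakeup is passed along inside __get_unlocked_mapping_entry() itself, at the one point where a woken waiter abandons the entry, so all that is needed is a forward declaration of put_unlocked_mapping_entry().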
From: Matthew Wilcox <willy@infradead.org>
commit 25bbe21bf427a81b8e3ccd480ea0e1d940256156 upstream.
After calling get_unlocked_entry(), you have to call put_unlocked_entry() to avoid subsequent waiters losing wakeups.
Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
Passes the nvdimm unit test suite which exercises the lock path.
 fs/dax.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/fs/dax.c b/fs/dax.c
index 0fb270f0a0ef..b0cd1364c68f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -217,6 +217,9 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 	return (void *)entry;
 }
 
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+		pgoff_t index, void *entry);
+
 /*
  * Lookup entry in radix tree, wait for it to become unlocked if it is
  * exceptional entry and return it. The caller must call
@@ -256,8 +259,10 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		revalidate = wait_fn();
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
-		if (revalidate)
+		if (revalidate) {
+			put_unlocked_mapping_entry(mapping, index, entry);
 			return ERR_PTR(-EAGAIN);
+		}
 	}
 }
On Wed, Nov 28, 2018 at 10:53:44AM -0800, Dan Williams wrote:
> From: Matthew Wilcox <willy@infradead.org>
> 
> commit 25bbe21bf427a81b8e3ccd480ea0e1d940256156 upstream.
> 
> After calling get_unlocked_entry(), you have to call put_unlocked_entry() to avoid subsequent waiters losing wakeups.
> 
> Fixes: c2a7d2a11552 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
> Cc: stable@vger.kernel.org
> Signed-off-by: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> 
> Passes the nvdimm unit test suite which exercises the lock path.
Now applied, thanks.
greg k-h