The patch titled
Subject: mm: convert isolate_page() to mf_isolate_folio()
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-convert-isolate_page-to-mf_isolate_folio.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy(a)infradead.org>
Subject: mm: convert isolate_page() to mf_isolate_folio()
Date: Wed, 8 Nov 2023 18:28:08 +0000
The only caller now has a folio, so pass it in and operate on it. Saves
many page->folio conversions and introduces only one folio->page
conversion when calling isolate_movable_page().
Link: https://lkml.kernel.org/r/20231108182809.602073-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi(a)nec.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory-failure.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
--- a/mm/memory-failure.c~mm-convert-isolate_page-to-mf_isolate_folio
+++ a/mm/memory-failure.c
@@ -2602,37 +2602,37 @@ unlock_mutex:
}
EXPORT_SYMBOL(unpoison_memory);
-static bool isolate_page(struct page *page, struct list_head *pagelist)
+static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
bool isolated = false;
- if (PageHuge(page)) {
- isolated = isolate_hugetlb(page_folio(page), pagelist);
+ if (folio_test_hugetlb(folio)) {
+ isolated = isolate_hugetlb(folio, pagelist);
} else {
- bool lru = !__PageMovable(page);
+ bool lru = !__folio_test_movable(folio);
if (lru)
- isolated = isolate_lru_page(page);
+ isolated = folio_isolate_lru(folio);
else
- isolated = isolate_movable_page(page,
+ isolated = isolate_movable_page(&folio->page,
ISOLATE_UNEVICTABLE);
if (isolated) {
- list_add(&page->lru, pagelist);
+ list_add(&folio->lru, pagelist);
if (lru)
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
}
}
/*
- * If we succeed to isolate the page, we grabbed another refcount on
- * the page, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the page, it means that we cannot go further
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
* and we will return an error, so drop the reference we got from
* get_any_page() as well.
*/
- put_page(page);
+ folio_put(folio);
return isolated;
}
@@ -2686,7 +2686,7 @@ static int soft_offline_in_use_page(stru
return 0;
}
- if (isolate_page(&folio->page, &pagelist)) {
+ if (mf_isolate_folio(folio, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
_
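For reference, this is how the converted helper reads with the hunk above
applied (reconstructed from the diff; leading whitespace restored and the
refcount comment condensed):

static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
        bool isolated = false;

        if (folio_test_hugetlb(folio)) {
                isolated = isolate_hugetlb(folio, pagelist);
        } else {
                bool lru = !__folio_test_movable(folio);

                if (lru)
                        isolated = folio_isolate_lru(folio);
                else
                        isolated = isolate_movable_page(&folio->page,
                                                        ISOLATE_UNEVICTABLE);

                if (isolated) {
                        list_add(&folio->lru, pagelist);
                        if (lru)
                                node_stat_add_folio(folio, NR_ISOLATED_ANON +
                                                    folio_is_file_lru(folio));
                }
        }

        /*
         * Drop the reference taken by get_any_page(); on success the
         * isolation above holds its own reference to the folio.
         */
        folio_put(folio);
        return isolated;
}

Note the single remaining folio->page conversion: isolate_movable_page() still
takes a struct page, so the folio is converted back exactly once, as the
changelog says.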
Patches currently in -mm which might be from willy(a)infradead.org are
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
mm-convert-__do_fault-to-use-a-folio.patch
mm-use-mapping_evict_folio-in-truncate_error_page.patch
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
mm-convert-isolate_page-to-mf_isolate_folio.patch
mm-remove-invalidate_inode_page.patch
The patch titled
Subject: mm: convert soft_offline_in_use_page() to use a folio
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy(a)infradead.org>
Subject: mm: convert soft_offline_in_use_page() to use a folio
Date: Wed, 8 Nov 2023 18:28:07 +0000
Replace the existing head-page logic with folio logic.
Link: https://lkml.kernel.org/r/20231108182809.602073-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi(a)nec.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory-failure.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
--- a/mm/memory-failure.c~mm-convert-soft_offline_in_use_page-to-use-a-folio
+++ a/mm/memory-failure.c
@@ -2645,40 +2645,40 @@ static int soft_offline_in_use_page(stru
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_head(page);
+ struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
- bool huge = PageHuge(page);
+ bool huge = folio_test_hugetlb(folio);
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
- if (!huge && PageTransHuge(hpage)) {
+ if (!huge && folio_test_large(folio)) {
if (try_to_split_thp_page(page)) {
pr_info("soft offline: %#lx: thp split failed\n", pfn);
return -EBUSY;
}
- hpage = page;
+ folio = page_folio(page);
}
- lock_page(page);
+ folio_lock(folio);
if (!huge)
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
if (PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return 0;
}
- if (!huge && PageLRU(page) && !PageSwapCache(page))
+ if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
- ret = invalidate_inode_page(page);
- unlock_page(page);
+ ret = mapping_evict_folio(folio_mapping(folio), folio);
+ folio_unlock(folio);
if (ret) {
pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2686,7 +2686,7 @@ static int soft_offline_in_use_page(stru
return 0;
}
- if (isolate_page(hpage, &pagelist)) {
+ if (isolate_page(&folio->page, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
_
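The conversion above follows the usual head-page-to-folio pattern. As an
illustrative sketch (not a verbatim excerpt; page and ret are the locals of
soft_offline_in_use_page(), and the !huge guards are omitted), the old and
new idioms compare like this:

        /* Old: page-based helpers, each resolving the head page internally. */
        lock_page(page);
        wait_on_page_writeback(page);
        ret = invalidate_inode_page(page);
        unlock_page(page);

        /* New: resolve the folio once, then stay with folio_* helpers. */
        struct folio *folio = page_folio(page);

        folio_lock(folio);
        folio_wait_writeback(folio);
        ret = mapping_evict_folio(folio_mapping(folio), folio);
        folio_unlock(folio);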
Patches currently in -mm which might be from willy(a)infradead.org are
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
mm-convert-__do_fault-to-use-a-folio.patch
mm-use-mapping_evict_folio-in-truncate_error_page.patch
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
mm-convert-isolate_page-to-mf_isolate_folio.patch
mm-remove-invalidate_inode_page.patch
The patch titled
Subject: mm: use mapping_evict_folio() in truncate_error_page()
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-use-mapping_evict_folio-in-truncate_error_page.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy(a)infradead.org>
Subject: mm: use mapping_evict_folio() in truncate_error_page()
Date: Wed, 8 Nov 2023 18:28:06 +0000
We already have the folio and the mapping, so replace the call to
invalidate_inode_page() with mapping_evict_folio().
Link: https://lkml.kernel.org/r/20231108182809.602073-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi(a)nec.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory-failure.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/mm/memory-failure.c~mm-use-mapping_evict_folio-in-truncate_error_page
+++ a/mm/memory-failure.c
@@ -930,10 +930,10 @@ static int delete_from_lru_cache(struct
static int truncate_error_page(struct page *p, unsigned long pfn,
struct address_space *mapping)
{
+ struct folio *folio = page_folio(p);
int ret = MF_FAILED;
if (mapping->a_ops->error_remove_page) {
- struct folio *folio = page_folio(p);
int err = mapping->a_ops->error_remove_page(mapping, p);
if (err != 0)
@@ -947,7 +947,7 @@ static int truncate_error_page(struct pa
* If the file system doesn't support it just invalidate
* This fails on dirty or anything with private pages
*/
- if (invalidate_inode_page(p))
+ if (mapping_evict_folio(mapping, folio))
ret = MF_RECOVERED;
else
pr_info("%#lx: Failed to invalidate\n", pfn);
_
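The change here is just the call swap described in the changelog; at the call
site it amounts to the following (excerpted from the hunk above, with the
folio now resolved at the top of truncate_error_page()):

        /* Before: page-based invalidation. */
        if (invalidate_inode_page(p))
                ret = MF_RECOVERED;

        /* After: the mapping and folio are already in hand. */
        if (mapping_evict_folio(mapping, folio))
                ret = MF_RECOVERED;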
Patches currently in -mm which might be from willy(a)infradead.org are
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
mm-convert-__do_fault-to-use-a-folio.patch
mm-use-mapping_evict_folio-in-truncate_error_page.patch
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
mm-convert-isolate_page-to-mf_isolate_folio.patch
mm-remove-invalidate_inode_page.patch
The patch titled
Subject: mm: convert __do_fault() to use a folio
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-convert-__do_fault-to-use-a-folio.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy(a)infradead.org>
Subject: mm: convert __do_fault() to use a folio
Date: Wed, 8 Nov 2023 18:28:05 +0000
Convert vmf->page to a folio as soon as we're going to use it. This fixes
a bug when the fault handler returns a tail page with hardware poison; tail
pages have an invalid page->index, so we would fail to unmap the page from
the page tables. We actually have to unmap the entire folio (or
mapping_evict_folio() will fail), so use unmap_mapping_folio() instead.
This also saves various calls to compound_head() hidden in lock_page(),
put_page(), etc.
Link: https://lkml.kernel.org/r/20231108182809.602073-3-willy@infradead.org
Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
Signed-off-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi(a)nec.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
--- a/mm/memory.c~mm-convert-__do_fault-to-use-a-folio
+++ a/mm/memory.c
@@ -4239,6 +4239,7 @@ oom:
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
/*
@@ -4267,27 +4268,26 @@ static vm_fault_t __do_fault(struct vm_f
VM_FAULT_DONE_COW)))
return ret;
+ folio = page_folio(vmf->page);
if (unlikely(PageHWPoison(vmf->page))) {
- struct page *page = vmf->page;
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
- if (page_mapped(page))
- unmap_mapping_pages(page_mapping(page),
- page->index, 1, false);
- /* Retry if a clean page was removed from the cache. */
- if (invalidate_inode_page(page))
+ if (page_mapped(vmf->page))
+ unmap_mapping_folio(folio);
+ /* Retry if a clean folio was removed from the cache. */
+ if (mapping_evict_folio(folio->mapping, folio))
poisonret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
vmf->page = NULL;
return poisonret;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
- lock_page(vmf->page);
+ folio_lock(folio);
else
- VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+ VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
return ret;
}
_
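With the hunk above applied, the hwpoison branch of __do_fault() reads roughly
as follows (reconstructed from the diff; leading whitespace restored):

        folio = page_folio(vmf->page);
        if (unlikely(PageHWPoison(vmf->page))) {
                vm_fault_t poisonret = VM_FAULT_HWPOISON;

                if (ret & VM_FAULT_LOCKED) {
                        /* Unmap the whole folio, not just the faulting page. */
                        if (page_mapped(vmf->page))
                                unmap_mapping_folio(folio);
                        /* Retry if a clean folio was removed from the cache. */
                        if (mapping_evict_folio(folio->mapping, folio))
                                poisonret = VM_FAULT_NOPAGE;
                        folio_unlock(folio);
                }
                folio_put(folio);
                vmf->page = NULL;
                return poisonret;
        }

This is where the fix bites: unmap_mapping_folio() works from the folio's own
index and size, so a poisoned tail page no longer leaves stale PTEs behind the
way the old page->index-based unmap_mapping_pages() call could.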
Patches currently in -mm which might be from willy(a)infradead.org are
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
mm-convert-__do_fault-to-use-a-folio.patch
mm-use-mapping_evict_folio-in-truncate_error_page.patch
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
mm-convert-isolate_page-to-mf_isolate_folio.patch
mm-remove-invalidate_inode_page.patch
The patch titled
Subject: mm: make mapping_evict_folio() the preferred way to evict clean folios
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy(a)infradead.org>
Subject: mm: make mapping_evict_folio() the preferred way to evict clean folios
Date: Wed, 8 Nov 2023 18:28:04 +0000
Patch series "Fix fault handler's handling of poisoned tail pages".
Since the page cache gained the ability to hold large folios, it's been
possible for a hwpoisoned tail page to be returned from the fault handler.
We handle this situation poorly, failing to remove the affected page from
use.
This isn't a minimal patch to fix it; it's a full conversion of all the
code surrounding it.
This patch (of 6):
invalidate_inode_page() does very little beyond calling
mapping_evict_folio(). Move the check for mapping being NULL into
mapping_evict_folio() and make it available to the rest of the MM for use
in the next few patches.
Link: https://lkml.kernel.org/r/20231108182809.602073-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20231108182809.602073-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi(a)nec.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/internal.h | 1 +
mm/truncate.c | 33 ++++++++++++++++-----------------
2 files changed, 17 insertions(+), 17 deletions(-)
--- a/mm/internal.h~mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios
+++ a/mm/internal.h
@@ -138,6 +138,7 @@ void filemap_free_folio(struct address_s
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
long invalidate_inode_page(struct page *page);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed);
--- a/mm/truncate.c~mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios
+++ a/mm/truncate.c
@@ -266,9 +266,22 @@ int generic_error_remove_page(struct add
}
EXPORT_SYMBOL(generic_error_remove_page);
-static long mapping_evict_folio(struct address_space *mapping,
- struct folio *folio)
+/**
+ * mapping_evict_folio() - Remove an unused folio from the page-cache.
+ * @mapping: The mapping this folio belongs to.
+ * @folio: The folio to remove.
+ *
+ * Safely remove one folio from the page cache.
+ * It only drops clean, unused folios.
+ *
+ * Context: Folio must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
+ /* The page may have been truncated before it was locked */
+ if (!mapping)
+ return 0;
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
/* The refcount will be elevated if any page in the folio is mapped */
@@ -281,25 +294,11 @@ static long mapping_evict_folio(struct a
return remove_mapping(mapping, folio);
}
-/**
- * invalidate_inode_page() - Remove an unused page from the pagecache.
- * @page: The page to remove.
- *
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages.
- *
- * Context: Page must be locked.
- * Return: The number of pages successfully removed.
- */
long invalidate_inode_page(struct page *page)
{
struct folio *folio = page_folio(page);
- struct address_space *mapping = folio_mapping(folio);
- /* The page may have been truncated before it was locked */
- if (!mapping)
- return 0;
- return mapping_evict_folio(mapping, folio);
+ return mapping_evict_folio(folio_mapping(folio), folio);
}
/**
_
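After this patch, invalidate_inode_page() is reduced to a thin wrapper around
the newly non-static helper, as the hunk above shows:

long invalidate_inode_page(struct page *page)
{
        struct folio *folio = page_folio(page);

        return mapping_evict_folio(folio_mapping(folio), folio);
}

The NULL-mapping check that used to live here has moved into
mapping_evict_folio() itself, which is what lets the later patches in the
series call it directly with folio->mapping or folio_mapping().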
Patches currently in -mm which might be from willy(a)infradead.org are
mm-make-mapping_evict_folio-the-preferred-way-to-evict-clean-folios.patch
mm-convert-__do_fault-to-use-a-folio.patch
mm-use-mapping_evict_folio-in-truncate_error_page.patch
mm-convert-soft_offline_in_use_page-to-use-a-folio.patch
mm-convert-isolate_page-to-mf_isolate_folio.patch
mm-remove-invalidate_inode_page.patch