On 14 Feb 2025, at 16:59, David Hildenbrand wrote:
On 11.02.25 16:50, Zi Yan wrote:
This is a preparation patch; both added functions are unused so far.

The added __split_unmapped_folio() is able to split a folio with its mapping removed in two ways: 1) uniform split (the existing way), and 2) buddy allocator like split.

The added __split_folio_to_order() can split a folio into any lower order. For a uniform split, __split_unmapped_folio() calls it once to split the given folio to the new order. For a buddy allocator like split, __split_unmapped_folio() calls it (folio_order - new_order) times, and each call splits the folio containing the given page to one lower order.
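For example, splitting an order-9 folio down to order-3, with the page of interest in the first order-3 chunk, works out as:

    uniform split:         1 call to __split_folio_to_order(folio, 3),
                           giving 64 order-3 folios;
    buddy allocator like
    (non-uniform) split:   6 calls, one per order (9->8, 8->7, ..., 4->3),
                           leaving one folio each of order 8, 7, 6, 5, 4
                           and two order-3 folios, one of which contains
                           the page.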
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/huge_memory.c | 349 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 348 insertions(+), 1 deletion(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a0277f4154c2..12d3f515c408 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3262,7 +3262,6 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
 static void lru_add_page_tail(struct folio *folio, struct page *tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
-	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 	VM_BUG_ON_FOLIO(PageLRU(tail), folio);
 	lockdep_assert_held(&lruvec->lru_lock);

@@ -3506,6 +3505,354 @@ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 					caller_pins;
 }

+/*
+ * It splits @folio into @new_order folios and copies the @folio metadata to
+ * all the resulting folios.
+ */
+static int __split_folio_to_order(struct folio *folio, int new_order)
+{
+	int curr_order = folio_order(folio);
+	long nr_pages = folio_nr_pages(folio);
+	long new_nr_pages = 1 << new_order;
+	long index;
+
+	if (curr_order <= new_order)
+		return -EINVAL;
+
+	/*
+	 * Skip the first new_nr_pages, since the new folio made from them has
+	 * all the flags from the original folio.
+	 */
+	for (index = new_nr_pages; index < nr_pages; index += new_nr_pages) {
+		struct page *head = &folio->page;
+		struct page *new_head = head + index;
+
+		/*
+		 * Careful: new_folio is not a "real" folio before we cleared
+		 * PageTail. Don't pass it around before clear_compound_head().
+		 */
+		struct folio *new_folio = (struct folio *)new_head;
+
+		VM_BUG_ON_PAGE(atomic_read(&new_head->_mapcount) != -1, new_head);
+
+		/*
+		 * Clone page flags before unfreezing refcount.
+		 *
+		 * After successful get_page_unless_zero() might follow flags change,
+		 * for example lock_page() which set PG_waiters.
+		 *
+		 * Note that for mapped sub-pages of an anonymous THP,
+		 * PG_anon_exclusive has been cleared in unmap_folio() and is stored
+		 * in the migration entry instead from where remap_page() will
+		 * restore it. We can still have PG_anon_exclusive set on effectively
+		 * unmapped and unreferenced sub-pages of an anonymous THP: we can
+		 * simply drop PG_anon_exclusive (-> PG_mappedtodisk) for these here.
+		 */
+		new_head->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+		new_head->flags |= (head->flags &
+				((1L << PG_referenced) |
+				 (1L << PG_swapbacked) |
+				 (1L << PG_swapcache) |
+				 (1L << PG_mlocked) |
+				 (1L << PG_uptodate) |
+				 (1L << PG_active) |
+				 (1L << PG_workingset) |
+				 (1L << PG_locked) |
+				 (1L << PG_unevictable) |
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+				 (1L << PG_arch_2) |
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+				 (1L << PG_arch_3) |
+#endif
+				 (1L << PG_dirty) |
+				 LRU_GEN_MASK | LRU_REFS_MASK));
+
+		/* ->mapping in first and second tail page is replaced by other uses */
+		VM_BUG_ON_PAGE(new_nr_pages > 2 && new_head->mapping != TAIL_MAPPING,
+			       new_head);
+		new_head->mapping = head->mapping;
+		new_head->index = head->index + index;
+
+		/*
+		 * page->private should not be set in tail pages. Fix up and warn
+		 * once if private is unexpectedly set.
+		 */
+		if (unlikely(new_head->private)) {
+			VM_WARN_ON_ONCE_PAGE(true, new_head);
+			new_head->private = 0;
+		}
+
+		if (folio_test_swapcache(folio))
+			new_folio->swap.val = folio->swap.val + index;
+
+		/* Page flags must be visible before we make the page non-compound. */
+		smp_wmb();
+
+		/*
+		 * Clear PageTail before unfreezing page refcount.
+		 *
+		 * After successful get_page_unless_zero() might follow put_page()
+		 * which needs correct compound_head().
+		 */
+		clear_compound_head(new_head);
+		if (new_order) {
+			prep_compound_page(new_head, new_order);
+			folio_set_large_rmappable(new_folio);
+			folio_set_order(folio, new_order);
+		}
+
+		if (folio_test_young(folio))
+			folio_set_young(new_folio);
+		if (folio_test_idle(folio))
+			folio_set_idle(new_folio);
+
+		folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
+	}
+
+	if (!new_order)
+		ClearPageCompound(&folio->page);
+
+	return 0;
+}
+
+/*
+ * It splits an unmapped @folio to lower order smaller folios in two ways.
+ * @folio: the to-be-split folio
+ * @new_order: the smallest order of the after split folios (since buddy
+ *             allocator like split generates folios with orders from
+ *             @folio's order - 1 to new_order).
+ * @page: in buddy allocator like split, the folio containing @page will be
+ *        split until its order becomes @new_order.
+ * @list: the after split folios will be added to @list if it is not NULL,
+ *        otherwise to LRU lists.
+ * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory.
+ * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
+ * @mapping: @folio->mapping
+ * @uniform_split: if the split is uniform or not (buddy allocator like split)
+ *
+ * 1. uniform split: the given @folio is split into multiple @new_order small
+ *    folios, where all small folios have the same order. This is done when
+ *    uniform_split is true.
+ * 2. buddy allocator like (non-uniform) split: the given @folio is split
+ *    into half and one of the halves (containing the given page) is split
+ *    into half until the given @page's order becomes @new_order. This is
+ *    done when uniform_split is false.
+ *
+ * The high level flow for these two methods is:
+ *
+ * 1. uniform split: a single __split_folio_to_order() is called to split
+ *    the @folio into @new_order, then we traverse all the resulting folios
+ *    one by one in PFN ascending order and perform stats, unfreeze, adding
+ *    to list, and file mapping index operations.
+ * 2. non-uniform split: in general, folio_order - @new_order calls to
+ *    __split_folio_to_order() are made in a for loop to split the @folio to
+ *    one lower order at a time. The resulting small folios are processed
+ *    like what is done during the traversal in 1, except the one containing
+ *    @page, which is split in the next iteration of the for loop.
+ *
+ * After splitting, the caller's folio reference will be transferred to the
+ * folio containing @page. The other folios may be freed if they are not
+ * mapped.
+ *
+ * In terms of locking, after splitting,
+ * 1. uniform split leaves @page (or the folio containing it) locked;
+ * 2. buddy allocator like (non-uniform) split leaves @folio locked.
+ *
+ * For !uniform_split, when -ENOMEM is returned, the original folio might be
+ * split. The caller needs to check the input folio.
+ */
+static int __split_unmapped_folio(struct folio *folio, int new_order,
+		struct page *page, struct list_head *list, pgoff_t end,
+		struct xa_state *xas, struct address_space *mapping,
+		bool uniform_split)
+{
+	struct lruvec *lruvec;
+	struct address_space *swap_cache = NULL;
+	struct folio *origin_folio = folio;
+	struct folio *next_folio = folio_next(folio);
+	struct folio *new_folio;
+	struct folio *next;
+	int order = folio_order(folio);
+	int split_order;
+	int start_order = uniform_split ? new_order : order - 1;
+	int nr_dropped = 0;
+	int ret = 0;
+	bool stop_split = false;
+
+	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
+		/* a swapcache folio can only be uniformly split to order-0 */
+		if (!uniform_split || new_order != 0)
+			return -EINVAL;
+
+		swap_cache = swap_address_space(folio->swap);
+		xa_lock(&swap_cache->i_pages);
+	}
+
+	if (folio_test_anon(folio))
+		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+
+	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
+	lruvec = folio_lruvec_lock(folio);
+
+	folio_clear_has_hwpoisoned(folio);
+
+	/*
+	 * split to new_order one order at a time. For uniform split,
+	 * folio is split to new_order directly.
+	 */
+	for (split_order = start_order;
+	     split_order >= new_order && !stop_split;
+	     split_order--) {
+		int old_order = folio_order(folio);
+		struct folio *release;
+		struct folio *end_folio = folio_next(folio);
+		int status;
+
+		/* order-1 anonymous folio is not supported */
+		if (folio_test_anon(folio) && split_order == 1)
+			continue;
+		if (uniform_split && split_order != new_order)
+			continue;
+
+		if (mapping) {
+			/*
+			 * uniform split has xas_split_alloc() called before
+			 * irq is disabled to allocate enough memory, whereas
+			 * non-uniform split can handle ENOMEM.
+			 */
+			if (uniform_split)
+				xas_split(xas, folio, old_order);
+			else {
+				xas_set_order(xas, folio->index, split_order);
+				xas_try_split(xas, folio, old_order,
+					      GFP_NOWAIT);
+				if (xas_error(xas)) {
+					ret = xas_error(xas);
+					stop_split = true;
+					goto after_split;
+				}
+			}
+		}
+
+		/* complete memcg works before add pages to LRU */
+		split_page_memcg(&folio->page, old_order, split_order);
+		split_page_owner(&folio->page, old_order, split_order);
+		pgalloc_tag_split(folio, old_order, split_order);
+		status = __split_folio_to_order(folio, split_order);
Stumbling over that code (sorry for the late reply ... ).
That looks weird. We split memcg/owner/pgalloc ... and then figure out in __split_folio_to_order() that we don't want to ... split?
Should that all be moved into __split_folio_to_order() and performed only when we really want to split?
Yes, or move it after the status check. In reality, __split_folio_to_order() only fails when split_order is bigger than the folio's order, which should not happen. But still. I will fix it in the next version.
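For reference, one possible shape of that reordering, using the variable
names from the quoted hunk (a sketch, not necessarily what the next
version will do):

	status = __split_folio_to_order(folio, split_order);
	if (status < 0) {
		/* nothing was split; bail out without touching stats */
		ret = status;
		stop_split = true;
		goto after_split;
	}

	/* complete memcg works before add pages to LRU */
	split_page_memcg(&folio->page, old_order, split_order);
	split_page_owner(&folio->page, old_order, split_order);
	pgalloc_tag_split(folio, old_order, split_order);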
Best Regards,
Yan, Zi