The quilt patch titled
     Subject: mm/huge_memory: merge uniform_split_supported() and non_uniform_split_supported()
has been removed from the -mm tree.  Its filename was
     mm-huge_memory-merge-uniform_split_supported-and-non_uniform_split_supported.patch
This patch was dropped because it was merged into the mm-stable branch of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Wei Yang <richard.weiyang@gmail.com>
Subject: mm/huge_memory: merge uniform_split_supported() and non_uniform_split_supported()
Date: Thu, 6 Nov 2025 03:41:55 +0000
uniform_split_supported() and non_uniform_split_supported() share largely the same logic.
The only functional difference is that uniform_split_supported() includes an additional check on the requested @new_order.
This check is needed for two reasons:
  * some file systems or the swap cache only support order-0 folios
  * the behavioral difference between uniform and non-uniform split
The behavioral difference between uniform and non-uniform split is:

  * uniform split splits the folio directly to @new_order
  * non-uniform split creates after-split folios with orders from
    folio_order(folio) - 1 down to @new_order (see the sketch below)
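As a concrete illustration (a standalone userspace sketch of the behavior
described above, not kernel code; print_after_split_orders() is a
hypothetical name), splitting an order-4 folio down to order-0 yields
either sixteen order-0 folios (uniform) or one folio each of orders 3, 2
and 1 plus two order-0 folios (non-uniform):

	#include <stdio.h>

	/* Sketch: list the after-split orders each split type produces. */
	static void print_after_split_orders(unsigned int old_order,
					     unsigned int new_order,
					     int uniform)
	{
		if (uniform) {
			/* uniform split: 2^(old - new) folios, all of @new_order */
			printf("uniform: %u folios of order %u\n",
			       1u << (old_order - new_order), new_order);
			return;
		}
		/*
		 * non-uniform split: one folio of each order from
		 * old_order - 1 down to new_order, plus one extra folio of
		 * @new_order containing the target page.
		 */
		printf("non-uniform:");
		for (unsigned int o = old_order; o-- > new_order;)
			printf(" order-%u", o);
		printf(" + one extra order-%u\n", new_order);
	}

	int main(void)
	{
		print_after_split_orders(4, 0, 1);	/* 16 folios of order 0 */
		print_after_split_orders(4, 0, 0);	/* orders 3,2,1,0 + extra 0 */
		return 0;
	}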
This means that for a non-uniform split, or a uniform split to a non-zero
@new_order, we should check the file system and swap cache respectively;
only a uniform split to order-0 is always safe.
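Expressed as a standalone predicate (an illustrative sketch only; the enum
values are named as in the patch, while restriction_applies() is a
hypothetical helper, not part of the kernel):

	#include <stdbool.h>

	/* enum values as named in the patch; definition sketched here */
	enum split_type { SPLIT_TYPE_UNIFORM, SPLIT_TYPE_NON_UNIFORM };

	/*
	 * Hypothetical helper: the file system and swap cache restrictions
	 * apply whenever the split is non-uniform or targets a non-zero
	 * order; only a uniform split to order-0 is exempt.
	 */
	static inline bool restriction_applies(enum split_type split_type,
					       unsigned int new_order)
	{
		return split_type == SPLIT_TYPE_NON_UNIFORM || new_order != 0;
	}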
This commit unifies the logic and merges the two functions into a single
combined helper, removing redundant code and simplifying the split-support
check.
Link: https://lkml.kernel.org/r/20251106034155.21398-3-richard.weiyang@gmail.com
Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: "David Hildenbrand (Red Hat)" <david@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/huge_mm.h |    8 +---
 mm/huge_memory.c        |   71 ++++++++++++++++----------------------
 2 files changed, 33 insertions(+), 46 deletions(-)
--- a/include/linux/huge_mm.h~mm-huge_memory-merge-uniform_split_supported-and-non_uniform_split_supported
+++ a/include/linux/huge_mm.h
@@ -374,10 +374,8 @@ int __split_huge_page_to_list_to_order(s
 		unsigned int new_order, bool unmapped);
 int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		enum split_type split_type, bool warns);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
@@ -408,7 +406,7 @@ static inline int split_huge_page_to_ord
 static inline int try_folio_split_to_order(struct folio *folio,
 		struct page *page, unsigned int new_order)
 {
-	if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+	if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
 		return split_huge_page_to_order(&folio->page, new_order);
 	return folio_split(folio, new_order, page, NULL);
 }
--- a/mm/huge_memory.c~mm-huge_memory-merge-uniform_split_supported-and-non_uniform_split_supported
+++ a/mm/huge_memory.c
@@ -3593,8 +3593,8 @@ static int __split_unmapped_folio(struct
 	return 0;
 }
 
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		enum split_type split_type, bool warns)
 {
 	if (folio_test_anon(folio)) {
 		/* order-1 is not supported for anonymous THP. */
@@ -3602,48 +3602,41 @@ bool non_uniform_split_supported(struct
 		VM_WARN_ONCE(warns && new_order == 1,
 			"Cannot split to order-1 folio");
 		if (new_order == 1)
 			return false;
-	} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
-		   !mapping_large_folio_support(folio->mapping)) {
-		/*
-		 * No split if the file system does not support large folio.
-		 * Note that we might still have THPs in such mappings due to
-		 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
-		 * does not actually support large folios properly.
-		 */
-		VM_WARN_ONCE(warns,
-			"Cannot split file folio to non-0 order");
-		return false;
-	}
-
-	/* Only swapping a whole PMD-mapped folio is supported */
-	if (folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-			"Cannot split swapcache folio to non-0 order");
-		return false;
-	}
-
-	return true;
-}
-
-/* See comments in non_uniform_split_supported() */
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
-{
-	if (folio_test_anon(folio)) {
-		VM_WARN_ONCE(warns && new_order == 1,
-			"Cannot split to order-1 folio");
-		if (new_order == 1)
-			return false;
-	} else if (new_order) {
+	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
+			/*
+			 * We can always split a folio down to a single page
+			 * (new_order == 0) uniformly.
+			 *
+			 * For any other scenario
+			 *   a) uniform split targeting a large folio
+			 *      (new_order > 0)
+			 *   b) any non-uniform split
+			 * we must confirm that the file system supports large
+			 * folios.
+			 *
+			 * Note that we might still have THPs in such
+			 * mappings, which is created from khugepaged when
+			 * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
+			 * case, the mapping does not actually support large
+			 * folios properly.
+			 */
 			VM_WARN_ONCE(warns,
 				"Cannot split file folio to non-0 order");
 			return false;
 		}
 	}
 
-	if (new_order && folio_test_swapcache(folio)) {
+	/*
+	 * swapcache folio could only be split to order 0
+	 *
+	 * non-uniform split creates after-split folios with orders from
+	 * folio_order(folio) - 1 to new_order, making it not suitable for any
+	 * swapcache folio split. Only uniform split to order-0 can be used
+	 * here.
+	 */
+	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
 		VM_WARN_ONCE(warns,
 			"Cannot split swapcache folio to non-0 order");
 		return false;
@@ -3711,11 +3704,7 @@ static int __folio_split(struct folio *f
 	if (new_order >= old_order)
 		return -EINVAL;
 
-	if (split_type == SPLIT_TYPE_UNIFORM && !uniform_split_supported(folio, new_order, true))
-		return -EINVAL;
-
-	if (split_type == SPLIT_TYPE_NON_UNIFORM &&
-	    !non_uniform_split_supported(folio, new_order, true))
+	if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
 		return -EINVAL;
 
 	is_hzp = is_huge_zero_folio(folio);
_
Patches currently in -mm which might be from richard.weiyang@gmail.com are