On Thu, May 22, 2025 at 08:39:39PM +0800, Muchun Song wrote:
But I think we could use "folio_order() > MAX_PAGE_ORDER" to replace the check of hstate_is_gigantic(), right? Then we could remove the first parameter of hstate from alloc_and_dissolve_hugetlb_folio() and obtain the hstate inside it.
Yes, I think we can do that. So something like the following (compile-tested only), maybe?
From d7199339e905f83b54d22849e8f21f631916ce94 Mon Sep 17 00:00:00 2001 From: Oscar Salvador osalvador@suse.de Date: Thu, 22 May 2025 19:51:04 +0200 Subject: [PATCH] TMP
--- mm/hugetlb.c | 38 +++++++++----------------------------- 1 file changed, 9 insertions(+), 29 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bd8971388236..20f08de9e37d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2787,15 +2787,13 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, /* * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve * the old one - * @h: struct hstate old page belongs to * @old_folio: Old folio to dissolve * @list: List to isolate the page in case we need to * Returns 0 on success, otherwise negated error. */ -static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, - struct folio *old_folio, struct list_head *list) +static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio, struct list_head *list) { - gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + struct hstate *h; int nid = folio_nid(old_folio); struct folio *new_folio = NULL; int ret = 0; @@ -2829,7 +2827,11 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, cond_resched(); goto retry; } else { + h = folio_hstate(old_folio); + if (!new_folio) { + gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + spin_unlock_irq(&hugetlb_lock); new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); @@ -2874,35 +2876,20 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list) { - struct hstate *h; int ret = -EBUSY;
- /* - * The page might have been dissolved from under our feet, so make sure - * to carefully check the state under the lock. - * Return success when racing as if we dissolved the page ourselves. - */ - spin_lock_irq(&hugetlb_lock); - if (folio_test_hugetlb(folio)) { - h = folio_hstate(folio); - } else { - spin_unlock_irq(&hugetlb_lock); - return 0; - } - spin_unlock_irq(&hugetlb_lock); - /* * Fence off gigantic pages as there is a cyclic dependency between * alloc_contig_range and them. Return -ENOMEM as this has the effect * of bailing out right away without further retrying. */ - if (hstate_is_gigantic(h)) + if (folio_order(folio) > MAX_PAGE_ORDER) return -ENOMEM;
if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list)) ret = 0; else if (!folio_ref_count(folio)) - ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); + ret = alloc_and_dissolve_hugetlb_folio(folio, list);
return ret; } @@ -2916,7 +2903,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list) */ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) { - struct hstate *h; struct folio *folio; int ret = 0;
@@ -2924,15 +2910,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
while (start_pfn < end_pfn) { folio = pfn_folio(start_pfn); - if (folio_test_hugetlb(folio)) { - h = folio_hstate(folio); - } else { - start_pfn++; - continue; - }
if (!folio_ref_count(folio)) { - ret = alloc_and_dissolve_hugetlb_folio(h, folio, + ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list); if (ret) break; -- 2.49.0