When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com --- mm/huge_memory.c | 5 +++-- mm/memory.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0024266dea0a..a3c018f2b554 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1734,10 +1734,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) goto out_map; }
-out: +count_fault: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+out: return 0;
out_map: @@ -1749,7 +1750,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); - goto out; + goto count_fault; }
/* diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..503d493263df 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5536,9 +5536,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) goto out_map; }
-out: +count_fault: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); +out: return 0; out_map: /* @@ -5552,7 +5553,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + goto count_fault; }
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
mm/huge_memory.c | 5 +++-- mm/memory.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0024266dea0a..a3c018f2b554 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1734,10 +1734,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) goto out_map; } -out: +count_fault: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); +out: return 0; out_map: @@ -1749,7 +1750,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl);
- goto out;
- goto count_fault; }
/* diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..503d493263df 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5536,9 +5536,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) goto out_map; } -out: +count_fault: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); +out: return 0; out_map: /* @@ -5552,7 +5553,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
- goto count_fault; }
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
On 08.08.24 05:19, Baolin Wang wrote:
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + return 0; }
pte = pte_modify(old_pte, vma->vm_page_prot); @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) - goto out; + return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + return 0; } goto out_map; }
-out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; @@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, nr_pages, flags); + return 0; }
On 2024/8/8 16:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + return 0; }
pte = pte_modify(old_pte, vma->vm_page_prot); @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) - goto out; + return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + return 0; } goto out_map; }
-out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; @@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); - goto out; + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, nr_pages, flags); + return 0; }
Thanks. Looks better:)
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; }
-out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; @@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
}
Looks good to me. Thanks.
Hi Andrew,
Should I resend this for an easy back porting? Or you want to fold David’s changes in directly?
Best Regards, Yan, Zi
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
@@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0; }
Looks good to me. Thanks.
Hi Andrew,
Should I resend this for an easy back porting? Or you want to fold David’s changes in directly?
Note that I didn't touch huge_memory.c. So maybe just send a fixup on top?
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
On 2024/8/8 02:47, Zi Yan wrote:
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and do_huge_pmd_numa_page() and did not avoid task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same()/!pmd_same() return immediately.
This issue can cause task_numa_fault() being called more than necessary and lead to unexpected numa balancing results (It is hard to tell whether the issue will cause positive or negative performance impact due to duplicated numa fault counting).
Reported-by: "Huang, Ying" ying.huang@intel.com Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel... Fixes: b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Cc: stable@vger.kernel.org Signed-off-by: Zi Yan ziy@nvidia.com
The fix looks reasonable to me. Feel free to add: Reviewed-by: Baolin Wang baolin.wang@linux.alibaba.com
(Nit: These goto labels are a bit confusing and might need some cleanup in the future.)
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
@@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0; }
Looks good to me. Thanks.
Hi Andrew,
Should I resend this for an easy back porting? Or you want to fold David’s changes in directly?
Note that I didn't touch huge_memory.c. So maybe just send a fixup on top?
Got it. The fixup is attached.
Best Regards, Yan, Zi
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
...
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; + goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } - goto out_map; }
- if (nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, nid, nr_pages, flags); - return 0; out_map: /* * Make it present again, depending on how arch implements @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
@@ -5552,7 +5551,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0; }
Looks good to me. Thanks.
Hi Andrew,
Should I resend this for an easy back porting? Or you want to fold David’s changes in directly?
Note that I didn't touch huge_memory.c. So maybe just send a fixup on top?
Got it. The fixup is attached.
Best Regards, Yan, Zi
On 8 Aug 2024, at 10:36, Kefeng Wang wrote:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
...
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Even better. Thanks. The updated fixup is attached.
Best Regards, Yan, Zi
On 8 Aug 2024, at 10:57, Zi Yan wrote:
On 8 Aug 2024, at 10:36, Kefeng Wang wrote:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote: > >
...
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Even better. Thanks. The updated fixup is attached.
Update the fixup, if Andrew wants to fold it in instead of a resend, again to fix a typo causing compilation failure.
Best Regards, Yan, Zi
Kefeng Wang wangkefeng.wang@huawei.com writes:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote:
...
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements
IMHO, migration success is normal path, while migration failure is error processing path. If so, it's better to use "goto" for error processing instead of normal path.
@@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
-- Best Regards, Huang, Ying
On 8 Aug 2024, at 21:25, Huang, Ying wrote:
Kefeng Wang wangkefeng.wang@huawei.com writes:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
On 08.08.24 05:19, Baolin Wang wrote: > >
...
Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
Then, just copy the 3 LOC.
For mm/memory.c that would be:
diff --git a/mm/memory.c b/mm/memory.c index 67496dc5064f..410ba50ca746 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte))
goto out;
return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
return 0; } goto out_map; } -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements
IMHO, migration success is normal path, while migration failure is error processing path. If so, it's better to use "goto" for error processing instead of normal path.
@@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
How about calling task_numa_fault and return in the migration successful path? (target_nid cannot be NUMA_NO_NODE, so if is not needed)
diff --git a/mm/memory.c b/mm/memory.c index 3441f60d54ef..abdb73a68b80 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; - goto out; + task_numa_fault(last_cpupid, nid, nr_pages, flags); + return 0; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, @@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Or move the make-present code inside migration failed branch? This one does not duplicate code but others can jump into this branch.
diff --git a/mm/memory.c b/mm/memory.c index 3441f60d54ef..c9b4e7099815 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5526,7 +5526,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; - goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, @@ -5537,20 +5536,20 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } - } out_map: - /* - * Make it present again, depending on how arch implements - * non-accessible ptes, some can allow access by kernel mode. - */ - if (folio && folio_test_large(folio)) - numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, - pte_write_upgrade); - else - numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, - writable); - pte_unmap_unlock(vmf->pte, vmf->ptl); -out: + /* + * Make it present again, depending on how arch implements + * non-accessible ptes, some can allow access by kernel mode. + */ + if (folio && folio_test_large(folio)) + numa_rebuild_large_mapping(vmf, vma, folio, pte, + ignore_writable, pte_write_upgrade); + else + numa_rebuild_single_mapping(vmf, vma, vmf->address, + vmf->pte, writable); + pte_unmap_unlock(vmf->pte, vmf->ptl); + } + if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Of course, I will need to change mm/huge_memory.c as well.
Best Regards, Yan, Zi
Zi Yan ziy@nvidia.com writes:
On 8 Aug 2024, at 21:25, Huang, Ying wrote:
Kefeng Wang wangkefeng.wang@huawei.com writes:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote:
On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
> On 08.08.24 05:19, Baolin Wang wrote: >> >>
...
> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;". > > Then, just copy the 3 LOC. > > For mm/memory.c that would be: > > diff --git a/mm/memory.c b/mm/memory.c > index 67496dc5064f..410ba50ca746 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) > if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { > pte_unmap_unlock(vmf->pte, vmf->ptl); > - goto out; > + return 0; > } > pte = pte_modify(old_pte, vma->vm_page_prot); > @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) > vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, > vmf->address, &vmf->ptl); > if (unlikely(!vmf->pte)) > - goto out; > + return 0; > if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { > pte_unmap_unlock(vmf->pte, vmf->ptl); > - goto out; > + return 0; > } > goto out_map; > } > -out: > if (nid != NUMA_NO_NODE) > task_numa_fault(last_cpupid, nid, nr_pages, flags); > return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements
IMHO, migration success is normal path, while migration failure is error processing path. If so, it's better to use "goto" for error processing instead of normal path.
@@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
How about calling task_numa_fault and return in the migration successful path? (target_nid cannot be NUMA_NO_NODE, so if is not needed)
diff --git a/mm/memory.c b/mm/memory.c index 3441f60d54ef..abdb73a68b80 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out;
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
This looks better for me, or change it further.
if (migrate_misplaced_folio(folio, vma, target_nid)) goto out_map_pt;
nid = target_nid; flags |= TNF_MIGRATED; task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map_pt: flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, ...
Or move the make-present code inside migration failed branch? This one does not duplicate code but others can jump into this branch.
diff --git a/mm/memory.c b/mm/memory.c index 3441f60d54ef..c9b4e7099815 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5526,7 +5526,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5537,20 +5536,20 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
}
out_map:
/*
* Make it present again, depending on how arch implements
* non-accessible ptes, some can allow access by kernel mode.
*/
if (folio && folio_test_large(folio))
numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
pte_write_upgrade);
else
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
/*
* Make it present again, depending on how arch implements
* non-accessible ptes, some can allow access by kernel mode.
*/
if (folio && folio_test_large(folio))
numa_rebuild_large_mapping(vmf, vma, folio, pte,
ignore_writable, pte_write_upgrade);
else
numa_rebuild_single_mapping(vmf, vma, vmf->address,
vmf->pte, writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
}
if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
Of course, I will need to change mm/huge_memory.c as well.
-- Best Regards, Huang, Ying
On 9 Aug 2024, at 3:52, Huang, Ying wrote:
Zi Yan ziy@nvidia.com writes:
On 8 Aug 2024, at 21:25, Huang, Ying wrote:
Kefeng Wang wangkefeng.wang@huawei.com writes:
On 2024/8/8 22:21, Zi Yan wrote:
On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
On 08.08.24 16:13, Zi Yan wrote: > On 8 Aug 2024, at 4:22, David Hildenbrand wrote: > >> On 08.08.24 05:19, Baolin Wang wrote: >>> >>>
...
>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;". >> >> Then, just copy the 3 LOC. >> >> For mm/memory.c that would be: >> >> diff --git a/mm/memory.c b/mm/memory.c >> index 67496dc5064f..410ba50ca746 100644 >> --- a/mm/memory.c >> +++ b/mm/memory.c >> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) >> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { >> pte_unmap_unlock(vmf->pte, vmf->ptl); >> - goto out; >> + return 0; >> } >> pte = pte_modify(old_pte, vma->vm_page_prot); >> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) >> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, >> vmf->address, &vmf->ptl); >> if (unlikely(!vmf->pte)) >> - goto out; >> + return 0; >> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { >> pte_unmap_unlock(vmf->pte, vmf->ptl); >> - goto out; >> + return 0; >> } >> goto out_map; >> } >> -out: >> if (nid != NUMA_NO_NODE) >> task_numa_fault(last_cpupid, nid, nr_pages, flags); >> return 0;
Maybe drop this part too,
diff --git a/mm/memory.c b/mm/memory.c index 410ba50ca746..07343c1469e0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; }
goto out_map; }
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map: /* * Make it present again, depending on how arch implements
IMHO, migration success is normal path, while migration failure is error processing path. If so, it's better to use "goto" for error processing instead of normal path.
@@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); +out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
How about calling task_numa_fault and return in the migration successful path? (target_nid cannot be NUMA_NO_NODE, so if is not needed)
diff --git a/mm/memory.c b/mm/memory.c index 3441f60d54ef..abdb73a68b80 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED;
goto out;
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); -out: if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
This looks better for me, or change it further.
Thanks. Will put a fixup below.
if (migrate_misplaced_folio(folio, vma, target_nid)) goto out_map_pt; nid = target_nid; flags |= TNF_MIGRATED; task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
out_map_pt: flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, ...
I will send a separate patch for this refactoring, since this patch is meant for fixing commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault").
Hi Andrew,
Can you fold the fixup below to this patch? Thanks.
From 645fa83b2eed14494755ed67e5c52a55656287ac Mon Sep 17 00:00:00 2001 From: Zi Yan ziy@nvidia.com Date: Thu, 8 Aug 2024 22:06:41 -0400 Subject: [PATCH] fixup! fixup! mm/numa: no task_numa_fault() call if page table is changed
Based on suggestion from Ying.
Link: https://lore.kernel.org/linux-mm/87cymizdvc.fsf@yhuang6-desk2.ccr.corp.intel... Signed-off-by: Zi Yan ziy@nvidia.com --- mm/huge_memory.c | 5 +++-- mm/memory.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4e4364a17e6d..0e79ccaaf5e4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1724,7 +1724,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { flags |= TNF_MIGRATED; nid = target_nid; - goto out; + task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); + return 0; } else { flags |= TNF_MIGRATE_FAIL; vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); @@ -1743,7 +1744,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); -out: + if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); return 0; diff --git a/mm/memory.c b/mm/memory.c index d9b1dff9dc57..6c3aadf3e5ad 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5523,7 +5523,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; - goto out; + task_numa_fault(last_cpupid, nid, nr_pages, flags); + return 0; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, @@ -5547,7 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); -out: + if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0;
On 9 Aug 2024, at 9:49, David Hildenbrand wrote:
Hi Andrew,
Can you fold the fixup below to this patch? Thanks.
It might be reasonable to send out a new version. At least I am confused now what the latest state is :D
Sure. I will send v3 to include all patches I sent recently.
Best Regards, Yan, Zi
linux-stable-mirror@lists.linaro.org