vmap_pages_pte_range() enters the lazy MMU mode, but fails to leave it in case an error is encountered.
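The function brackets its PTE loop with arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(); the leave call sits after the loop, outside the diff context shown below. Returning from inside the loop therefore skips it. The fix records the error, breaks out of the loop, and returns through the single exit path so the mode is always left. As a minimal userspace sketch of that pattern (enter_mode()/leave_mode() are hypothetical stand-ins for the arch helpers, not kernel code):

  #include <stdio.h>

  /* Hypothetical stand-ins for arch_enter_lazy_mmu_mode() /
   * arch_leave_lazy_mmu_mode(); not the kernel helpers. */
  static void enter_mode(void) { puts("enter"); }
  static void leave_mode(void) { puts("leave"); }

  static int map_items(const int *items, int n)
  {
      int err = 0;
      int i;

      enter_mode();
      for (i = 0; i < n; i++) {
          if (items[i] < 0) {
              /* Record the error and break instead of returning,
               * which would skip leave_mode() below. */
              err = -1;
              break;
          }
      }
      /* Single exit path: the mode is left on success and error alike. */
      leave_mode();
      return err;
  }

  int main(void)
  {
      const int good[] = { 1, 2, 3 };
      const int bad[]  = { 1, -2, 3 };

      printf("good -> %d\n", map_items(good, 3));
      printf("bad  -> %d\n", map_items(bad, 3));
      return 0;
  }

In the patch, err/break/single-return plays the role of map_items() above, so arch_leave_lazy_mmu_mode() and the *mask update are reached on every path.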
Link: https://lkml.kernel.org/r/20250623075721.2817094-1-agordeev@linux.ibm.com
Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/202506132017.T1l1l6ME-lkp@intel.com/
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit fea18c686320a53fce7ad62a87a3e1d10ad02f31)
---
 mm/vmalloc.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 840e25cab934..502f51b86fa3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -460,6 +460,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 		pgtbl_mod_mask *mask)
 {
+	int err = 0;
 	pte_t *pte;
 
 	/*
@@ -473,15 +474,21 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	do {
 		struct page *page = pages[*nr];
 
-		if (WARN_ON(!pte_none(*pte)))
-			return -EBUSY;
-		if (WARN_ON(!page))
-			return -ENOMEM;
+		if (WARN_ON(!pte_none(*pte))) {
+			err = -EBUSY;
+			break;
+		}
+		if (WARN_ON(!page)) {
+			err = -ENOMEM;
+			break;
+		}
+
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	*mask |= PGTBL_PTE_MODIFIED;
-	return 0;
+
+	return err;
 }
 
 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,