mm: vmalloc: WARN_ON if mapping size is not PAGE_SIZE aligned

In mm/vmalloc.c, the function vmap_pte_range() assumes that the mapping
size is aligned to PAGE_SIZE.  If this assumption is violated, the loop
becomes infinite because the termination condition (`addr != end`) is
never met.  This can lead to overwriting other VA ranges and/or random
pages that physically follow the page table.

It is the caller's responsibility to ensure that the mapping size is
aligned to PAGE_SIZE.  However, the resulting memory corruption is hard
to root cause.  To make such a programming error in the caller easier to
identify, check whether the mapping size is PAGE_SIZE aligned with
WARN_ON_ONCE().

[yadong.qi@linux.alibaba.com: fix uninitialized value issue]
Closes: https://lore.kernel.org/r/202510110050.VG9YKMRK-lkp@intel.com/
Link: https://lkml.kernel.org/r/20251010014311.1689-1-yadong.qi@linux.alibaba.com
Signed-off-by: Yadong Qi <yadong.qi@linux.alibaba.com>
Reviewed-by: Huang Ying <ying.huang@linux.alibaba.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
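To make the failure mode concrete, here is a minimal userspace sketch of the pte-level walk, not the kernel code itself: PAGE_SIZE, the fake page table, and map_range() are illustrative stand-ins.  Because the address advances in PAGE_SIZE steps, a misaligned end is stepped over and `addr != end` never becomes false; the up-front alignment check is what the patch adds.

/* Simplified model of the vmap_pte_range() termination hazard.
 * All names here are illustrative; only the loop shape mirrors the kernel.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGNED(x)	(((x) & (PAGE_SIZE - 1)) == 0)
#define NR_ENTRIES	8		/* size of the fake page table */

static uint64_t fake_pte[NR_ENTRIES];

/* Mimics the do { ... } while (addr != end) walk at the pte level. */
static int map_range(unsigned long addr, unsigned long end, unsigned long pfn)
{
	unsigned long idx = 0;

	/* The check added by the patch: reject misaligned sizes up front. */
	if (!PAGE_ALIGNED(end - addr)) {
		fprintf(stderr, "misaligned size %lu, refusing to map\n",
			end - addr);
		return -1;
	}

	do {
		if (idx >= NR_ENTRIES)	/* in the kernel this would corrupt memory */
			return -1;
		fake_pte[idx++] = pfn++;
		addr += PAGE_SIZE;
	} while (addr != end);		/* never false if (end - addr) is misaligned */

	return 0;
}

int main(void)
{
	/* Aligned request: terminates after two iterations. */
	printf("aligned:    %d\n", map_range(0, 2 * PAGE_SIZE, 100));

	/* Misaligned request: without the check, addr would step past end
	 * (4096, 8192, 12288, ...) and never equal 6000, walking off the
	 * fake table; with the check it fails cleanly. */
	printf("misaligned: %d\n", map_range(0, 6000, 100));

	return 0;
}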
parent ca30ac479e
commit a739e6b557

mm/vmalloc.c | 29
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -100,6 +100,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	struct page *page;
 	unsigned long size = PAGE_SIZE;
 
+	if (WARN_ON_ONCE(!PAGE_ALIGNED(end - addr)))
+		return -EINVAL;
+
 	pfn = phys_addr >> PAGE_SHIFT;
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
@@ -167,6 +170,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err = 0;
 
 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 	if (!pmd)
@@ -180,10 +184,11 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			continue;
 		}
 
-		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
-			return -ENOMEM;
+		err = vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask);
+		if (err)
+			break;
 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
@@ -217,6 +222,7 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err = 0;
 
 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 	if (!pud)
@@ -230,11 +236,11 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			continue;
 		}
 
-		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
-					max_page_shift, mask))
-			return -ENOMEM;
+		err = vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask);
+		if (err)
+			break;
 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
@@ -268,6 +274,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 {
 	p4d_t *p4d;
 	unsigned long next;
+	int err = 0;
 
 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 	if (!p4d)
@@ -281,11 +288,11 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 			continue;
 		}
 
-		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
-					max_page_shift, mask))
-			return -ENOMEM;
+		err = vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask);
+		if (err)
+			break;
 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
-	return 0;
+	return err;
 }
 
 static int vmap_range_noflush(unsigned long addr, unsigned long end,