kasan: cleanup of kasan_enabled() checks

Deduplicate kasan_enabled() checks in places where the callers already
perform them.

* Altered functions:

check_page_allocation
	Delete the check because the callers already have it in their
	__wrappers in include/linux/kasan.h (the wrapper pattern is
	sketched after this list):
		__kasan_kfree_large
		__kasan_mempool_poison_pages
		__kasan_mempool_poison_object

kasan_populate_vmalloc, kasan_release_vmalloc
	Add __wrappers in include/linux/kasan.h, since these functions
	are called externally from mm/vmalloc.c.

__kasan_unpoison_vmalloc, __kasan_poison_vmalloc
	Delete the checks because their respective __wrappers in
	include/linux/kasan.h already contain kasan_enabled() checks.

release_free_meta -- Delete the check because it is already performed
	higher up in every caller path. See the call paths:

	__kasan_slab_free -- already has the check
	__kasan_mempool_poison_object -- already has the check
		poison_slab_object
			kasan_save_free_info
				release_free_meta
					kasan_enabled() -- deleted here
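
For reference, the caller-side pattern relied on above looks roughly
like this (an illustrative sketch of the __wrapper convention in
include/linux/kasan.h, simplified rather than quoted verbatim):

	void __kasan_kfree_large(void *ptr, unsigned long ip);

	/*
	 * The inline wrapper performs the kasan_enabled() check once,
	 * so the out-of-line __ function can assume KASAN is enabled
	 * and does not need to repeat the check.
	 */
	static __always_inline void kasan_kfree_large(void *ptr)
	{
		if (kasan_enabled())
			__kasan_kfree_large(ptr, _RET_IP_);
	}

The new kasan_populate_vmalloc()/kasan_release_vmalloc() wrappers in
the include/linux/kasan.h hunk below follow the same convention.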

Link: https://lkml.kernel.org/r/20251009155403.1379150-3-snovitoll@gmail.com
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -571,11 +571,27 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
-			   unsigned long free_region_start,
-			   unsigned long free_region_end,
-			   unsigned long flags);
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+					 unsigned long size, gfp_t gfp_mask)
+{
+	if (kasan_enabled())
+		return __kasan_populate_vmalloc(addr, size, gfp_mask);
+	return 0;
+}
+
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
+			     unsigned long free_region_start,
+			     unsigned long free_region_end,
+			     unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+					 unsigned long free_region_start,
+					 unsigned long free_region_end,
+					 unsigned long flags)
+{
+	if (kasan_enabled())
+		return __kasan_release_vmalloc(start, end, free_region_start,
+					       free_region_end, flags);
+}
 
 #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -305,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-	if (!kasan_enabled())
-		return false;
-
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
 		return true;

--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -506,9 +506,6 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
-	if (!kasan_enabled())
-		return;
-
 	/* Check if free meta is valid. */
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;

--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -354,7 +354,7 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 	return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
@@ -395,14 +395,11 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 	return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
-	if (!kasan_enabled())
-		return 0;
-
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
@@ -424,7 +421,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+	ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
@@ -566,7 +563,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
  * pages entirely covered by the free region, we will not run in to any
  * trouble - any simultaneous allocations will be for disjoint regions.
  */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
 			   unsigned long flags)
@@ -575,9 +572,6 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
-	if (!kasan_enabled())
-		return;
-
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
@@ -626,9 +620,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
 	 */
-	if (!kasan_enabled())
-		return (void *)start;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
@@ -651,9 +642,6 @@
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-	if (!kasan_enabled())
-		return;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return;