mm, swap: cleanup swap entry allocation parameter

We no longer need this GFP parameter after commit 8578e0c00d ("mm, swap:
use the swap table for the swap cache and switch API").  Before that
commit, the GFP parameter was already almost identical for all callers,
so that commit changed nothing in practice.  The swap table series just
moved the GFP handling to a lower layer, where it is better defined and
depends only on whether the allocation is atomic or may sleep.

Now that this parameter is no longer used, just remove it.  No behavior
change.
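
As an illustration only (this is not the actual swap table code, and the
helper below is hypothetical), the lower layer can now derive its own GFP
flags roughly along these lines:

	/* Hypothetical sketch: the allocating layer picks a GFP mask from
	 * its own context instead of taking a gfp_t from every caller. */
	static gfp_t swap_alloc_gfp(bool may_sleep)
	{
		if (may_sleep)
			return GFP_KERNEL;
		/* atomic path keeps the old caller-style mask */
		return __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN;
	}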

Link: https://lkml.kernel.org/r/20251024-swap-clean-after-swap-table-p1-v2-3-a709469052e7@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Chris Li <chrisl@kernel.org>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit a983471cfc (parent e4adea27b9)
Author: Kairui Song <kasong@tencent.com>
Date:   2025-10-24 02:00:41 +08:00
Committer: Andrew Morton <akpm@linux-foundation.org>

4 changed files with 6 additions and 7 deletions

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -462,7 +462,7 @@ static inline long get_nr_swap_pages(void)
 }
 
 extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
+int folio_alloc_swap(struct folio *folio);
 bool folio_free_swap(struct folio *folio);
 void put_swap_folio(struct folio *folio, swp_entry_t entry);
 extern swp_entry_t get_swap_page_of_type(int);
@@ -560,7 +560,7 @@ static inline int swp_swapcount(swp_entry_t entry)
 	return 0;
 }
 
-static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
+static inline int folio_alloc_swap(struct folio *folio)
 {
 	return -EINVAL;
 }

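Both hunks above keep the same error convention: a nonzero return means
no swap entry was allocated, and with !CONFIG_SWAP the inline stub simply
returns -EINVAL.  A hedged caller-side sketch (the wrapper function here
is hypothetical, not part of this patch):

	/* Hypothetical caller: behaves the same with and without
	 * CONFIG_SWAP, since the stub just returns -EINVAL. */
	static int try_swap_out(struct folio *folio)
	{
		if (folio_alloc_swap(folio))
			return -ENOMEM;	/* no swap space; keep the folio */
		return 0;		/* folio is now in the swap cache */
	}
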
--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -1617,7 +1617,7 @@ try_split:
 		folio_mark_uptodate(folio);
 	}
 
-	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+	if (!folio_alloc_swap(folio)) {
 		bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
 		int error;
 
--- a/mm/swapfile.c
+++ b/mm/swapfile.c

@@ -1417,7 +1417,6 @@ static bool swap_sync_discard(void)
 /**
  * folio_alloc_swap - allocate swap space for a folio
  * @folio: folio we want to move to swap
- * @gfp: gfp mask for shadow nodes
  *
  * Allocate swap space for the folio and add the folio to the
  * swap cache.
@@ -1425,7 +1424,7 @@ static bool swap_sync_discard(void)
  * Context: Caller needs to hold the folio lock.
  * Return: Whether the folio was added to the swap cache.
  */
-int folio_alloc_swap(struct folio *folio, gfp_t gfp)
+int folio_alloc_swap(struct folio *folio)
 {
 	unsigned int order = folio_order(folio);
 	unsigned int size = 1 << order;

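The kernel-doc above states the contract that survives this change: the
caller holds the folio lock, and the return value says whether the folio
made it into the swap cache.  A minimal sketch of that contract (the
assertion and label are illustrative additions, not part of this patch):

	/* Per the Context: line, the folio must already be locked. */
	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	if (folio_alloc_swap(folio))
		goto keep_locked;	/* not added to the swap cache */
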
--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -1318,7 +1318,7 @@ retry:
 					    split_folio_to_list(folio, folio_list))
 						goto activate_locked;
 				}
-				if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
+				if (folio_alloc_swap(folio)) {
 					int __maybe_unused order = folio_order(folio);
 
 					if (!folio_test_large(folio))
@@ -1334,7 +1334,7 @@ retry:
 					}
 #endif
 					count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
-					if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
+					if (folio_alloc_swap(folio))
 						goto activate_locked_split;
 				}
 				/*
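
The vmscan hunks above implement the large-folio fallback; restated in
simplified form (illustrative only; the THP accounting and #ifdefs are
elided):

	int order = folio_order(folio);		/* save before any split */

	if (folio_alloc_swap(folio)) {		/* whole-folio attempt failed */
		if (split_folio_to_list(folio, folio_list))
			goto activate_locked;	/* cannot split either */
		count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
		if (folio_alloc_swap(folio))	/* retry with base pages */
			goto activate_locked_split;
	}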