slab: Add SL_pfmemalloc flag

Give slab its own name for this flag.  Move the implementation from
slab.h to slub.c since it's only used inside slub.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20250611155916.2579160-5-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Author:    Matthew Wilcox (Oracle)
Date:      2025-06-11 16:59:07 +01:00
Committer: Vlastimil Babka
parent c5c44900f4
commit 3df29914d9
2 changed files with 21 additions and 24 deletions
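For readers outside mm/: struct slab overlays struct page, so a per-slab boolean lives in a bit of the shared flags word, and the SL_* enum gives slab its own names for those bits. The following self-contained userspace sketch illustrates the idiom; the demo_* names are hypothetical, not kernel API.

	/* Named flag bits sharing one unsigned long, as enum slab_flags does. */
	#include <stdbool.h>
	#include <stdio.h>

	enum demo_flags {
		DF_locked,	/* plays the role of SL_locked / PG_locked */
		DF_partial,	/* SL_partial / PG_workingset */
		DF_pfmemalloc,	/* SL_pfmemalloc / PG_active */
	};

	struct demo_slab {
		unsigned long flags;	/* aliases the page flags in the kernel */
	};

	static inline bool demo_test(const struct demo_slab *s, int bit)
	{
		return s->flags & (1UL << bit);
	}

	static inline void demo_set(struct demo_slab *s, int bit)
	{
		s->flags |= 1UL << bit;
	}

	int main(void)
	{
		struct demo_slab s = { .flags = 0 };

		demo_set(&s, DF_pfmemalloc);
		printf("pfmemalloc=%d partial=%d\n",
		       demo_test(&s, DF_pfmemalloc),	/* 1 */
		       demo_test(&s, DF_partial));	/* 0 */
		return 0;
	}

Keeping the enum values equal to the PG_* bit numbers means the slab helpers and the generic page-flag code continue to manipulate the very same bits.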

mm/slab.h

@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 #define slab_page(s) folio_page(slab_folio(s), 0)
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline bool slab_test_pfmemalloc(const struct slab *slab)
-{
-	return folio_test_active(slab_folio(slab));
-}
-
-static inline void slab_set_pfmemalloc(struct slab *slab)
-{
-	folio_set_active(slab_folio(slab));
-}
-
-static inline void slab_clear_pfmemalloc(struct slab *slab)
-{
-	folio_clear_active(slab_folio(slab));
-}
-
-static inline void __slab_clear_pfmemalloc(struct slab *slab)
-{
-	__folio_clear_active(slab_folio(slab));
-}
-
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
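The moved helpers (second file, below) stop going through the folio_*_active() accessors and operate on slab->flags directly via test_bit()/set_bit()/__clear_bit(). Per the usual kernel convention, the double-underscore variant is non-atomic and is only safe because the slab is no longer reachable by other CPUs at the point it is called. A userspace sketch of that convention using C11 atomics; the demo_* names are illustrative, not kernel API.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static inline void demo_set_bit(int nr, atomic_ulong *word)
	{
		atomic_fetch_or(word, 1UL << nr);	/* atomic read-modify-write */
	}

	static inline bool demo_test_bit(int nr, atomic_ulong *word)
	{
		return atomic_load(word) & (1UL << nr);
	}

	/* Non-atomic analogue of __clear_bit(): two separate operations,
	 * so the caller must rule out concurrent access to the word. */
	static inline void __demo_clear_bit(int nr, atomic_ulong *word)
	{
		unsigned long v = atomic_load_explicit(word, memory_order_relaxed);

		atomic_store_explicit(word, v & ~(1UL << nr), memory_order_relaxed);
	}

	int main(void)
	{
		atomic_ulong flags = 0;

		demo_set_bit(2, &flags);
		printf("bit2=%d\n", demo_test_bit(2, &flags));	/* 1 */
		__demo_clear_bit(2, &flags);	/* single-threaded here: safe */
		printf("bit2=%d\n", demo_test_bit(2, &flags));	/* 0 */
		return 0;
	}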

mm/slub.c

@@ -187,6 +187,7 @@
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
  * @SL_partial: On the per-node partial list
+ * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
@@ -195,6 +196,7 @@
 enum slab_flags {
 	SL_locked = PG_locked,
 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
+	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
 };
 
 /*
@@ -648,6 +650,25 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 }
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
+/*
+ * If network-based swap is enabled, slub must keep track of whether memory
+ * was allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(SL_pfmemalloc, &slab->flags);
+}
+
 /*
  * Per slab locking using the pagelock
  */
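For context on what the flag is for: when network-based swap is in use, a slab allocated from pfmemalloc (emergency) reserves must not satisfy ordinary allocations. In slub that policy check is pfmemalloc_match(), which lets a reserve-backed slab serve only requests that are themselves entitled to reserves. A self-contained sketch of the policy follows; the demo_* names are hypothetical stand-ins, not the kernel functions.

	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_PFMEMALLOC	(1UL << 0)	/* stands in for SL_pfmemalloc */

	struct demo_slab {
		unsigned long flags;
	};

	/* Stand-in for gfp_pfmemalloc_allowed(): may this request dip
	 * into emergency reserves? */
	static bool demo_reserves_allowed(bool request_has_memalloc)
	{
		return request_has_memalloc;
	}

	/* Mirrors the shape of slub's pfmemalloc_match(): a normal slab
	 * serves anyone; a reserve-backed slab serves only
	 * reserve-entitled requests. */
	static bool demo_pfmemalloc_match(const struct demo_slab *slab,
					  bool request_has_memalloc)
	{
		return !(slab->flags & DEMO_PFMEMALLOC) ||
		       demo_reserves_allowed(request_has_memalloc);
	}

	int main(void)
	{
		struct demo_slab normal = { .flags = 0 };
		struct demo_slab reserve = { .flags = DEMO_PFMEMALLOC };

		printf("%d\n", demo_pfmemalloc_match(&normal, false));	/* 1 */
		printf("%d\n", demo_pfmemalloc_match(&normal, true));	/* 1 */
		printf("%d\n", demo_pfmemalloc_match(&reserve, false));	/* 0 */
		printf("%d\n", demo_pfmemalloc_match(&reserve, true));	/* 1 */
		return 0;
	}

The only rejected combination is a reserve-backed slab feeding a request with no reserve entitlement, which is exactly the case the SL_pfmemalloc bit exists to detect.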