Patch series in this pull request:

- The 2 patch series "powerpc/pseries/cmm: two smaller fixes" from David
  Hildenbrand fixes a couple of minor things in ppc land.

- The 4 patch series "Improve folio split related functions" from Zi Yan
  provides some cleanups and minorish fixes in the folio splitting code.

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaTseCwAKCRDdBJ7gKXxA
jkCLAP9ttvtG7zhDf+tqvjCJFnybPEo6Z2B4Qx9g8i7s27gTZgD8DzYbIl+YdxrN
/tS0tdgUfrJIXR2PzzipkydNrs+lxgg=
=T65o
-----END PGP SIGNATURE-----

Merge tag 'mm-stable-2025-12-11-11-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

- "powerpc/pseries/cmm: two smaller fixes" (David Hildenbrand) fixes a
  couple of minor things in ppc land

- "Improve folio split related functions" (Zi Yan) provides some cleanups
  and minorish fixes in the folio splitting code

* tag 'mm-stable-2025-12-11-11-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/damon/tests/core-kunit: avoid damos_test_commit stack warning
  mm: vmscan: correct nr_requested tracing in scan_folios
  MAINTAINERS: add idr core-api doc file to XARRAY
  mm/hugetlb: fix incorrect error return from hugetlb_reserve_pages()
  mm: fix CONFIG_STACK_GROWSUP typo in mm.h
  mm/huge_memory: fix folio split stats counting
  mm/huge_memory: make min_order_for_split() always return an order
  mm/huge_memory: replace can_split_folio() with direct refcount calculation
  mm/huge_memory: change folio_split_supported() to folio_check_splittable()
  mm/sparse: fix sparse_vmemmap_init_nid_early definition without CONFIG_SPARSEMEM
  powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages
  powerpc/pseries/cmm: call balloon_devinfo_init() also without CONFIG_BALLOON_COMPACTION
commit 2516a87153
@@ -28321,6 +28321,7 @@ M:	Matthew Wilcox <willy@infradead.org>
 L:	linux-fsdevel@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Supported
+F:	Documentation/core-api/idr.rst
 F:	Documentation/core-api/xarray.rst
 F:	include/linux/idr.h
 F:	include/linux/xarray.h
@@ -532,6 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
 
 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	balloon_page_insert(b_dev_info, newpage);
+	__count_vm_event(BALLOON_MIGRATE);
 	b_dev_info->isolated_pages--;
 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
@@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
 
 static void cmm_balloon_compaction_init(void)
 {
-	balloon_devinfo_init(&b_dev_info);
 	b_dev_info.migratepage = cmm_migratepage;
 }
 #else /* CONFIG_BALLOON_COMPACTION */
@@ -572,6 +572,7 @@ static int cmm_init(void)
 	if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
 		return -EOPNOTSUPP;
 
+	balloon_devinfo_init(&b_dev_info);
 	cmm_balloon_compaction_init();
 
 	rc = register_oom_notifier(&cmm_oom_nb);
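The two cmm hunks above count a BALLOON_MIGRATE event under the pages_lock and move balloon_devinfo_init() into cmm_init(), so the balloon bookkeeping is set up even when CONFIG_BALLOON_COMPACTION is disabled. A minimal standalone model of that ordering change; all names except the two kernel functions mentioned are invented, this is not the kernel code:

/*
 * Standalone model, not the kernel code: why balloon_devinfo_init() moved
 * out of the compaction-only helper and into cmm_init().
 */
#include <stdbool.h>
#include <stdio.h>

struct model_balloon_dev_info {
	bool initialized;		/* stands in for the lists/locks balloon_devinfo_init() sets up */
	int (*migratepage)(void);	/* only relevant with balloon compaction */
};

static struct model_balloon_dev_info b_info;

static int model_migratepage(void)
{
	return 0;
}

/* After the fix this helper only wires up migration; it no longer does base init. */
static void model_balloon_compaction_init(bool compaction_enabled)
{
	if (compaction_enabled)
		b_info.migratepage = model_migratepage;
}

static void model_cmm_init(bool compaction_enabled)
{
	b_info.initialized = true;	/* unconditional, like balloon_devinfo_init() */
	model_balloon_compaction_init(compaction_enabled);
}

int main(void)
{
	model_cmm_init(false);
	printf("initialized without compaction support: %s\n",
	       b_info.initialized ? "yes" : "no");
	return 0;
}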
@@ -369,14 +369,13 @@ enum split_type {
 	SPLIT_TYPE_NON_UNIFORM,
 };
 
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
 int folio_split_unmapped(struct folio *folio, unsigned int new_order);
-int min_order_for_split(struct folio *folio);
+unsigned int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
-		enum split_type split_type, bool warns);
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+		enum split_type split_type);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
 
@@ -407,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
 static inline int try_folio_split_to_order(struct folio *folio,
 		struct page *page, unsigned int new_order)
 {
-	if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+	if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
 		return split_huge_page_to_order(&folio->page, new_order);
 	return folio_split(folio, new_order, page, NULL);
 }
@@ -631,10 +630,10 @@ static inline int split_huge_page(struct page *page)
 	return -EINVAL;
 }
 
-static inline int min_order_for_split(struct folio *folio)
+static inline unsigned int min_order_for_split(struct folio *folio)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
-	return -EINVAL;
+	return 0;
 }
 
 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
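The header changes above switch the split pre-check from a bool-returning folio_split_supported(..., warns) to folio_check_splittable(), which returns 0 or a negative errno the caller can propagate directly. A small standalone sketch of that calling-convention difference; the stub logic and all identifiers below are invented stand-ins, not kernel code:

/*
 * Standalone sketch: bool check (caller picks the errno) versus int check
 * (the helper picks a specific errno).
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* old style: true/false plus an optional warning flag */
static bool old_split_supported(int new_order, bool warns)
{
	(void)warns;
	return new_order != 1;
}

/* new style: 0 on success, a specific negative errno on failure */
static int new_check_splittable(int new_order)
{
	return new_order == 1 ? -EINVAL : 0;
}

static int old_caller(int new_order)
{
	if (!old_split_supported(new_order, true))
		return -EINVAL;	/* every failure looks like -EINVAL */
	return 0;
}

static int new_caller(int new_order)
{
	int ret = new_check_splittable(new_order);

	if (ret)
		return ret;	/* -EINVAL vs -EBUSY can now be distinguished */
	return 0;
}

int main(void)
{
	printf("old caller: %d, new caller: %d\n", old_caller(1), new_caller(1));
	return 0;
}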
@@ -438,7 +438,7 @@ enum {
 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
 #define VM_STACK	INIT_VM_FLAG(STACK)
-#ifdef CONFIG_STACK_GROWS_UP
+#ifdef CONFIG_STACK_GROWSUP
 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
 #else
 #define VM_STACK_EARLY	VM_NONE
@@ -2289,7 +2289,7 @@ void sparse_init(void);
 #else
 #define sparse_init()	do {} while (0)
 #define sparse_index_init(_sec, _nid)	do {} while (0)
-#define sparse_vmemmap_init_nid_early(_nid, _use)	do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid)	do {} while (0)
 #define sparse_vmemmap_init_nid_late(_nid)	do {} while (0)
 #define pfn_in_present_section pfn_valid
 #define subsection_map_init(_pfn, _nr_pages)	do {} while (0)
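The first hunk above fixes a misspelled preprocessor guard: CONFIG_STACK_GROWS_UP is never defined (the Kconfig symbol is CONFIG_STACK_GROWSUP), so the #ifdef always fell through to the #else branch. A standalone demonstration of that failure mode; the flag values below are made up and are not the kernel's definitions:

/*
 * Standalone demo: a misspelled #ifdef guard silently selects the #else
 * branch. Build with: cc -DCONFIG_STACK_GROWSUP demo.c
 */
#include <stdio.h>

#define VM_NONE		0x0u
#define VM_FLAG_EARLY	0x1u

#ifdef CONFIG_STACK_GROWS_UP	/* typo: the real Kconfig symbol is CONFIG_STACK_GROWSUP */
#define VM_STACK_EARLY	VM_FLAG_EARLY
#else
#define VM_STACK_EARLY	VM_NONE
#endif

int main(void)
{
	/* Prints 0 even when CONFIG_STACK_GROWSUP is defined, because the guard never matches. */
	printf("VM_STACK_EARLY = %#x\n", VM_STACK_EARLY);
	return 0;
}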
@@ -924,7 +924,7 @@ static void damos_test_commit_for(struct kunit *test, struct damos *dst,
 	}
 }
 
-static void damos_test_commit(struct kunit *test)
+static void damos_test_commit_pageout(struct kunit *test)
 {
 	damos_test_commit_for(test,
 			&(struct damos){
@@ -945,6 +945,10 @@ static void damos_test_commit(struct kunit *test)
 					DAMOS_WMARK_FREE_MEM_RATE,
 					800, 50, 30},
 			});
+}
+
+static void damos_test_commit_migrate_hot(struct kunit *test)
+{
 	damos_test_commit_for(test,
 			&(struct damos){
 				.pattern = (struct damos_access_pattern){
@@ -1230,7 +1234,8 @@ static struct kunit_case damon_test_cases[] = {
 	KUNIT_CASE(damos_test_commit_quota),
 	KUNIT_CASE(damos_test_commit_dests),
 	KUNIT_CASE(damos_test_commit_filter),
-	KUNIT_CASE(damos_test_commit),
+	KUNIT_CASE(damos_test_commit_pageout),
+	KUNIT_CASE(damos_test_commit_migrate_hot),
 	KUNIT_CASE(damon_test_commit_target_regions),
 	KUNIT_CASE(damos_test_filter_out),
 	KUNIT_CASE(damon_test_feed_loop_next_input),
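Per the commit subject, damos_test_commit was split into damos_test_commit_pageout and damos_test_commit_migrate_hot to avoid a stack-frame-size warning: each test function now holds only one large struct damos compound literal. A rough standalone model of that idea; the struct and names below are invented, not the kunit code:

/*
 * Standalone model: one large compound literal per function keeps each
 * stack frame small instead of accumulating several in a single frame.
 */
#include <stdio.h>

struct model_scheme {
	char payload[512];	/* stands in for a large struct damos literal */
};

static void model_check(const struct model_scheme *s)
{
	printf("checking scheme: %s\n", s->payload);
}

static void model_test_pageout(void)
{
	model_check(&(struct model_scheme){ .payload = "pageout" });
}

static void model_test_migrate_hot(void)
{
	model_check(&(struct model_scheme){ .payload = "migrate_hot" });
}

int main(void)
{
	model_test_pageout();
	model_test_migrate_hot();
	return 0;
}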
mm/huge_memory.c (167 changed lines)
@@ -3464,23 +3464,6 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
 	}
 }
 
-/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
-{
-	int extra_pins;
-
-	/* Additional pins from page cache */
-	if (folio_test_anon(folio))
-		extra_pins = folio_test_swapcache(folio) ?
-				folio_nr_pages(folio) : 0;
-	else
-		extra_pins = folio_nr_pages(folio);
-	if (pextra_pins)
-		*pextra_pins = extra_pins;
-	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
-					caller_pins;
-}
-
 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
 {
 	for (; nr_pages; page++, nr_pages--)
@@ -3697,15 +3680,40 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	return 0;
 }
 
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
-		enum split_type split_type, bool warns)
+/**
+ * folio_check_splittable() - check if a folio can be split to a given order
+ * @folio: folio to be split
+ * @new_order: the smallest order of the after split folios (since buddy
+ *             allocator like split generates folios with orders from @folio's
+ *             order - 1 to new_order).
+ * @split_type: uniform or non-uniform split
+ *
+ * folio_check_splittable() checks if @folio can be split to @new_order using
+ * @split_type method. The truncated folio check must come first.
+ *
+ * Context: folio must be locked.
+ *
+ * Return: 0 - @folio can be split to @new_order, otherwise an error number is
+ * returned.
+ */
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+		enum split_type split_type)
 {
+	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this will also currently refuse folios without a mapping in the
+	 * swapcache (shmem or to-be-anon folios).
+	 */
+	if (!folio->mapping && !folio_test_anon(folio))
+		return -EBUSY;
+
 	if (folio_test_anon(folio)) {
 		/* order-1 is not supported for anonymous THP. */
-		VM_WARN_ONCE(warns && new_order == 1,
-			     "Cannot split to order-1 folio");
 		if (new_order == 1)
-			return false;
+			return -EINVAL;
 	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
@@ -3726,9 +3734,7 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order,
 			 * case, the mapping does not actually support large
 			 * folios properly.
 			 */
-			VM_WARN_ONCE(warns,
-				"Cannot split file folio to non-0 order");
-			return false;
+			return -EINVAL;
 		}
 	}
 
@@ -3741,19 +3747,31 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order,
 	 * here.
 	 */
 	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-			"Cannot split swapcache folio to non-0 order");
-		return false;
+		return -EINVAL;
 	}
 
-	return true;
+	if (is_huge_zero_folio(folio))
+		return -EINVAL;
+
+	if (folio_test_writeback(folio))
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Number of folio references from the pagecache or the swapcache. */
+static unsigned int folio_cache_ref_count(const struct folio *folio)
+{
+	if (folio_test_anon(folio) && !folio_test_swapcache(folio))
+		return 0;
+	return folio_nr_pages(folio);
 }
 
 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct xa_state *xas,
 		struct address_space *mapping, bool do_lru,
 		struct list_head *list, enum split_type split_type,
-		pgoff_t end, int *nr_shmem_dropped, int extra_pins)
+		pgoff_t end, int *nr_shmem_dropped)
 {
 	struct folio *end_folio = folio_next(folio);
 	struct folio *new_folio, *next;
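The folio_cache_ref_count() helper introduced above replaces the old extra_pins bookkeeping: the split path now freezes the folio at its pagecache/swapcache reference count plus the caller's own reference. A standalone model of that arithmetic; struct model_folio is an invented stand-in for struct folio, this is not kernel code:

/*
 * Standalone model of the refcount the split path freezes at:
 * cache references + the caller's pin.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_folio {
	bool anon;
	bool swapcache;
	unsigned int nr_pages;
};

/* mirrors the logic of folio_cache_ref_count() in the hunk above */
static unsigned int model_cache_ref_count(const struct model_folio *folio)
{
	if (folio->anon && !folio->swapcache)
		return 0;
	return folio->nr_pages;
}

int main(void)
{
	struct model_folio anon_folio = { .anon = true,  .swapcache = false, .nr_pages = 512 };
	struct model_folio file_folio = { .anon = false, .swapcache = false, .nr_pages = 512 };

	printf("anon folio frozen at %u references\n", model_cache_ref_count(&anon_folio) + 1);
	printf("file folio frozen at %u references\n", model_cache_ref_count(&file_folio) + 1);
	return 0;
}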
@@ -3764,10 +3782,9 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 	VM_WARN_ON_ONCE(!mapping && end);
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	ds_queue = folio_split_queue_lock(folio);
-	if (folio_ref_freeze(folio, 1 + extra_pins)) {
+	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
 		struct swap_cluster_info *ci = NULL;
 		struct lruvec *lruvec;
-		int expected_refs;
 
 		if (old_order > 1) {
 			if (!list_empty(&folio->_deferred_list)) {
@@ -3835,8 +3852,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 
 			zone_device_private_split_cb(folio, new_folio);
 
-			expected_refs = folio_expected_ref_count(new_folio) + 1;
-			folio_ref_unfreeze(new_folio, expected_refs);
+			folio_ref_unfreeze(new_folio,
+					folio_cache_ref_count(new_folio) + 1);
 
 			if (do_lru)
 				lru_add_split_folio(folio, new_folio, lruvec, list);
@@ -3879,8 +3896,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 		 * Otherwise, a parallel folio_try_get() can grab @folio
 		 * and its caller can see stale page cache entries.
 		 */
-		expected_refs = folio_expected_ref_count(folio) + 1;
-		folio_ref_unfreeze(folio, expected_refs);
+		folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
 
 		if (do_lru)
 			unlock_page_lruvec(lruvec);
@@ -3929,40 +3945,27 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
-	int extra_pins, ret;
+	int ret;
 	pgoff_t end = 0;
-	bool is_hzp;
 
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
 
-	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
-		return -EINVAL;
-
-	/*
-	 * Folios that just got truncated cannot get split. Signal to the
-	 * caller that there was a race.
-	 *
-	 * TODO: this will also currently refuse shmem folios that are in the
-	 * swapcache.
-	 */
-	if (!is_anon && !folio->mapping)
-		return -EBUSY;
-
-	if (new_order >= old_order)
-		return -EINVAL;
-
-	if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
-		return -EINVAL;
-
-	is_hzp = is_huge_zero_folio(folio);
-	if (is_hzp) {
-		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
-		return -EBUSY;
+	if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	if (folio_test_writeback(folio))
-		return -EBUSY;
+	if (new_order >= old_order) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = folio_check_splittable(folio, new_order, split_type);
+	if (ret) {
+		VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
+		goto out;
+	}
 
 	if (is_anon) {
 		/*
@@ -4027,7 +4030,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
-	if (!can_split_folio(folio, 1, &extra_pins)) {
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
 		ret = -EAGAIN;
 		goto out_unlock;
 	}
@@ -4050,8 +4053,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	}
 
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
-						true, list, split_type, end, &nr_shmem_dropped,
-						extra_pins);
+						true, list, split_type, end, &nr_shmem_dropped);
 fail:
 	if (mapping)
 		xas_unlock(&xas);
@@ -4125,20 +4127,20 @@ out:
  */
 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
 {
-	int extra_pins, ret = 0;
+	int ret = 0;
 
 	VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
 
-	if (!can_split_folio(folio, 1, &extra_pins))
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
 		return -EAGAIN;
 
 	local_irq_disable();
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
 						NULL, false, NULL, SPLIT_TYPE_UNIFORM,
-						0, NULL, extra_pins);
+						0, NULL);
 	local_irq_enable();
 	return ret;
 }
@@ -4230,16 +4232,29 @@ int folio_split(struct folio *folio, unsigned int new_order,
 			SPLIT_TYPE_NON_UNIFORM);
 }
 
-int min_order_for_split(struct folio *folio)
+/**
+ * min_order_for_split() - get the minimum order @folio can be split to
+ * @folio: folio to split
+ *
+ * min_order_for_split() tells the minimum order @folio can be split to.
+ * If a file-backed folio is truncated, 0 will be returned. Any subsequent
+ * split attempt should get -EBUSY from split checking code.
+ *
+ * Return: @folio's minimum order for split
+ */
+unsigned int min_order_for_split(struct folio *folio)
 {
 	if (folio_test_anon(folio))
 		return 0;
 
-	if (!folio->mapping) {
-		if (folio_test_pmd_mappable(folio))
-			count_vm_event(THP_SPLIT_PAGE_FAILED);
-		return -EBUSY;
-	}
+	/*
+	 * If the folio got truncated, we don't know the previous mapping and
+	 * consequently the old min order. But it doesn't matter, as any split
+	 * attempt will immediately fail with -EBUSY as the folio cannot get
+	 * split until freed.
+	 */
+	if (!folio->mapping)
+		return 0;
 
 	return mapping_min_folio_order(folio->mapping);
 }
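With the change above, min_order_for_split() always returns an order: a truncated file folio yields 0, and it is the later split attempt that reports -EBUSY. A standalone sketch of the resulting caller-visible behaviour; the helpers below are invented, not kernel code:

/*
 * Standalone sketch: the order helper no longer encodes errors, the split
 * check reports the truncation race instead.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int model_min_order(bool anon, bool has_mapping,
				    unsigned int mapping_min_order)
{
	if (anon)
		return 0;
	if (!has_mapping)	/* truncated: the old code returned -EBUSY here */
		return 0;
	return mapping_min_order;
}

static int model_try_split(bool has_mapping, unsigned int new_order)
{
	(void)new_order;
	if (!has_mapping)	/* the split check now reports the race */
		return -EBUSY;
	return 0;
}

int main(void)
{
	bool has_mapping = false;	/* simulate a folio that was just truncated */
	unsigned int order = model_min_order(false, has_mapping, 2);

	printf("min order %u, split result %d\n", order,
	       model_try_split(has_mapping, order));
	return 0;
}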
@@ -4631,7 +4646,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		 * can be split or not. So skip the check here.
 		 */
 		if (!folio_test_private(folio) &&
-		    !can_split_folio(folio, 0, NULL))
+		    folio_expected_ref_count(folio) != folio_ref_count(folio))
 			goto next;
 
 		if (!folio_trylock(folio))
mm/hugetlb.c (25 changed lines)
@@ -6579,6 +6579,7 @@ long hugetlb_reserve_pages(struct inode *inode,
 	struct resv_map *resv_map;
 	struct hugetlb_cgroup *h_cg = NULL;
 	long gbl_reserve, regions_needed = 0;
+	int err;
 
 	/* This should never happen */
 	if (from > to) {
@@ -6612,8 +6613,10 @@ long hugetlb_reserve_pages(struct inode *inode,
 	} else {
 		/* Private mapping. */
 		resv_map = resv_map_alloc();
-		if (!resv_map)
+		if (!resv_map) {
+			err = -ENOMEM;
 			goto out_err;
+		}
 
 		chg = to - from;
 
@@ -6621,11 +6624,15 @@ long hugetlb_reserve_pages(struct inode *inode,
 		set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
+	if (chg < 0) {
+		/* region_chg() above can return -ENOMEM */
+		err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
 		goto out_err;
+	}
 
-	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
-				chg * pages_per_huge_page(h), &h_cg) < 0)
+	err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+				chg * pages_per_huge_page(h), &h_cg);
+	if (err < 0)
 		goto out_err;
 
 	if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
@@ -6641,14 +6648,17 @@ long hugetlb_reserve_pages(struct inode *inode,
 	 * reservations already in place (gbl_reserve).
 	 */
 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-	if (gbl_reserve < 0)
+	if (gbl_reserve < 0) {
+		err = gbl_reserve;
 		goto out_uncharge_cgroup;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
 	 * Hand the pages back to the subpool if there are not
 	 */
-	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
+	err = hugetlb_acct_memory(h, gbl_reserve);
+	if (err < 0)
 		goto out_put_pages;
 
 	/*
@@ -6667,6 +6677,7 @@ long hugetlb_reserve_pages(struct inode *inode,
 
 	if (unlikely(add < 0)) {
 		hugetlb_acct_memory(h, -gbl_reserve);
+		err = add;
 		goto out_put_pages;
 	} else if (unlikely(chg > add)) {
 		/*
@@ -6726,7 +6737,7 @@ out_err:
 		kref_put(&resv_map->refs, resv_map_release);
 		set_vma_desc_resv_map(desc, NULL);
 	}
-	return chg < 0 ? chg : add < 0 ? add : -EINVAL;
+	return err;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
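The hugetlb_reserve_pages() hunks above record err at each failure site instead of reconstructing it from chg/add at out_err, which previously collapsed unrelated failures (such as a failed cgroup charge) into -EINVAL. A standalone model of why the old expression loses the real error; all function names are invented, this is not the kernel code:

/*
 * Standalone model: record the error where it happens versus reconstructing
 * it at the exit label.
 */
#include <errno.h>
#include <stdio.h>

static int model_cgroup_charge(void)
{
	return -ENOMEM;		/* simulate a failed charge */
}

static long model_reserve_old(void)
{
	long chg = 1, add = 1;	/* both region operations "succeeded" */

	if (model_cgroup_charge() < 0)
		goto out_err;
	return 0;
out_err:
	return chg < 0 ? chg : add < 0 ? add : -EINVAL;	/* real cause is lost */
}

static long model_reserve_new(void)
{
	int err;

	err = model_cgroup_charge();
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;		/* propagates -ENOMEM */
}

int main(void)
{
	printf("old: %ld, new: %ld\n", model_reserve_old(), model_reserve_new());
	return 0;
}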
@@ -1284,7 +1284,8 @@ retry:
 			goto keep_locked;
 		if (folio_test_large(folio)) {
 			/* cannot split folio, skip it */
-			if (!can_split_folio(folio, 1, NULL))
+			if (folio_expected_ref_count(folio) !=
+					folio_ref_count(folio) - 1)
 				goto activate_locked;
 			/*
 			 * Split partially mapped folios right away.
@@ -4540,7 +4541,8 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 	int scanned = 0;
 	int isolated = 0;
 	int skipped = 0;
-	int remaining = min(nr_to_scan, MAX_LRU_BATCH);
+	int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
+	int remaining = scan_batch;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
@@ -4600,7 +4602,7 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 	count_memcg_events(memcg, item, isolated);
 	count_memcg_events(memcg, PGREFILL, sorted);
 	__count_vm_events(PGSCAN_ANON + type, isolated);
-	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
+	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
 				scanned, skipped, isolated,
 				type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
 	if (type == LRU_GEN_FILE)
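The scan_folios() change above passes min(nr_to_scan, MAX_LRU_BATCH) as the tracepoint's nr_requested argument instead of the fixed MAX_LRU_BATCH, so the trace reflects what was actually requested. A standalone sketch of the difference in what gets traced; the batch constant and names below are made up, this is not the kernel tracepoint:

/*
 * Standalone sketch: report the actual request, not the fixed batch limit.
 */
#include <stdio.h>

#define MODEL_MAX_LRU_BATCH 64UL

static void model_trace_isolate(unsigned long nr_requested, unsigned long nr_scanned)
{
	printf("nr_requested=%lu nr_scanned=%lu\n", nr_requested, nr_scanned);
}

int main(void)
{
	unsigned long nr_to_scan = 13;
	unsigned long scan_batch = nr_to_scan < MODEL_MAX_LRU_BATCH ?
				   nr_to_scan : MODEL_MAX_LRU_BATCH;

	model_trace_isolate(MODEL_MAX_LRU_BATCH, nr_to_scan);	/* old: always the batch limit */
	model_trace_isolate(scan_batch, nr_to_scan);		/* new: the actual request */
	return 0;
}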