memblock fixes for v6.17-rc4

* printk cleanups in memblock and numa_memblks
* update kernel-doc for MEMBLOCK_RSRV_NOINIT to be more accurate and
  detailed

Merge tag 'fixes-2025-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock fixes from Mike Rapoport:

 - printk cleanups in memblock and numa_memblks

 - update kernel-doc for MEMBLOCK_RSRV_NOINIT to be more accurate and
   detailed

* tag 'fixes-2025-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  memblock: fix kernel-doc for MEMBLOCK_RSRV_NOINIT
  mm: numa,memblock: Use SZ_1M macro to denote bytes to MB conversion
  mm/numa_memblks: Use pr_debug instead of printk(KERN_DEBUG)
Linus Torvalds 2025-08-28 15:46:06 -07:00
commit 5b9f3b013b
4 changed files with 21 additions and 13 deletions

@@ -40,8 +40,9 @@ extern unsigned long long max_possible_pfn;
  * via a driver, and never indicated in the firmware-provided memory map as
  * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
  * kernel resource tree.
- * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
- * not initialized (only for reserved regions).
+ * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
+ * fully initialized. Users of this flag are responsible to properly initialize
+ * struct pages of this region
  * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
  * either explictitly with memblock_reserve_kern() or via memblock
  * allocation APIs. All memblock allocations set this flag.

@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
 	}
 
 	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
-		mem_size_mb = memblock_phys_mem_size() >> 20;
+		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
 		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
-		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
 		return false;
 	}
 
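
For reference, SZ_1M comes from include/linux/sizes.h and is simply 0x00100000 (1 << 20), so dividing by it gives exactly the same value as the old right shift; the macro only makes the unit explicit. A minimal sketch of the equivalence (bytes_to_mb() is a made-up name, not a kernel helper):

	#include <linux/sizes.h>	/* SZ_1M == 0x00100000 == 1 << 20 */

	/* Illustration only: both forms produce the same MB count. */
	static inline unsigned long bytes_to_mb(unsigned long bytes)
	{
		return bytes / SZ_1M;	/* same result as bytes >> 20 */
	}
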
@@ -1091,13 +1091,20 @@ int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
 
 /**
  * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
- * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized
- * for this region.
+ * MEMBLOCK_RSRV_NOINIT
+ *
  * @base: the base phys addr of the region
  * @size: the size of the region
 *
- * struct pages will not be initialized for reserved memory regions marked with
- * %MEMBLOCK_RSRV_NOINIT.
+ * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
+ * not be fully initialized to allow the caller optimize their initialization.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
+ * completely bypasses the initialization of struct pages for such region.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
+ * region will be initialized with default values but won't be marked as
+ * reserved.
 *
  * Return: 0 on success, -errno on failure.
  */
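
To make the documented contract concrete, here is a minimal hypothetical early-boot sketch; memblock_reserve() and memblock_reserved_mark_noinit() are the real memblock calls, while the helper name and region constants are invented for illustration and error handling is omitted:

	#include <linux/init.h>
	#include <linux/memblock.h>
	#include <linux/sizes.h>

	/* Hypothetical region, purely for illustration. */
	#define MY_REGION_BASE	0x80000000ULL
	#define MY_REGION_SIZE	SZ_16M

	static void __init my_reserve_noinit(void)	/* hypothetical helper */
	{
		/* Carve the range out of the free memory pool... */
		memblock_reserve(MY_REGION_BASE, MY_REGION_SIZE);

		/*
		 * ...and opt out of the generic struct page initialization.
		 * From here on, initializing those struct pages is this
		 * caller's responsibility, subject to the
		 * CONFIG_DEFERRED_STRUCT_PAGE_INIT distinction documented
		 * in the kernel-doc above.
		 */
		memblock_reserved_mark_noinit(MY_REGION_BASE, MY_REGION_SIZE);
	}

Per the documentation above, the point of the flag is to let such a caller initialize the region's struct pages more cheaply than the generic boot-time loop would.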

@@ -73,7 +73,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
 	}
 
 	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
-	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
+	       nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);
 	return 0;
 }
 
@@ -264,7 +264,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
 	min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
 	if (size < min_size) {
 		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
-		       size >> 20, min_size >> 20);
+		       size / SZ_1M, min_size / SZ_1M);
 		size = min_size;
 	}
 	size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
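
For context (not part of this diff): the "Faking node" and "Fake node size" messages come from x86 NUMA emulation, which is driven by the numa=fake= boot option. A hedged example of the documented forms, with illustrative values:

	numa=fake=4	# interleave all system RAM into 4 emulated nodes
	numa=fake=512M	# emulated nodes of 512MB each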

@@ -76,7 +76,7 @@ static int __init numa_alloc_distance(void)
 		for (j = 0; j < cnt; j++)
 			numa_distance[i * cnt + j] = i == j ?
 				LOCAL_DISTANCE : REMOTE_DISTANCE;
-	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
+	pr_debug("NUMA: Initialized distance table, cnt=%d\n", cnt);
 
 	return 0;
 }
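
The switch to pr_debug() is not purely cosmetic: unlike a bare printk(KERN_DEBUG ...), pr_debug() picks up the file's pr_fmt() prefix and is either compiled away or routed through dynamic debug. A simplified sketch of the plumbing, assuming the usual definitions in include/linux/printk.h (the real header has a few more cases):

	/* Simplified: not the verbatim kernel header. */
	#if defined(CONFIG_DYNAMIC_DEBUG)
	#define pr_debug(fmt, ...) dynamic_pr_debug(fmt, ##__VA_ARGS__)
	#elif defined(DEBUG)
	#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif

With CONFIG_DYNAMIC_DEBUG the message can then be toggled per call site at runtime instead of always being emitted.
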
@@ -427,9 +427,9 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
 		unsigned long pfn_align = node_map_pfn_alignment();
 
 		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
-			unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+			unsigned long node_align_mb = PFN_PHYS(pfn_align) / SZ_1M;
 
-			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
 
 			pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
 				node_align_mb, sect_align_mb);