x86/boot: Move startup code out of __head section

Move startup code out of the __head section, now that this no longer has
a special significance. Move everything into .text or .init.text as
appropriate, so that startup code is not kept around unnecessarily.

  [ bp: Fold in hunk to fix 32-bit CPU hotplug:
    Reported-by: kernel test robot <oliver.sang@intel.com>
    Closes: https://lore.kernel.org/oe-lkp/202509022207.56fd97f4-lkp@intel.com ]

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/20250828102202.1849035-45-ardb+git@google.com
Author:    Ard Biesheuvel (2025-08-28 12:22:24 +02:00)
Committer: Borislav Petkov (AMD)
Commit:    c5c30a3736 (parent: e7b88bc005)

10 files changed, 48 insertions(+), 54 deletions(-)
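For readers unfamiliar with the annotations involved, here is a minimal sketch (not part of the patch) of the before/after pattern applied throughout the diff below. The x86-specific __head marker, whose definition is deleted from <asm/init.h> in this patch, forced code into the .head.text section; the generic __init marker places it in .init.text, which is freed once boot completes, and functions still needed after boot simply lose the annotation and end up in .text:

  /* before: __head placed the function in the .head.text startup section */
  void __head startup_64_setup_gdt_idt(void);

  /* after: plain __init placement in .init.text, discarded after boot */
  void __init startup_64_setup_gdt_idt(void);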


@@ -32,9 +32,6 @@ struct ghcb *boot_ghcb;
 #undef __init
 #define __init
-#undef __head
-#define __head
 #define __BOOT_COMPRESSED
 u8 snp_vmpl;


@@ -24,7 +24,7 @@
 static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
 /* This may run while still in the direct mapping */
-void __head startup_64_load_idt(void *vc_handler)
+void startup_64_load_idt(void *vc_handler)
 {
 	struct desc_ptr desc = {
 		.address = (unsigned long)rip_rel_ptr(bringup_idt_table),
@@ -46,7 +46,7 @@ void __head startup_64_load_idt(void *vc_handler)
 /*
  * Setup boot CPU state needed before kernel switches to virtual addresses.
  */
-void __head startup_64_setup_gdt_idt(void)
+void __init startup_64_setup_gdt_idt(void)
 {
 	struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
 	void *handler = NULL;


@@ -30,7 +30,7 @@ static inline bool check_la57_support(void)
 	return true;
 }
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+static unsigned long __init sme_postprocess_startup(struct boot_params *bp,
 						    pmdval_t *pmd,
 						    unsigned long p2v_offset)
 {
@@ -84,7 +84,7 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
  * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
  * subtracting p2v_offset from the RIP-relative address.
  */
-unsigned long __head __startup_64(unsigned long p2v_offset,
+unsigned long __init __startup_64(unsigned long p2v_offset,
 				  struct boot_params *bp)
 {
 	pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);


@@ -33,7 +33,7 @@ static u32 cpuid_ext_range_max __ro_after_init;
 bool sev_snp_needs_sfw;
-void __head __noreturn
+void __noreturn
 sev_es_terminate(unsigned int set, unsigned int reason)
 {
 	u64 val = GHCB_MSR_TERM_REQ;
@@ -52,7 +52,7 @@ sev_es_terminate(unsigned int set, unsigned int reason)
 /*
  * The hypervisor features are available from GHCB version 2 onward.
  */
-u64 get_hv_features(void)
+u64 __init get_hv_features(void)
 {
 	u64 val;
@@ -222,7 +222,7 @@ const struct snp_cpuid_table *snp_cpuid_get_table(void)
  *
  * Return: XSAVE area size on success, 0 otherwise.
  */
-static u32 __head snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
+static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
 {
 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
 	u64 xfeatures_found = 0;
@@ -258,7 +258,7 @@ static u32 __head snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
 	return xsave_size;
 }
-static bool __head
+static bool
 snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
 {
 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -300,7 +300,7 @@ static void snp_cpuid_hv_msr(void *ctx, struct cpuid_leaf *leaf)
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
 }
-static int __head
+static int
 snp_cpuid_postprocess(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
 		      void *ctx, struct cpuid_leaf *leaf)
 {
@@ -396,8 +396,8 @@ snp_cpuid_postprocess(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
  * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
  * should be treated as fatal by caller.
  */
-int __head snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
-		     void *ctx, struct cpuid_leaf *leaf)
+int snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
+	      void *ctx, struct cpuid_leaf *leaf)
 {
 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -439,7 +439,7 @@ int __head snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf),
  * page yet, so it only supports the MSR based communication with the
  * hypervisor and only the CPUID exit-code.
  */
-void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
 {
 	unsigned int subfn = lower_bits(regs->cx, 32);
 	unsigned int fn = lower_bits(regs->ax, 32);
@@ -515,7 +515,7 @@ struct cc_setup_data {
  * Search for a Confidential Computing blob passed in as a setup_data entry
  * via the Linux Boot Protocol.
  */
-static __head
+static __init
 struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
 {
 	struct cc_setup_data *sd = NULL;
@@ -543,7 +543,7 @@ struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
  * mapping needs to be updated in sync with all the changes to virtual memory
  * layout and related mapping facilities throughout the boot process.
  */
-static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 {
 	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
 	int i;
@@ -571,7 +571,7 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 	}
 }
-static int __head svsm_call_msr_protocol(struct svsm_call *call)
+static int svsm_call_msr_protocol(struct svsm_call *call)
 {
 	int ret;
@@ -582,8 +582,8 @@ static int __head svsm_call_msr_protocol(struct svsm_call *call)
 	return ret;
 }
-static void __head svsm_pval_4k_page(unsigned long paddr, bool validate,
-				     struct svsm_ca *caa, u64 caa_pa)
+static void svsm_pval_4k_page(unsigned long paddr, bool validate,
+			      struct svsm_ca *caa, u64 caa_pa)
 {
 	struct svsm_pvalidate_call *pc;
 	struct svsm_call call = {};
@@ -624,8 +624,8 @@ static void __head svsm_pval_4k_page(unsigned long paddr, bool validate,
 	native_local_irq_restore(flags);
 }
-static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
-				     bool validate, struct svsm_ca *caa, u64 caa_pa)
+static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
			      bool validate, struct svsm_ca *caa, u64 caa_pa)
 {
 	int ret;
@@ -645,8 +645,8 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
 	sev_evict_cache((void *)vaddr, 1);
 }
-static void __head __page_state_change(unsigned long vaddr, unsigned long paddr,
-				       const struct psc_desc *desc)
+static void __page_state_change(unsigned long vaddr, unsigned long paddr,
+				const struct psc_desc *desc)
 {
 	u64 val, msr;
@@ -684,7 +684,7 @@ static void __head __page_state_change(unsigned long vaddr, unsigned long paddr,
  * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
  * services needed when not running in VMPL0.
  */
-static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info,
+static bool __init svsm_setup_ca(const struct cc_blob_sev_info *cc_info,
 				 void *page)
 {
 	struct snp_secrets_page *secrets_page;


@@ -44,7 +44,7 @@
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
-void __head
+void __init
 early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 		      unsigned long npages, const struct psc_desc *desc)
 {
@@ -63,7 +63,7 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 	}
 }
-void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 					 unsigned long npages)
 {
 	struct psc_desc d = {
@@ -88,7 +88,7 @@ void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 	early_set_pages_state(vaddr, paddr, npages, &d);
 }
-void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 					unsigned long npages)
 {
 	struct psc_desc d = {
@@ -123,7 +123,7 @@ void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
  *
  * Scan for the blob in that order.
  */
-static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+static struct cc_blob_sev_info *__init find_cc_blob(struct boot_params *bp)
 {
 	struct cc_blob_sev_info *cc_info;
@@ -149,7 +149,7 @@ found_cc_info:
 	return cc_info;
 }
-static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
+static void __init svsm_setup(struct cc_blob_sev_info *cc_info)
 {
 	struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys;
 	struct svsm_call call = {};
@@ -190,7 +190,7 @@ static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
 	boot_svsm_caa_pa = pa;
 }
-bool __head snp_init(struct boot_params *bp)
+bool __init snp_init(struct boot_params *bp)
 {
 	struct cc_blob_sev_info *cc_info;
@@ -219,7 +219,7 @@ bool __head snp_init(struct boot_params *bp)
 	return true;
 }
-void __head __noreturn snp_abort(void)
+void __init __noreturn snp_abort(void)
 {
 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
 }


@@ -91,7 +91,7 @@ struct sme_populate_pgd_data {
  */
 static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
-static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
 	unsigned long pgd_start, pgd_end, pgd_size;
 	pgd_t *pgd_p;
@@ -106,7 +106,7 @@ static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 	memset(pgd_p, 0, pgd_size);
 }
-static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -143,7 +143,7 @@ static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 	return pud;
 }
-static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 {
 	pud_t *pud;
 	pmd_t *pmd;
@@ -159,7 +159,7 @@ static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
 }
-static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pud_t *pud;
 	pmd_t *pmd;
@@ -185,7 +185,7 @@ static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 	set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
 }
-static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
 {
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
@@ -195,7 +195,7 @@ static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
 	}
 }
-static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
 {
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd(ppd);
@@ -205,7 +205,7 @@ static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
 	}
 }
-static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
 				   pmdval_t pmd_flags, pteval_t pte_flags)
 {
 	unsigned long vaddr_end;
@@ -229,22 +229,22 @@ static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
 	__sme_map_range_pte(ppd);
 }
-static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
-static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
-static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
-static unsigned long __head sme_pgtable_calc(unsigned long len)
+static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
 	unsigned long entries = 0, tables = 0;
@@ -281,7 +281,7 @@ static unsigned long __head sme_pgtable_calc(unsigned long len)
 	return entries + tables;
 }
-void __head sme_encrypt_kernel(struct boot_params *bp)
+void __init sme_encrypt_kernel(struct boot_params *bp)
 {
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
@@ -485,7 +485,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp)
 	native_write_cr3(__native_read_cr3());
 }
-void __head sme_enable(struct boot_params *bp)
+void __init sme_enable(struct boot_params *bp)
 {
 	unsigned int eax, ebx, ecx, edx;
 	unsigned long feature_mask;


@@ -2,12 +2,6 @@
 #ifndef _ASM_X86_INIT_H
 #define _ASM_X86_INIT_H
-#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
-#define __head __section(".head.text") __no_sanitize_undefined __no_stack_protector
-#else
-#define __head __section(".head.text") __no_sanitize_undefined __no_kstack_erase
-#endif
 struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void (*free_pgt_page)(void *, void *); /* free buf for page table */


@@ -61,7 +61,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * any particular GDT layout, because we load our own as soon as we
  * can.
  */
-__HEAD
+__INIT
 SYM_CODE_START(startup_32)
 	movl pa(initial_stack),%ecx
@@ -136,6 +136,9 @@ SYM_CODE_END(startup_32)
  * If cpu hotplug is not supported then this code can go in init section
  * which will be freed later
  */
+#ifdef CONFIG_HOTPLUG_CPU
+	.text
+#endif
 SYM_FUNC_START(startup_32_smp)
 	cld
 	movl $(__BOOT_DS),%eax


@@ -33,7 +33,7 @@
  * because we need identity-mapped pages.
  */
-__HEAD
+__INIT
 	.code64
 SYM_CODE_START_NOALIGN(startup_64)
 	UNWIND_HINT_END_OF_STACK


@@ -24,7 +24,7 @@
 #include <asm/nospec-branch.h>
 #include <xen/interface/elfnote.h>
-__HEAD
+__INIT
 /*
  * Entry point for PVH guests.
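For reference, and not part of the diff itself, the section macros used by the assembly entry points above come from include/linux/init.h; switching them from __HEAD to __INIT is what moves startup_32, startup_64 and the PVH entry point out of .head.text and into .init.text:

  /* include/linux/init.h: section macros for assembly routines */
  #define __HEAD    .section ".head.text","ax"
  #define __INIT    .section ".init.text","ax"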