sched: Move SDTL_INIT() functions out-of-line

Since all these functions are address-taken in SDTL_INIT() and called
indirectly, it doesn't really make sense for them to be inline.

Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
pull/1354/merge
Peter Zijlstra 2025-08-26 10:55:55 +02:00
parent 661f951e37
commit 91c614f09a
2 changed files with 52 additions and 42 deletions

View File

@@ -33,56 +33,21 @@ extern const struct sd_flag_debug sd_flag_debug[];
struct sched_domain_topology_level;
#ifdef CONFIG_SCHED_SMT
/*
 * NOTE(review): this hunk shows both the removed inline definitions and
 * the added extern declarations; the bodies move out-of-line (see the
 * topology.c hunk of this commit).
 */
/* sched_domain flags for the SMT level: siblings share CPU capacity and LLC. */
static inline int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
/* Topology-level mask op: SMT siblings of @cpu (via cpu_smt_mask()). */
static inline const
struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_smt_mask(cpu);
}
/* Out-of-line replacements added by this commit. */
extern int cpu_smt_flags(void);
extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_CLUSTER
/* sched_domain flags for the cluster level: SD_CLUSTER plus shared LLC. */
static inline int cpu_cluster_flags(void)
{
return SD_CLUSTER | SD_SHARE_LLC;
}
/* Topology-level mask op: CPUs in @cpu's cluster (via cpu_clustergroup_mask()). */
static inline const
struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_clustergroup_mask(cpu);
}
/* Out-of-line replacements added by this commit. */
extern int cpu_cluster_flags(void);
extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_MC
/* sched_domain flags for the multi-core (MC) level: cores share the LLC. */
static inline int cpu_core_flags(void)
{
return SD_SHARE_LLC;
}
/* Topology-level mask op: CPUs in @cpu's core group (via cpu_coregroup_mask()). */
static inline const
struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_coregroup_mask(cpu);
}
/* Out-of-line replacements added by this commit. */
extern int cpu_core_flags(void);
extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
/* Topology-level mask op for the package level (via cpu_node_mask()). */
static inline const
struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_node_mask(cpu);
}
#ifdef CONFIG_NUMA
/* sched_domain flags for NUMA levels: SD_NUMA only. */
static inline int cpu_numa_flags(void)
{
return SD_NUMA;
}
#endif
/* Out-of-line replacement for tl_pkg_mask() added by this commit. */
extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
extern int arch_asym_cpu_priority(int cpu);

View File

@@ -1724,6 +1724,47 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}
#ifdef CONFIG_SCHED_SMT
/* Out-of-line definition: SMT-level flags (siblings share capacity and LLC). */
int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
/* Out-of-line definition: cpumask of @cpu's SMT siblings. */
const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_smt_mask(cpu);
}
#endif
#ifdef CONFIG_SCHED_CLUSTER
/* Out-of-line definition: cluster-level flags (SD_CLUSTER, shared LLC). */
int cpu_cluster_flags(void)
{
return SD_CLUSTER | SD_SHARE_LLC;
}
/* Out-of-line definition: cpumask of @cpu's cluster group. */
const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_clustergroup_mask(cpu);
}
#endif
#ifdef CONFIG_SCHED_MC
/* Out-of-line definition: MC-level flags (cores share the LLC). */
int cpu_core_flags(void)
{
return SD_SHARE_LLC;
}
/* Out-of-line definition: cpumask of @cpu's core group. */
const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_coregroup_mask(cpu);
}
#endif
/* Out-of-line definition: package-level mask (via cpu_node_mask()). */
const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
{
return cpu_node_mask(cpu);
}
/*
* Topology list, bottom-up.
*/
@@ -1760,6 +1801,10 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
}
#ifdef CONFIG_NUMA
/*
 * sched_domain flags for NUMA topology levels: SD_NUMA only.
 * Kept static: unlike the functions moved above, this one is only
 * referenced from within this file.
 */
static int cpu_numa_flags(void)
{
return SD_NUMA;
}
static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
{