sched/topology: Wrappers for sched_domains_mutex

Create wrappers for sched_domains_mutex so that it can transparently be
used on both CONFIG_SMP and !CONFIG_SMP builds, as some functions will
need to do.

Fixes: 53916d5fd3 ("sched/deadline: Check bandwidth overflow earlier for hotplug")
Reported-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lore.kernel.org/r/Z9MP5Oq9RB8jBs3y@jlelli-thinkpadt14gen4.remote.csb
commit 56209334dd (parent f6147af176)
Author: Juri Lelli <juri.lelli@redhat.com> (committed by Peter Zijlstra)
Date: 2025-03-13 18:03:32 +01:00

5 changed files with 23 additions and 10 deletions
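
The pattern is the standard one for SMP-only locks: real wrappers when
CONFIG_SMP is set, empty static inline stubs otherwise, so common code can
take and release the lock without any #ifdef. Below is a minimal standalone
sketch of the same idea in userspace C (not kernel code): HAVE_DOMAINS
stands in for CONFIG_SMP, a pthread mutex stands in for sched_domains_mutex,
and rebuild_domains() is a hypothetical caller.

/*
 * Minimal standalone sketch of the wrapper pattern (userspace C, not
 * kernel code). HAVE_DOMAINS stands in for CONFIG_SMP; the pthread
 * mutex and rebuild_domains() caller are illustrative stand-ins.
 *
 * Build: cc -pthread -DHAVE_DOMAINS demo.c   (or without -DHAVE_DOMAINS)
 */
#include <pthread.h>
#include <stdio.h>

#ifdef HAVE_DOMAINS
static pthread_mutex_t sched_domains_mutex = PTHREAD_MUTEX_INITIALIZER;

static void sched_domains_mutex_lock(void)
{
	pthread_mutex_lock(&sched_domains_mutex);
}

static void sched_domains_mutex_unlock(void)
{
	pthread_mutex_unlock(&sched_domains_mutex);
}
#else
/* Nothing to serialize against: stubs compile away, callers need no #ifdef. */
static void sched_domains_mutex_lock(void) { }
static void sched_domains_mutex_unlock(void) { }
#endif

/* A common-code caller that builds unchanged in both configurations. */
static void rebuild_domains(void)
{
	sched_domains_mutex_lock();
	printf("rebuilding domains under the lock\n");
	sched_domains_mutex_unlock();
}

int main(void)
{
	rebuild_domains();
	return 0;
}

The !SMP stubs mirror the new static inlines the patch adds to
<linux/sched.h> below: on such builds the mutex does not exist, so callers
simply fall through.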

diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -382,6 +382,11 @@ enum uclamp_id {
 #ifdef CONFIG_SMP
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
+extern void sched_domains_mutex_lock(void);
+extern void sched_domains_mutex_unlock(void);
+#else
+static inline void sched_domains_mutex_lock(void) { }
+static inline void sched_domains_mutex_unlock(void) { }
 #endif
 
 struct sched_param {

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
@@ -994,10 +994,10 @@ static void
 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new)
 {
-	mutex_lock(&sched_domains_mutex);
+	sched_domains_mutex_lock();
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
 	dl_rebuild_rd_accounting();
-	mutex_unlock(&sched_domains_mutex);
+	sched_domains_mutex_unlock();
 }
 
 /*

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -8470,9 +8470,9 @@ void __init sched_init_smp(void)
 	 * CPU masks are stable and all blatant races in the below code cannot
 	 * happen.
 	 */
-	mutex_lock(&sched_domains_mutex);
+	sched_domains_mutex_lock();
 	sched_init_domains(cpu_active_mask);
-	mutex_unlock(&sched_domains_mutex);
+	sched_domains_mutex_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
@@ -294,7 +294,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
 	bool orig;
 
 	cpus_read_lock();
-	mutex_lock(&sched_domains_mutex);
+	sched_domains_mutex_lock();
 
 	orig = sched_debug_verbose;
 	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);
@@ -306,7 +306,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
 		sd_dentry = NULL;
 	}
 
-	mutex_unlock(&sched_domains_mutex);
+	sched_domains_mutex_unlock();
 	cpus_read_unlock();
 
 	return result;
@@ -517,9 +517,9 @@ static __init int sched_init_debug(void)
 	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
 	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
 
-	mutex_lock(&sched_domains_mutex);
+	sched_domains_mutex_lock();
 	update_sched_domain_debugfs();
-	mutex_unlock(&sched_domains_mutex);
+	sched_domains_mutex_unlock();
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
@@ -6,6 +6,14 @@
 #include <linux/bsearch.h>
 
 DEFINE_MUTEX(sched_domains_mutex);
+void sched_domains_mutex_lock(void)
+{
+	mutex_lock(&sched_domains_mutex);
+}
+void sched_domains_mutex_unlock(void)
+{
+	mutex_unlock(&sched_domains_mutex);
+}
 
 /* Protected by sched_domains_mutex: */
 static cpumask_var_t sched_domains_tmpmask;
@@ -2791,7 +2799,7 @@ match3:
 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
-	mutex_lock(&sched_domains_mutex);
+	sched_domains_mutex_lock();
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
-	mutex_unlock(&sched_domains_mutex);
+	sched_domains_mutex_unlock();
 }