sched: Add CONFIG_SCHED_PROXY_EXEC & boot argument to enable/disable

Add a CONFIG_SCHED_PROXY_EXEC option, along with a boot argument
sched_proxy_exec= that can be used to disable the feature at boot
time when CONFIG_SCHED_PROXY_EXEC is enabled.

Also use this option to allow rq->donor to be different from
rq->curr.

Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20250712033407.2383110-2-jstultz@google.com

Documentation/admin-guide/kernel-parameters.txt

@@ -6387,6 +6387,11 @@
        sa1100ir        [NET]
                        See drivers/net/irda/sa1100_ir.c.

        sched_proxy_exec= [KNL]
                        Enables or disables "proxy execution" style
                        solution to mutex-based priority inversion.
                        Format: <bool>

        sched_verbose   [KNL,EARLY] Enables verbose scheduler debug messages.

        schedstats=     [KNL,X86] Enable or disable scheduled statistics.
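
[Usage illustration, not part of the patch] With CONFIG_SCHED_PROXY_EXEC=y,
the parameter documented above is appended to the kernel command line, e.g.:

        sched_proxy_exec=0      (disable proxy execution for this boot)
        sched_proxy_exec=1      (keep it enabled; same as the built-in default)

Any value kstrtobool() understands (1/0, y/n, on/off) should satisfy the
<bool> format.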

include/linux/sched.h

@@ -1656,6 +1656,19 @@ struct task_struct {
        randomized_struct_fields_end
} __attribute__ ((aligned (64)));

#ifdef CONFIG_SCHED_PROXY_EXEC
DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
static inline bool sched_proxy_exec(void)
{
        return static_branch_likely(&__sched_proxy_exec);
}
#else
static inline bool sched_proxy_exec(void)
{
        return false;
}
#endif

#define TASK_REPORT_IDLE        (TASK_REPORT + 1)
#define TASK_REPORT_MAX         (TASK_REPORT_IDLE << 1)
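
Illustrative sketch (hypothetical helper, not part of the patch) of how
scheduler code can branch on the accessor above; with CONFIG_SCHED_PROXY_EXEC=y
this is a static-branch check, with =n it constant-folds to false:

static inline bool example_uses_split_contexts(void)
{
        /* Hypothetical caller, for illustration only. */
        if (sched_proxy_exec())
                return true;    /* rq->donor (scheduler context) may differ from rq->curr */
        return false;           /* single context: donor and curr are the same task */
}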

init/Kconfig

@@ -878,6 +878,18 @@ config UCLAMP_BUCKETS_COUNT
          If in doubt, use the default value.

config SCHED_PROXY_EXEC
        bool "Proxy Execution"
        # Avoid some build failures w/ PREEMPT_RT until it can be fixed
        depends on !PREEMPT_RT
        # Need to investigate how to inform sched_ext of split contexts
        depends on !SCHED_CLASS_EXT
        # Not particularly useful until we get to multi-rq proxying
        depends on EXPERT
        help
          This option enables proxy execution, a mechanism for mutex-owning
          tasks to inherit the scheduling context of higher priority waiters.

endmenu

#
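
For reference, an illustrative .config fragment (not part of the patch)
satisfying the dependencies above:

CONFIG_EXPERT=y
# CONFIG_PREEMPT_RT is not set
# CONFIG_SCHED_CLASS_EXT is not set
CONFIG_SCHED_PROXY_EXEC=y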

kernel/sched/core.c

@@ -119,6 +119,35 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
static int __init setup_proxy_exec(char *str)
{
        bool proxy_enable = true;

        if (*str && kstrtobool(str + 1, &proxy_enable)) {
                pr_warn("Unable to parse sched_proxy_exec=\n");
                return 0;
        }

        if (proxy_enable) {
                pr_info("sched_proxy_exec enabled via boot arg\n");
                static_branch_enable(&__sched_proxy_exec);
        } else {
                pr_info("sched_proxy_exec disabled via boot arg\n");
                static_branch_disable(&__sched_proxy_exec);
        }
        return 1;
}
#else
static int __init setup_proxy_exec(char *str)
{
        pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
        return 0;
}
#endif
__setup("sched_proxy_exec", setup_proxy_exec);

/*
 * Debugging: various feature bits
 *
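
Expected boot-time behavior, based on the handler above (illustrative
summary, not part of the patch):

        sched_proxy_exec=1      -> "sched_proxy_exec enabled via boot arg"
        sched_proxy_exec=0      -> "sched_proxy_exec disabled via boot arg"
        sched_proxy_exec        -> no value; proxy_enable stays true, feature enabled
        sched_proxy_exec=bogus  -> kstrtobool() fails, "Unable to parse sched_proxy_exec="

With CONFIG_SCHED_PROXY_EXEC=n, the stub only prints its warning and the
feature remains unavailable.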

kernel/sched/sched.h

@@ -1142,10 +1142,15 @@ struct rq {
         */
        unsigned long           nr_uninterruptible;

#ifdef CONFIG_SCHED_PROXY_EXEC
        struct task_struct __rcu        *donor; /* Scheduler context */
        struct task_struct __rcu        *curr;  /* Execution context */
#else
        union {
                struct task_struct __rcu *donor;        /* Scheduler context */
                struct task_struct __rcu *curr;         /* Execution context */
        };
#endif
        struct sched_dl_entity  *dl_server;
        struct task_struct      *idle;
        struct task_struct      *stop;
@@ -1326,10 +1331,17 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                raw_cpu_ptr(&runqueues)

#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
{
        rcu_assign_pointer(rq->donor, t);
}
#else
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
{
        /* Do nothing */
}
#endif

#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);
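
Hypothetical sketch (not part of the patch) of how a pick path could publish
both contexts; with CONFIG_SCHED_PROXY_EXEC=n the union keeps donor and curr
aliased, so rq_set_donor() compiles to nothing:

static inline void example_set_contexts(struct rq *rq, struct task_struct *next)
{
        /* Illustration only: the helper name and call site are assumptions. */
        rq_set_donor(rq, next);                 /* scheduler context */
        rcu_assign_pointer(rq->curr, next);     /* execution context */
}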