sched: Fix proxy/current (push,pull)ability

Proxy execution forms atomic pairs of tasks: The waiting donor
task (scheduling context) and a proxy (execution context). The
donor task, along with the rest of the blocked chain, follows
the proxy wrt CPU placement.

They can be the same task, in which case push/pull doesn't need any
modification. When they are different, however, consider FIFO1 & FIFO42:

	      ,->  RT42
	      |     | blocked-on
	      |     v
blocked_donor |   mutex
	      |     | owner
	      |     v
	      `--  RT1

   RT1
   RT42

  CPU0            CPU1
   ^                ^
   |                |
  overloaded    !overloaded
  rq prio = 42  rq prio = 0

RT1 is eligible to be pushed to CPU1 (its priority beats CPU1's rq prio of 0),
but should that happen it will "carry" RT42 along. Clearly, neither RT1 nor
RT42 should be treated as push/pullable here.

Unfortunately, only the donor task is usually dequeued from the rq, while the
proxied execution context (rq->curr) remains on the rq. This can cause RT1 to
be selected for migration by logic such as the RT pushable list.

Thus, add a dequeue/enqueue cycle on the proxy task before __schedule()
returns, which allows the sched class logic to avoid adding the now-current
task to the pushable list.

Furthermore, tasks becoming blocked on a mutex don't need an explicit
dequeue/enqueue cycle to be made (push/pull)able: they have to be running
to block on a mutex, thus they will eventually hit put_prev_task().

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20250712033407.2383110-8-jstultz@google.com
commit be39617e38 (parent be41bde4c3)
Valentin Schneider, 2025-07-12 03:33:48 +00:00; committed by Peter Zijlstra
3 files changed, 37 insertions(+), 0 deletions(-)
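
The guards added below all key off task_is_blocked(), which is introduced
elsewhere in the proxy-execution series rather than in this patch. As a rough
sketch of the assumption those hunks rely on (not the verbatim helper; the
p->blocked_on field is taken on faith from the rest of the series), it boils
down to "proxy execution is enabled and the task is blocked on a mutex":

	static inline bool task_is_blocked(struct task_struct *p)
	{
		/* Only relevant when proxy execution is compiled in and enabled. */
		if (!sched_proxy_exec())
			return false;

		/* Assumed shape: the task sleeps on the mutex tracked via p->blocked_on. */
		return !!p->blocked_on;
	}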


--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6654,6 +6654,23 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
 }
 #endif /* SCHED_PROXY_EXEC */
 
+static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
+{
+	if (!sched_proxy_exec())
+		return;
+
+	/*
+	 * pick_next_task() calls set_next_task() on the chosen task
+	 * at some point, which ensures it is not push/pullable.
+	 * However, the chosen/donor task *and* the mutex owner form an
+	 * atomic pair wrt push/pull.
+	 *
+	 * Make sure owner we run is not pushable. Unfortunately we can
+	 * only deal with that by means of a dequeue/enqueue cycle. :-/
+	 */
+	dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
+	enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
+}
 /*
  * __schedule() is the main scheduler function.
  *
@@ -6798,6 +6815,10 @@ picked:
		 * changes to task_struct made by pick_next_task().
		 */
		RCU_INIT_POINTER(rq->curr, next);
+
+		if (!task_current_donor(rq, next))
+			proxy_tag_curr(rq, next);
+
		/*
		 * The membarrier system call requires each architecture
		 * to have a full memory barrier after updating
@@ -6832,6 +6853,10 @@ picked:
		/* Also unlocks the rq: */
		rq = context_switch(rq, prev, next, &rf);
	} else {
+		/* In case next was already curr but just got blocked_donor */
+		if (!task_current_donor(rq, next))
+			proxy_tag_curr(rq, next);
+
		rq_unpin_lock(rq, &rf);
		__balance_callbacks(rq);
		raw_spin_rq_unlock_irq(rq);


--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2121,6 +2121,9 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
	if (dl_server(&p->dl))
		return;
 
+	if (task_is_blocked(p))
+		return;
+
	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
 }
@@ -2415,6 +2418,10 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_s
	update_curr_dl(rq);
	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
+
+	if (task_is_blocked(p))
+		return;
+
	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
 }


--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1440,6 +1440,9 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
	enqueue_rt_entity(rt_se, flags);
 
+	if (task_is_blocked(p))
+		return;
+
	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
 }
@@ -1716,6 +1719,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_s
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 
+	if (task_is_blocked(p))
+		return;
+
	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
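
For readers skimming the three sched-class hunks above, the following
standalone model (plain userspace C; the toy_task struct and
should_be_pushable() helper are invented for illustration and are not kernel
code) captures the rule they implement: the current execution context and
blocked members of a proxy chain must never land on the pushable list, while
an ordinary queued task with more than one allowed CPU still does.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for the fields the real checks look at. */
	struct toy_task {
		const char *name;
		bool is_current;	/* running as rq->curr (possibly as a proxy) */
		bool is_blocked;	/* blocked on a mutex, kept queued for proxy exec */
		int nr_cpus_allowed;
	};

	/* Mirrors the post-patch shape: bail out for blocked tasks, then only
	 * consider non-current tasks that are actually allowed to migrate. */
	static bool should_be_pushable(const struct toy_task *p)
	{
		if (p->is_blocked)
			return false;
		if (p->is_current)
			return false;
		return p->nr_cpus_allowed > 1;
	}

	int main(void)
	{
		struct toy_task rt1  = { "RT1 (proxy, rq->curr)", true,  false, 8 };
		struct toy_task rt42 = { "RT42 (blocked donor)",  false, true,  8 };
		struct toy_task rt5  = { "RT5 (plain waiter)",    false, false, 8 };
		const struct toy_task *tasks[] = { &rt1, &rt42, &rt5 };

		for (int i = 0; i < 3; i++)
			printf("%-22s pushable: %s\n", tasks[i]->name,
			       should_be_pushable(tasks[i]) ? "yes" : "no");
		return 0;
	}

Only RT5 ends up push-eligible, matching the changelog's requirement that RT1
and RT42 be treated as an atomic, non-migratable pair while RT1 runs as RT42's
proxy.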