sched/fair: Get rid of throttled_lb_pair()
Now that throttled tasks are dequeued and can not stay on rq's cfs_tasks
list, there is no need to take special care of these throttled tasks
anymore in load balance.

Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Tested-by: Matteo Martelli <matteo.martelli@codethink.co.uk>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20250829081120.806-6-ziqianlu@bytedance.com
parent eb962f251f
commit 5b726e9bf9
@@ -5735,23 +5735,6 @@ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
 }
 
-/*
- * Ensure that neither of the group entities corresponding to src_cpu or
- * dest_cpu are members of a throttled hierarchy when performing group
- * load-balance operations.
- */
-static inline int throttled_lb_pair(struct task_group *tg,
-				    int src_cpu, int dest_cpu)
-{
-	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
-
-	src_cfs_rq = tg->cfs_rq[src_cpu];
-	dest_cfs_rq = tg->cfs_rq[dest_cpu];
-
-	return throttled_hierarchy(src_cfs_rq) ||
-	       throttled_hierarchy(dest_cfs_rq);
-}
-
 static inline bool task_is_throttled(struct task_struct *p)
 {
 	return cfs_bandwidth_used() && p->throttled;
@@ -6743,12 +6726,6 @@ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 	return 0;
 }
 
-static inline int throttled_lb_pair(struct task_group *tg,
-				    int src_cpu, int dest_cpu)
-{
-	return 0;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -9385,18 +9362,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) delayed dequeued unless we migrate load, or
-	 * 2) throttled_lb_pair, or
-	 * 3) cannot be migrated to this CPU due to cpus_ptr, or
-	 * 4) running (obviously), or
-	 * 5) are cache-hot on their current CPU, or
-	 * 6) are blocked on mutexes (if SCHED_PROXY_EXEC is enabled)
+	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
+	 * 3) running (obviously), or
+	 * 4) are cache-hot on their current CPU, or
+	 * 5) are blocked on mutexes (if SCHED_PROXY_EXEC is enabled)
 	 */
 	if ((p->se.sched_delayed) && (env->migration_type != migrate_load))
 		return 0;
 
-	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
-		return 0;
-
 	/*
 	 * We want to prioritize the migration of eligible tasks.
 	 * For ineligible tasks we soft-limit them and only allow
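Not part of the patch: below is a minimal userspace sketch of the invariant the commit
message relies on, namely that once a throttled task is dequeued from the runqueue's
task list, the balancer's per-task walk can never encounter it, so a per-task
throttled_lb_pair()-style filter has nothing left to reject. All identifiers here
(toy_rq, toy_task, toy_throttle, toy_detach_tasks) are hypothetical stand-ins for
illustration only, not kernel code or kernel APIs.

/*
 * Toy model: throttling fully unlinks a task from the "cfs_tasks" list,
 * so the balancer walk below only ever sees unthrottled tasks.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	const char *name;
	bool throttled;
	struct toy_task *next;		/* stand-in for the rq's cfs_tasks linkage */
};

struct toy_rq {
	struct toy_task *cfs_tasks;	/* only runnable (non-throttled) tasks */
};

/* Throttling dequeues the task: it is unlinked from cfs_tasks entirely. */
static void toy_throttle(struct toy_rq *rq, struct toy_task *t)
{
	struct toy_task **pp = &rq->cfs_tasks;

	t->throttled = true;
	while (*pp) {
		if (*pp == t) {
			*pp = t->next;
			t->next = NULL;
			return;
		}
		pp = &(*pp)->next;
	}
}

/* The balancer walk: every task it visits is, by construction, unthrottled. */
static void toy_detach_tasks(struct toy_rq *rq)
{
	for (struct toy_task *t = rq->cfs_tasks; t; t = t->next)
		printf("migration candidate: %s (throttled=%d)\n",
		       t->name, t->throttled);
}

int main(void)
{
	struct toy_task b = { .name = "B", .throttled = false, .next = NULL };
	struct toy_task a = { .name = "A", .throttled = false, .next = &b };
	struct toy_rq rq = { .cfs_tasks = &a };

	toy_throttle(&rq, &a);		/* A is dequeued, not just flagged */
	toy_detach_tasks(&rq);		/* only B shows up; no per-task filter needed */
	return 0;
}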