sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]
- Use the standard #ifdef marker format for larger blocks,
  where appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-13-mingo@kernel.org
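Note the two marker variants in the hunks below: a guard with no #else gets its #endif annotated with the positive symbol (e.g. #endif /* CONFIG_SCHED_HW_PRESSURE */), while a guard with an #else gets the negated symbol, the ':' marking where the negated branch begins. A minimal sketch of the convention, with CONFIG_FOO and the foo_*() helpers as made-up placeholders:

	/* No #else branch: the #endif names the symbol itself. */
	#ifdef CONFIG_FOO
	int foo_update(void);
	#endif /* CONFIG_FOO */

	/* With an #else branch: the ':' marks the start of the
	 * !CONFIG_FOO section, and the #endif names its end. */
	#ifdef CONFIG_FOO
	int foo_avg(void);
	#else /* !CONFIG_FOO: */
	static inline int foo_avg(void) { return 0; }	/* placeholder stub */
	#endif /* !CONFIG_FOO */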
parent c215dff7f8
commit 311bb3f7b7
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -414,7 +414,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
@@ -467,7 +467,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 
 	return ret;
 }
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
 
 /*
  * Load avg and utiliztion metrics need to be updated periodically and before
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -20,7 +20,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_hw.load_avg);
 }
-#else
+#else /* !CONFIG_SCHED_HW_PRESSURE: */
 static inline int
 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 {
@@ -31,7 +31,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return 0;
 }
-#endif
+#endif /* !CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
@@ -179,15 +179,15 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 
 	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
-#else
+#else /* !CONFIG_CFS_BANDWIDTH: */
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	return rq_clock_pelt(rq_of(cfs_rq));
 }
-#endif
+#endif /* !CONFIG_CFS_BANDWIDTH */
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -236,6 +236,6 @@ static inline void
 update_idle_rq_clock_pelt(struct rq *rq) { }
 
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
-#endif
+#endif /* !CONFIG_SMP */
 
 #endif /* _KERNEL_SCHED_PELT_H */
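With the hunks applied, pelt.h's conditional structure reads roughly like the skeleton below, a condensed sketch assembled from the hunks above: the opening #ifndef/#ifdef lines are inferred from the closing markers, and '...' elides the declarations and inline bodies:

	#ifndef _KERNEL_SCHED_PELT_H
	#define _KERNEL_SCHED_PELT_H

	#ifdef CONFIG_SMP
	...
	#ifdef CONFIG_SCHED_HW_PRESSURE
	...
	#else /* !CONFIG_SCHED_HW_PRESSURE: */
	...
	#endif /* !CONFIG_SCHED_HW_PRESSURE */
	...
	#ifdef CONFIG_CFS_BANDWIDTH
	...
	#else /* !CONFIG_CFS_BANDWIDTH: */
	...
	#endif /* !CONFIG_CFS_BANDWIDTH */

	#else /* !CONFIG_SMP: */
	...
	#endif /* !CONFIG_SMP */

	#endif /* _KERNEL_SCHED_PELT_H */

Annotating both ends of each block this way lets a reader landing mid-file tell at a glance which configuration branch a given stub belongs to.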