sched: Adapt sched tracepoints for RV task model

Add the following tracepoint:

 * sched_set_need_resched(tsk, cpu, tif)
   Called when the need resched [lazy] flag is set on a task
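
As an illustration (not part of the patch), an in-kernel consumer could
attach a probe matching the new prototype. The module and probe names
below are hypothetical; only the tracepoint signature and the
_tp-suffixed helpers generated by DECLARE_TRACE() come from this patch:

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Hypothetical probe: signature must match TP_PROTO(tsk, cpu, tif) */
static void probe_need_resched(void *data, struct task_struct *tsk,
                               int cpu, int tif)
{
        /* tif is the flag being set, e.g. TIF_NEED_RESCHED */
        pr_info("need_resched (tif=%d) set for %s/%d on CPU %d\n",
                tif, tsk->comm, tsk->pid, cpu);
}

static int __init example_init(void)
{
        /* register_trace_*_tp() is generated by DECLARE_TRACE() */
        return register_trace_sched_set_need_resched_tp(probe_need_resched,
                                                        NULL);
}

static void __exit example_exit(void)
{
        unregister_trace_sched_set_need_resched_tp(probe_need_resched, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");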

Remove the unused ip parameter from sched_entry and sched_exit, and change
sched_entry to pass a preempt value consistent with the one used by
sched_switch.
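
In practice this means a probe now sees sched_entry report preemption
exactly when the subsequent sched_switch (if any) does. A hypothetical
per-CPU cross-check, purely illustrative (registration as in the sketch
above):

#include <linux/percpu.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Hypothetical cross-check; variable and probe names are illustrative */
static DEFINE_PER_CPU(bool, entry_preempt);

static void probe_entry(void *data, bool preempt)
{
        this_cpu_write(entry_preempt, preempt);
}

static void probe_switch(void *data, bool preempt, struct task_struct *prev,
                         struct task_struct *next, unsigned int prev_state)
{
        /* With this patch both tracepoints agree on what counts as preemption */
        WARN_ONCE(preempt != this_cpu_read(entry_preempt),
                  "sched_entry/sched_switch preempt mismatch");
}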

Also adapt all monitors using sched_{entry,exit} to avoid breaking the build.
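
Concretely, each monitor only drops the ip argument from its handlers;
attachment is unchanged. Sketched here on the sco monitor, with the
enable path abridged (the full diffs for all five monitors follow below):

/* Updated handler signatures match the new TP_PROTOs */
static void handle_schedule_entry(void *data, bool preempt)
{
        da_handle_event_sco(schedule_entry_sco);
}

static void handle_schedule_exit(void *data, bool is_switch)
{
        da_handle_start_event_sco(schedule_exit_sco);
}

static int enable_sco(void)
{
        int retval = da_monitor_init_sco();

        if (retval)
                return retval;

        /* the _tp suffix comes from DECLARE_TRACE() */
        rv_attach_trace_probe("sco", sched_entry_tp, handle_schedule_entry);
        rv_attach_trace_probe("sco", sched_exit_tp, handle_schedule_exit);
        return 0;
}
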
These tracepoints are useful to describe the Linux task model and are
adapted from the patches by Daniel Bristot de Oliveira
(https://bristot.me/linux-task-model/).
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Nam Cao <namcao@linutronix.de>
Cc: Tomas Glozar <tglozar@redhat.com>
Cc: Juri Lelli <jlelli@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
Link: https://lore.kernel.org/20250728135022.255578-7-gmonaco@redhat.com
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 9d475d80c9
commit adcc3bfa88
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -339,9 +339,11 @@ extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
 extern void io_schedule(void);
 
-/* wrapper function to trace from this header file */
+/* wrapper functions to trace from this header file */
 DECLARE_TRACEPOINT(sched_set_state_tp);
 extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
 
 /**
  * struct prev_cputime - snapshot of system and user cputime
@@ -2063,6 +2065,9 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
+        if (tracepoint_enabled(sched_set_need_resched_tp) &&
+            !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+                __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
         set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -882,18 +882,22 @@ DECLARE_TRACE(sched_compute_energy,
         TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
 
 DECLARE_TRACE(sched_entry,
-        TP_PROTO(bool preempt, unsigned long ip),
-        TP_ARGS(preempt, ip));
+        TP_PROTO(bool preempt),
+        TP_ARGS(preempt));
 
 DECLARE_TRACE(sched_exit,
-        TP_PROTO(bool is_switch, unsigned long ip),
-        TP_ARGS(is_switch, ip));
+        TP_PROTO(bool is_switch),
+        TP_ARGS(is_switch));
 
 DECLARE_TRACE_CONDITION(sched_set_state,
         TP_PROTO(struct task_struct *tsk, int state),
         TP_ARGS(tsk, state),
         TP_CONDITION(!!(tsk->__state) != !!state));
 
+DECLARE_TRACE(sched_set_need_resched,
+        TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+        TP_ARGS(tsk, cpu, tif));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1110,6 +1110,7 @@ static void __resched_curr(struct rq *rq, int tif)
 
         cpu = cpu_of(rq);
 
+        trace_sched_set_need_resched_tp(curr, cpu, tif);
         if (cpu == smp_processor_id()) {
                 set_ti_thread_flag(cti, tif);
                 if (tif == TIF_NEED_RESCHED)
@@ -1125,6 +1126,11 @@ static void __resched_curr(struct rq *rq, int tif)
         }
 }
 
+void __trace_set_need_resched(struct task_struct *curr, int tif)
+{
+        trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
+}
+
 void resched_curr(struct rq *rq)
 {
         __resched_curr(rq, TIF_NEED_RESCHED);
@@ -5329,7 +5335,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
          * switched the context for the first time. It is returning from
          * schedule for the first time in this path.
          */
-        trace_sched_exit_tp(true, CALLER_ADDR0);
+        trace_sched_exit_tp(true);
         preempt_enable();
 
         if (current->set_child_tid)
@@ -6678,7 +6684,8 @@ static void __sched notrace __schedule(int sched_mode)
         struct rq *rq;
         int cpu;
 
-        trace_sched_entry_tp(preempt, CALLER_ADDR0);
+        /* Trace preemptions consistently with task switches */
+        trace_sched_entry_tp(sched_mode == SM_PREEMPT);
 
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
@@ -6793,7 +6800,7 @@ picked:
                 __balance_callbacks(rq);
                 raw_spin_rq_unlock_irq(rq);
         }
-        trace_sched_exit_tp(is_switch, CALLER_ADDR0);
+        trace_sched_exit_tp(is_switch);
 }
 
 void __noreturn do_task_dead(void)
--- a/kernel/trace/rv/monitors/sco/sco.c
+++ b/kernel/trace/rv/monitors/sco/sco.c
@@ -24,12 +24,12 @@ static void handle_sched_set_state(void *data, struct task_struct *tsk, int stat
         da_handle_start_event_sco(sched_set_state_sco);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
         da_handle_event_sco(schedule_entry_sco);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
         da_handle_start_event_sco(schedule_exit_sco);
 }
--- a/kernel/trace/rv/monitors/scpd/scpd.c
+++ b/kernel/trace/rv/monitors/scpd/scpd.c
@@ -30,12 +30,12 @@ static void handle_preempt_enable(void *data, unsigned long ip, unsigned long pa
         da_handle_start_event_scpd(preempt_enable_scpd);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
         da_handle_event_scpd(schedule_entry_scpd);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
         da_handle_event_scpd(schedule_exit_scpd);
 }
--- a/kernel/trace/rv/monitors/sncid/sncid.c
+++ b/kernel/trace/rv/monitors/sncid/sncid.c
@@ -30,12 +30,12 @@ static void handle_irq_enable(void *data, unsigned long ip, unsigned long parent
         da_handle_start_event_sncid(irq_enable_sncid);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
         da_handle_start_event_sncid(schedule_entry_sncid);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
         da_handle_start_event_sncid(schedule_exit_sncid);
 }
--- a/kernel/trace/rv/monitors/snep/snep.c
+++ b/kernel/trace/rv/monitors/snep/snep.c
@@ -30,12 +30,12 @@ static void handle_preempt_enable(void *data, unsigned long ip, unsigned long pa
         da_handle_start_event_snep(preempt_enable_snep);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
         da_handle_event_snep(schedule_entry_snep);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
         da_handle_start_event_snep(schedule_exit_snep);
 }
--- a/kernel/trace/rv/monitors/tss/tss.c
+++ b/kernel/trace/rv/monitors/tss/tss.c
@@ -27,12 +27,12 @@ static void handle_sched_switch(void *data, bool preempt,
         da_handle_event_tss(sched_switch_tss);
 }
 
-static void handle_schedule_entry(void *data, bool preempt, unsigned long ip)
+static void handle_schedule_entry(void *data, bool preempt)
 {
         da_handle_event_tss(schedule_entry_tss);
 }
 
-static void handle_schedule_exit(void *data, bool is_switch, unsigned long ip)
+static void handle_schedule_exit(void *data, bool is_switch)
 {
         da_handle_start_event_tss(schedule_exit_tss);
 }