rcu: Clean up after the SRCU-fastification of RCU Tasks Trace
Now that RCU Tasks Trace has been re-implemented in terms of SRCU-fast, the ->trc_ipi_to_cpu, ->trc_blkd_cpu, ->trc_blkd_node, ->trc_holdout_list, and ->trc_reader_special task_struct fields are no longer used. In addition, the rcu_tasks_trace_qs(), rcu_tasks_trace_qs_blkd(), exit_tasks_rcu_finish_trace(), rcu_spawn_tasks_trace_kthread(), show_rcu_tasks_trace_gp_kthread(), rcu_tasks_trace_get_gp_data(), rcu_tasks_trace_torture_stats_print(), and get_rcu_tasks_trace_gp_kthread() functions, along with all the other functions that they invoke, are no longer used. The TRC_NEED_QS and TRC_NEED_QS_CHECKED CPP macros are likewise unused, as are the rcu_tasks_trace_lazy_ms and rcu_task_ipi_delay rcupdate module parameters and the TASKS_TRACE_RCU_READ_MB Kconfig option.

This commit therefore removes all of them.

[ paulmck: Apply Alexei Starovoitov feedback. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: bpf@vger.kernel.org
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
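For orientation, here is a minimal sketch (not this commit's actual code) of how a Tasks Trace reader can be layered on SRCU-fast, which is why the fields named above become unnecessary: only a nesting count and the per-CPU counter cookie returned by srcu_read_lock_fast() need to live in task_struct. The srcu_struct name rcu_tasks_trace_srcu_struct and the sketch_* helper names are hypothetical, introduced only for illustration.

	/*
	 * Illustrative sketch only: a Tasks Trace read-side critical
	 * section expressed in terms of SRCU-fast.  The srcu_struct
	 * name below is hypothetical, not taken from this commit.
	 */
	DEFINE_SRCU(rcu_tasks_trace_srcu_struct);

	static inline void sketch_read_lock_trace(void)
	{
		struct task_struct *t = current;

		/* Outermost reader records the SRCU-fast cookie. */
		if (t->trc_reader_nesting++ == 0)
			t->trc_reader_scp =
				srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
	}

	static inline void sketch_read_unlock_trace(void)
	{
		struct task_struct *t = current;

		/* Outermost unlock hands the cookie back to SRCU-fast. */
		if (--t->trc_reader_nesting == 0)
			srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct,
					      t->trc_reader_scp);
	}

With readers expressed this way, the grace-period side can lean on SRCU's existing machinery, which is why the IPI-based scanning, holdout lists, and related state removed below are no longer needed.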
parent 46e3235999
commit a73fc3dcc6
@@ -6249,13 +6249,6 @@ Kernel parameters
 			dynamically) adjusted. This parameter is intended
 			for use in testing.

-	rcupdate.rcu_task_ipi_delay= [KNL]
-			Set time in jiffies during which RCU tasks will
-			avoid sending IPIs, starting with the beginning
-			of a given grace period. Setting a large
-			number avoids disturbing real-time workloads,
-			but lengthens grace periods.
-
 	rcupdate.rcu_task_lazy_lim= [KNL]
 			Number of callbacks on a given CPU that will
 			cancel laziness on that CPU. Use -1 to disable
@@ -6299,14 +6292,6 @@ Kernel parameters
 			of zero will disable batching. Batching is
 			always disabled for synchronize_rcu_tasks().

-	rcupdate.rcu_tasks_trace_lazy_ms= [KNL]
-			Set timeout in milliseconds RCU Tasks
-			Trace asynchronous callback batching for
-			call_rcu_tasks_trace(). A negative value
-			will take the default. A value of zero will
-			disable batching. Batching is always disabled
-			for synchronize_rcu_tasks_trace().
-
 	rcupdate.rcu_self_test= [KNL]
 			Run the RCU early boot self tests

@@ -175,36 +175,7 @@ void rcu_tasks_torture_stats_print(char *tt, char *tf);
 # define synchronize_rcu_tasks synchronize_rcu
 # endif

-# ifdef CONFIG_TASKS_TRACE_RCU
-// Bits for ->trc_reader_special.b.need_qs field.
-#define TRC_NEED_QS		0x1  // Task needs a quiescent state.
-#define TRC_NEED_QS_CHECKED	0x2  // Task has been checked for needing quiescent state.
-
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
-void rcu_tasks_trace_qs_blkd(struct task_struct *t);
-
-# define rcu_tasks_trace_qs(t) \
-	do { \
-		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
-		\
-		if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
-		    likely(!___rttq_nesting)) { \
-			rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
-		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
-			   !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
-			rcu_tasks_trace_qs_blkd(t); \
-		} \
-	} while (0)
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
-# else
-# define rcu_tasks_trace_qs(t) do { } while (0)
-# endif
-
-#define rcu_tasks_qs(t, preempt) \
-	do { \
-		rcu_tasks_classic_qs((t), (preempt)); \
-		rcu_tasks_trace_qs(t); \
-	} while (0)
+#define rcu_tasks_qs(t, preempt) rcu_tasks_classic_qs((t), (preempt))

 # ifdef CONFIG_TASKS_RUDE_RCU
 void synchronize_rcu_tasks_rude(void);
@@ -136,9 +136,7 @@ static inline void rcu_barrier_tasks_trace(void)
 }

 // Placeholders to enable stepwise transition.
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
 void __init rcu_tasks_trace_suppress_unused(void);
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void);

 #else
 /*
@@ -946,11 +946,6 @@ struct task_struct {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	int				trc_reader_nesting;
 	struct srcu_ctr __percpu	*trc_reader_scp;
-	int				trc_ipi_to_cpu;
-	union rcu_special		trc_reader_special;
-	struct list_head		trc_holdout_list;
-	struct list_head		trc_blkd_node;
-	int				trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

 	struct sched_info		sched_info;
@@ -195,9 +195,6 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 #endif
 #ifdef CONFIG_TASKS_TRACE_RCU
 	.trc_reader_nesting = 0,
-	.trc_reader_special.s = 0,
-	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
-	.trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
 #endif
 #ifdef CONFIG_CPUSETS
 	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
@@ -1828,9 +1828,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 #ifdef CONFIG_TASKS_TRACE_RCU
 	p->trc_reader_nesting = 0;
-	p->trc_reader_special.s = 0;
-	INIT_LIST_HEAD(&p->trc_holdout_list);
-	INIT_LIST_HEAD(&p->trc_blkd_node);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }

@@ -313,24 +313,6 @@ config RCU_NOCB_CPU_CB_BOOST
 	  Say Y here if you want to set RT priority for offloading kthreads.
 	  Say N here if you are building a !PREEMPT_RT kernel and are unsure.

-config TASKS_TRACE_RCU_READ_MB
-	bool "Tasks Trace RCU readers use memory barriers in user and idle"
-	depends on RCU_EXPERT && TASKS_TRACE_RCU
-	default PREEMPT_RT || NR_CPUS < 8
-	help
-	  Use this option to further reduce the number of IPIs sent
-	  to CPUs executing in userspace or idle during tasks trace
-	  RCU grace periods. Given that a reasonable setting of
-	  the rcupdate.rcu_task_ipi_delay kernel boot parameter
-	  eliminates such IPIs for many workloads, proper setting
-	  of this Kconfig option is important mostly for aggressive
-	  real-time installations and for battery-powered devices,
-	  hence the default chosen above.
-
-	  Say Y here if you hate IPIs.
-	  Say N here if you hate read-side memory barriers.
-	  Take the default if you are unsure.
-
 config RCU_LAZY
 	bool "RCU callback lazy invocation functionality"
 	depends on RCU_NOCB_CPU
@@ -544,10 +544,6 @@ struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
 void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq);
 #endif // # ifdef CONFIG_TASKS_RUDE_RCU

-#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq);
-#endif
-
 #ifdef CONFIG_TASKS_RCU_GENERIC
 void tasks_cblist_init_generic(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
@@ -673,11 +669,6 @@ void show_rcu_tasks_rude_gp_kthread(void);
 #else
 static inline void show_rcu_tasks_rude_gp_kthread(void) {}
 #endif
-#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
-void show_rcu_tasks_trace_gp_kthread(void);
-#else
-static inline void show_rcu_tasks_trace_gp_kthread(void) {}
-#endif

 #ifdef CONFIG_TINY_RCU
 static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
@@ -400,11 +400,6 @@ static void tasks_trace_scale_read_unlock(int idx)
 	rcu_read_unlock_trace();
 }

-static void rcu_tasks_trace_scale_stats(void)
-{
-	rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG);
-}
-
 static struct rcu_scale_ops tasks_tracing_ops = {
 	.ptype = RCU_TASKS_FLAVOR,
 	.init = rcu_sync_scale_init,
@@ -416,8 +411,6 @@ static struct rcu_scale_ops tasks_tracing_ops = {
 	.gp_barrier = rcu_barrier_tasks_trace,
 	.sync = synchronize_rcu_tasks_trace,
 	.exp_sync = synchronize_rcu_tasks_trace,
-	.rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
-	.stats = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats,
 	.name = "tasks-tracing"
 };

@@ -1180,8 +1180,6 @@ static struct rcu_torture_ops tasks_tracing_ops = {
 	.exp_sync = synchronize_rcu_tasks_trace,
 	.call = call_rcu_tasks_trace,
 	.cb_barrier = rcu_barrier_tasks_trace,
-	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
-	.get_gp_data = rcu_tasks_trace_get_gp_data,
 	.cbflood_max = 50000,
 	.irq_capable = 1,
 	.slow_gps = 1,
@@ -161,11 +161,6 @@ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
 #endif

-/* Avoid IPIing CPUs early in the grace period. */
-#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
-static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
-module_param(rcu_task_ipi_delay, int, 0644);
-
 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
@@ -800,8 +795,6 @@ static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *t

 #endif // #ifndef CONFIG_TINY_RCU

-static void exit_tasks_rcu_finish_trace(struct task_struct *t);
-
 #if defined(CONFIG_TASKS_RCU)

 ////////////////////////////////////////////////////////////////////////
@@ -1321,13 +1314,11 @@ void exit_tasks_rcu_finish(void)
 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 	list_del_init(&t->rcu_tasks_exit_list);
 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-
-	exit_tasks_rcu_finish_trace(t);
 }

 #else /* #ifdef CONFIG_TASKS_RCU */
 void exit_tasks_rcu_start(void) { }
-void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
+void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */

 #ifdef CONFIG_TASKS_RUDE_RCU
@@ -1475,69 +1466,6 @@ void __init rcu_tasks_trace_suppress_unused(void)
 #endif // #ifndef CONFIG_TINY_RCU
 }

-/*
- * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
- * the four-byte operand-size restriction of some platforms.
- *
- * Returns the old value, which is often ignored.
- */
-u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
-{
-	return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
-}
-EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
-
-/* Add a newly blocked reader task to its CPU's list. */
-void rcu_tasks_trace_qs_blkd(struct task_struct *t)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
-
-/* Communicate task state back to the RCU tasks trace stall warning request. */
-struct trc_stall_chk_rdr {
-	int nesting;
-	int ipi_to_cpu;
-	u8 needqs;
-};
-
-/* Report any needed quiescent state for this exiting task. */
-static void exit_tasks_rcu_finish_trace(struct task_struct *t)
-{
-}
-
-int rcu_tasks_trace_lazy_ms = -1;
-module_param(rcu_tasks_trace_lazy_ms, int, 0444);
-
-static int __init rcu_spawn_tasks_trace_kthread(void)
-{
-	return 0;
-}
-
-#if !defined(CONFIG_TINY_RCU)
-void show_rcu_tasks_trace_gp_kthread(void)
-{
-}
-EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
-
-void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
-#endif // !defined(CONFIG_TINY_RCU)
-
-struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
-{
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
-
-void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);
-
-#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
-static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

 #ifndef CONFIG_TINY_RCU
@@ -1545,7 +1473,6 @@ void show_rcu_tasks_gp_kthreads(void)
 {
 	show_rcu_tasks_classic_gp_kthread();
 	show_rcu_tasks_rude_gp_kthread();
-	show_rcu_tasks_trace_gp_kthread();
 }
 #endif /* #ifndef CONFIG_TINY_RCU */

@@ -1684,10 +1611,6 @@ static int __init rcu_init_tasks_generic(void)
 	rcu_spawn_tasks_rude_kthread();
 #endif

-#ifdef CONFIG_TASKS_TRACE_RCU
-	rcu_spawn_tasks_trace_kthread();
-#endif
-
 	// Run the self-tests.
 	rcu_tasks_initiate_self_tests();

@@ -10,5 +10,4 @@ CONFIG_PROVE_LOCKING=n
 #CHECK#CONFIG_PROVE_RCU=n
 CONFIG_FORCE_TASKS_TRACE_RCU=y
 #CHECK#CONFIG_TASKS_TRACE_RCU=y
-CONFIG_TASKS_TRACE_RCU_READ_MB=y
 CONFIG_RCU_EXPERT=y
@@ -9,6 +9,5 @@ CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_FORCE_TASKS_TRACE_RCU=y
 #CHECK#CONFIG_TASKS_TRACE_RCU=y
-CONFIG_TASKS_TRACE_RCU_READ_MB=n
 CONFIG_RCU_EXPERT=y
 CONFIG_DEBUG_OBJECTS=y