sched: Make cond_resched_lock() variants RT aware
The __might_resched() checks in the cond_resched_lock() variants use
PREEMPT_LOCK_OFFSET for preempt count offset checking, which takes into
account the preemption disabled by the spin_lock() that is still held at
that point.

On PREEMPT_RT enabled kernels, spin/rw_lock held sections stay
preemptible, which means PREEMPT_LOCK_OFFSET is 0. The __might_resched()
check still triggers, because it also takes RCU read side nesting into
account: on RT enabled kernels, spin/read/write_lock() issue
rcu_read_lock() to resemble the !RT semantics, so in the
cond_resched_lock() variants the check sees preempt_count() == 0 and
rcu_preempt_depth() == 1.

Introduce PREEMPT_LOCK_RESCHED_OFFSETS for those might resched checks and
map it depending on CONFIG_PREEMPT_RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210923165358.305969211@linutronix.de
commit 3e9cc688e5 (parent 50e081b96e)
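To make the failure mode concrete before the diff, here is a stand-alone
user-space sketch (an illustration, not kernel code) of how the
__might_resched() state check folds the RCU read side nesting depth into
the bits above MIGHT_RESCHED_PREEMPT_MASK and compares the result against
the caller supplied offsets. The mock_* variables and the main() driver
are invented for the example:

#include <stdio.h>

#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

/* Stand-ins for the real per-task/per-CPU state (illustration only). */
static unsigned int mock_preempt_count;
static unsigned int mock_rcu_preempt_depth;

/*
 * Models the offset comparison in __might_resched(): fold the RCU
 * nesting depth into the upper bits and compare against the expectation.
 */
static int offsets_ok(unsigned int offsets)
{
	unsigned int nested = mock_preempt_count;

	nested += mock_rcu_preempt_depth << MIGHT_RESCHED_RCU_SHIFT;
	return nested == offsets;
}

int main(void)
{
	/*
	 * State after spin_lock() on PREEMPT_RT: preemption stays
	 * enabled, but the lock implies rcu_read_lock().
	 */
	mock_preempt_count = 0;
	mock_rcu_preempt_depth = 1;

	/* Old expectation: PREEMPT_LOCK_OFFSET alone, which is 0 on RT. */
	printf("PREEMPT_LOCK_OFFSET only:     %s\n",
	       offsets_ok(0) ? "ok" : "would splat");

	/* New expectation: on RT the RCU nesting is accounted for. */
	printf("PREEMPT_LOCK_RESCHED_OFFSETS: %s\n",
	       offsets_ok(0 + (1U << MIGHT_RESCHED_RCU_SHIFT)) ?
	       "ok" : "would splat");
	return 0;
}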
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -122,9 +122,10 @@
  * The preempt_count offset after spin_lock()
  */
 #if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
 #else
-#define PREEMPT_LOCK_OFFSET	0
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET	0
 #endif
 
 /*
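As a reminder of what this offset encodes, the following sketch models
only the preempt count side effect of spin_lock(); mock_spin_lock() and
the PREEMPT_DISABLE_OFFSET value of 1 (a CONFIG_PREEMPT_COUNT=y build)
are assumptions for illustration, not kernel code:

#include <assert.h>

/* Assumption: CONFIG_PREEMPT_COUNT=y, one preempt disable adds 1. */
#define PREEMPT_DISABLE_OFFSET	1

#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
#else
/* Locks on RT do not disable preemption */
#define PREEMPT_LOCK_OFFSET	0
#endif

static unsigned int mock_preempt_count;

/* Models only what spin_lock() does to the preempt count. */
static void mock_spin_lock(void)
{
#if !defined(CONFIG_PREEMPT_RT)
	mock_preempt_count += PREEMPT_DISABLE_OFFSET;	/* !RT: preemption off */
#endif
	/* RT: rtmutex based lock, preemption stays enabled. */
}

int main(void)
{
	mock_spin_lock();
	/*
	 * PREEMPT_LOCK_OFFSET names exactly the count a held lock
	 * contributes; build with -DCONFIG_PREEMPT_RT to flip configs.
	 */
	assert(mock_preempt_count == PREEMPT_LOCK_OFFSET);
	return 0;
}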
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2049,19 +2049,35 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 #define MIGHT_RESCHED_RCU_SHIFT		8
 #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
 
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS	\
+	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
 #define cond_resched_lock(lock) ({						\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);		\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_lock(lock);						\
 })
 
 #define cond_resched_rwlock_read(lock) ({					\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);		\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_rwlock_read(lock);					\
 })
 
 #define cond_resched_rwlock_write(lock) ({					\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);		\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_rwlock_write(lock);					\
 })
 
 static inline void cond_resched_rcu(void)
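For context, this is the usual call pattern the changed macros guard: a
long scan under a spinlock calls cond_resched_lock() so the lock hold
time stays bounded. The foo_table structure and foo_table_clear() are
made up for illustration; spin_lock() and cond_resched_lock() are the
real kernel APIs:

#include <linux/spinlock.h>
#include <linux/sched.h>

struct foo_table {
	spinlock_t	lock;
	unsigned int	nr;
	unsigned long	*slots;
};

static void foo_table_clear(struct foo_table *t)
{
	unsigned int i;

	spin_lock(&t->lock);
	for (i = 0; i < t->nr; i++) {
		t->slots[i] = 0;
		/*
		 * Drops t->lock, reschedules if needed, and reacquires
		 * it. The return value (nonzero if it rescheduled) is
		 * irrelevant here since the loop state is index based.
		 * On !RT the __might_resched() check expects the held
		 * lock's preempt count; on RT it now also expects the
		 * RCU nesting the lock implies.
		 */
		cond_resched_lock(&t->lock);
	}
	spin_unlock(&t->lock);
}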