rqspinlock: Add a test-and-set fallback

Include a test-and-set fallback for use when queued spinlock support is
not available, and introduce an rqspinlock type that backs rqspinlock_t
when qspinlock support is absent.
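Both definitions expose the lock word as an atomic_t named val (struct
qspinlock does so through its own union), so code shared between the two
configurations can be written once against rqspinlock_t. As an
illustration only (not part of this patch; the helper name is invented),
the kind of access that compiles either way:

/*
 * Illustration only: whichever definition rqspinlock_t resolves to, the
 * lock word is reachable as lock->val, so shared code such as the
 * test-and-set path can poll it directly.
 */
static inline bool rqspinlock_is_locked(rqspinlock_t *lock)
{
        return atomic_read(&lock->val) != 0;
}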

Include ifdef guards to ensure the slow path in this file is only
compiled when CONFIG_QUEUED_SPINLOCKS=y. Subsequent patches will add
further logic to ensure fallback to the test-and-set implementation
when queued spinlock support is unavailable on an architecture.
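One plausible shape for that follow-up wiring, shown here only as a
hedged sketch (the macro below is hypothetical and not part of this
commit), is to alias the slow-path entry point to the test-and-set lock
when qspinlock support is missing:

/*
 * Hypothetical sketch of a later patch, not this commit: on architectures
 * without queued spinlocks, route slow-path calls to the test-and-set
 * fallback so callers of rqspinlock_t need not care which one is built.
 */
#ifndef CONFIG_QUEUED_SPINLOCKS
#define resilient_queued_spin_lock_slowpath(lock, val) \
        resilient_tas_spin_lock(lock)
#endif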

Unlike other waiting loops in rqspinlock code, the one for test-and-set
has no theoretical upper bound under contention, so it needs a longer
timeout than usual. Bump it up to one second in this case.
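For context, the timeout plumbing this relies on arms its deadline
lazily; a simplified sketch (paraphrasing the existing
RES_RESET_TIMEOUT/check_timeout machinery, not verbatim kernel code) of
how the one-second budget takes effect:

/*
 * Simplified sketch: RES_RESET_TIMEOUT() only records the duration and
 * clears the deadline; the deadline is armed on the first check, so the
 * clock is read only once the test-and-set loop actually has to spin.
 */
static inline int tas_check_deadline(struct rqspinlock_timeout *ts)
{
        u64 now = ktime_get_mono_fast_ns();

        if (!ts->timeout_end) {
                /* First failed attempt: start the one-second budget now. */
                ts->timeout_end = now + ts->duration;
                return 0;
        }
        return now > ts->timeout_end ? -ETIMEDOUT : 0;
}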

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-14-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit c9102a68c0 (parent 31158ad02d)
Author: Kumar Kartikeya Dwivedi, 2025-03-15 21:05:29 -07:00
Committed by: Alexei Starovoitov

2 files changed, 61 insertions(+), 2 deletions(-)

Changed file 1 of 2 (the rqspinlock header):

@@ -12,11 +12,28 @@
#include <linux/types.h>
#include <vdso/time64.h>
#include <linux/percpu.h>
#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#endif

struct rqspinlock {
        union {
                atomic_t val;
                u32 locked;
        };
};

struct qspinlock;
#ifdef CONFIG_QUEUED_SPINLOCKS
typedef struct qspinlock rqspinlock_t;
#else
typedef struct rqspinlock rqspinlock_t;
#endif

extern int resilient_tas_spin_lock(rqspinlock_t *lock);
#ifdef CONFIG_QUEUED_SPINLOCKS
extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
#endif

/*
 * Default timeout for waiting loops is 0.25 seconds

Changed file 2 of 2 (the rqspinlock slow-path implementation):

@@ -21,7 +21,9 @@
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#endif
#include <trace/events/lock.h>
#include <asm/rqspinlock.h>
#include <linux/timekeeping.h>
@@ -29,9 +31,12 @@
/*
* Include queued spinlock definitions and statistics code
*/
#ifdef CONFIG_QUEUED_SPINLOCKS
#include "../locking/qspinlock.h"
#include "../locking/lock_events.h"
#include "rqspinlock.h"
#include "../locking/mcs_spinlock.h"
#endif

/*
* The basic principle of a queue-based spinlock can best be understood
@@ -70,8 +75,6 @@
 *
 */
-#include "../locking/mcs_spinlock.h"

struct rqspinlock_timeout {
        u64 timeout_end;
        u64 duration;
@@ -263,6 +266,43 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
 */
#define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })

/*
* Provide a test-and-set fallback for cases when queued spin lock support is
* absent from the architecture.
*/
int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock)
{
        struct rqspinlock_timeout ts;
        int val, ret = 0;

        RES_INIT_TIMEOUT(ts);
        /* Publish this lock in the per-CPU held-locks table for deadlock detection. */
        grab_held_lock_entry(lock);

        /*
         * The time spent in this waiting loop depends on the amount of
         * contention, so the short timeout used by the other rqspinlock
         * waiting loops isn't enough. Use a second as the timeout value.
         */
        RES_RESET_TIMEOUT(ts, NSEC_PER_SEC);
retry:
        val = atomic_read(&lock->val);

        if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) {
                if (RES_CHECK_TIMEOUT(ts, ret, ~0u))
                        goto out;
                cpu_relax();
                goto retry;
        }

        return 0;
out:
        /* Timed out: drop the held-lock entry we grabbed above before reporting the error. */
        release_held_lock_entry();
        return ret;
}
EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);

#ifdef CONFIG_QUEUED_SPINLOCKS

/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
@@ -616,3 +656,5 @@ err_release_entry:
        return ret;
}
EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);

#endif /* CONFIG_QUEUED_SPINLOCKS */