unwind_user/deferred: Add unwind cache
Cache the results of the unwind so that the unwind is performed only once, even when requested by multiple tracers. The cache's nr_entries gets cleared every time the task exits the kernel. When a stacktrace is requested, nr_entries gets set to the number of entries in the stacktrace. If another stacktrace is requested while nr_entries is non-zero, the cache already contains the stacktrace that would be produced, so it is not generated again and the cached entries are given to the caller.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Indu Bhagat <indu.bhagat@oracle.com>
Cc: "Jose E. Marchesi" <jemarch@gnu.org>
Cc: Beau Belgrave <beaub@linux.microsoft.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Sam James <sam@gentoo.org>
Link: https://lore.kernel.org/20250729182405.319691167@kernel.org
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Reviewed-by: Indu Bhagat <indu.bhagat@oracle.com>
Co-developed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
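To make the calling convention concrete, here is a minimal sketch (illustrative only, not part of the patch) of how a tracer might hit the cache. It assumes CONFIG_UNWIND_USER=y and a faultable task context; the function name example_trace_user_stack() is hypothetical:

	#include <linux/printk.h>
	#include <linux/unwind_deferred.h>

	static void example_trace_user_stack(void)
	{
		struct unwind_stacktrace trace;

		/*
		 * First request in this entry context: performs the real
		 * unwind and stores the result in current->unwind_info.cache.
		 */
		if (unwind_user_faultable(&trace))
			return;

		for (unsigned int i = 0; i < trace.nr; i++)
			pr_info("user frame: %lx\n", trace.entries[i]);

		/*
		 * A second request (e.g. from another tracer) before the
		 * task exits the kernel sees cache->nr_entries != 0 and is
		 * handed the cached entries without walking the user stack
		 * again.
		 */
		if (unwind_user_faultable(&trace))
			return;
	}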
@@ -12,6 +12,7 @@
 #include <linux/resume_user_mode.h>
 #include <linux/tick.h>
 #include <linux/kmsan.h>
+#include <linux/unwind_deferred.h>
 
 #include <asm/entry-common.h>
 #include <asm/syscall.h>
@@ -362,6 +363,7 @@ static __always_inline void exit_to_user_mode(void)
 	lockdep_hardirqs_on_prepare();
 	instrumentation_end();
 
+	unwind_reset_info();
 	user_enter_irqoff();
 	arch_exit_to_user_mode();
 	lockdep_hardirqs_on(CALLER_ADDR0);
@@ -12,6 +12,12 @@ void unwind_task_free(struct task_struct *task);
 
 int unwind_user_faultable(struct unwind_stacktrace *trace);
 
+static __always_inline void unwind_reset_info(void)
+{
+	if (unlikely(current->unwind_info.cache))
+		current->unwind_info.cache->nr_entries = 0;
+}
+
 #else /* !CONFIG_UNWIND_USER */
 
 static inline void unwind_task_init(struct task_struct *task) {}
@@ -19,6 +25,8 @@ static inline void unwind_task_free(struct task_struct *task) {}
 
 static inline int unwind_user_faultable(struct unwind_stacktrace *trace) { return -ENOSYS; }
 
+static inline void unwind_reset_info(void) {}
+
 #endif /* !CONFIG_UNWIND_USER */
 
 #endif /* _LINUX_UNWIND_USER_DEFERRED_H */
@@ -2,8 +2,13 @@
 #ifndef _LINUX_UNWIND_USER_DEFERRED_TYPES_H
 #define _LINUX_UNWIND_USER_DEFERRED_TYPES_H
 
+struct unwind_cache {
+	unsigned int		nr_entries;
+	unsigned long		entries[];
+};
+
 struct unwind_task_info {
-	unsigned long		*entries;
+	struct unwind_cache	*cache;
 };
 
 #endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
@@ -4,10 +4,13 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/unwind_deferred.h>
 
-#define UNWIND_MAX_ENTRIES 512
+/* Make the cache fit in a 4K page */
+#define UNWIND_MAX_ENTRIES					\
+	((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
 
 /**
  * unwind_user_faultable - Produce a user stacktrace in faultable context
@@ -24,6 +27,7 @@
 int unwind_user_faultable(struct unwind_stacktrace *trace)
 {
 	struct unwind_task_info *info = &current->unwind_info;
+	struct unwind_cache *cache;
 
 	/* Should always be called from faultable context */
 	might_fault();
@@ -31,17 +35,30 @@ int unwind_user_faultable(struct unwind_stacktrace *trace)
 	if (current->flags & PF_EXITING)
 		return -EINVAL;
 
-	if (!info->entries) {
-		info->entries = kmalloc_array(UNWIND_MAX_ENTRIES, sizeof(long),
-					      GFP_KERNEL);
-		if (!info->entries)
+	if (!info->cache) {
+		info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES),
+				      GFP_KERNEL);
+		if (!info->cache)
 			return -ENOMEM;
 	}
 
+	cache = info->cache;
+	trace->entries = cache->entries;
+
+	if (cache->nr_entries) {
+		/*
+		 * The user stack has already been previously unwound in this
+		 * entry context. Skip the unwind and use the cache.
+		 */
+		trace->nr = cache->nr_entries;
+		return 0;
+	}
+
 	trace->nr = 0;
-	trace->entries = info->entries;
 	unwind_user(trace, UNWIND_MAX_ENTRIES);
 
+	cache->nr_entries = trace->nr;
+
 	return 0;
 }
@@ -56,5 +73,5 @@ void unwind_task_free(struct task_struct *task)
 {
 	struct unwind_task_info *info = &task->unwind_info;
 
-	kfree(info->entries);
+	kfree(info->cache);
 }
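A sizing note (a back-of-the-envelope check, assuming a typical 64-bit build): sizeof(struct unwind_cache) is 8 bytes, since the 4-byte nr_entries is padded out to the 8-byte alignment of the unsigned long flexible array. UNWIND_MAX_ENTRIES therefore evaluates to (4096 - 8) / 8 = 511, and the kzalloc() of struct_size(cache, entries, UNWIND_MAX_ENTRIES) requests 8 + 511 * 8 = 4096 bytes, so the whole cache fits in exactly one 4K page, as the comment above the macro intends.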