bpf-fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmkXpZUACgkQ6rmadz2v
 bTrGCw//UCx+KBXbzvv7m0A1QGOUL3oHL/Qd+OJA3RW3B+saVbYYzn9jjl0SRgFP
 X0q/DwbDOjFtOSORV9oFgJkrucn7+BM/yxPaC4sE1SQZJAjDFA/CSaF0r8duuGsM
 Mvat9TTiwwetOMAkNB9WZ1e6AKGovBLguLFGAWZc6vLeQZopcER5+pFwS44a9RrK
 dq0Th8O/oY3VmUDgSKJ2KyY51KxpJU7k2ipifiIbu1M1MWZ7s2vERkMEkzJ/lB8/
 nldMsTZUdknGFzVH/W6Rc9ScFYlH+h/x1gkOHwTibMsqDBm92mWVo6O7hvuUbsEO
 NlPDgMtkhBp7PDSx9SA0UBcriMs1M6ovNBOpj/cI4AL1k8WNubf/FHZtrBwoy8C9
 3HaM+8lkA2uiHVPUvT5dImzWqshweN0GXoXAoa9xPSQPchJ38UdzCHqYRAg/kWFZ
 5jUK2j4e5+yyII44pD7Xti0PrfoP81giliqmTbGFV8+Y89dQnk+WK12vnbv34ER7
 unLwId8HLtq0ZN7FVG4F6s/4qNdEMKqXbAkve0WWFXn4vKZMCju4ol6NYVGisRAg
 zcn7Yk+weSuY3UOzC+/4SxhfTEAD0Kg6fUoG/1JdflgNsm8XhLBja0DZaAlIVO0p
 xz5UaljwcNvjAKGGMYbCGrf3XN2tOmGpVyJkMj17Vcq88y3bJBU=
 =JJui
 -----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix interaction between livepatch and BPF fexit programs (Song Liu),
   with acks from Steven and Masami

 - Fix stack ORC unwind from BPF kprobe_multi (Jiri Olsa), with acks
   from Steven and Masami

 - Fix out of bounds access in widen_imprecise_scalars() in the verifier
   (Eduard Zingerman)

 - Fix conflicts between MPTCP and BPF sockmap (Jiayuan Chen)

 - Fix net_sched storage collision with BPF data_meta/data_end (Eric
   Dumazet)

 - Add _impl suffix to BPF kfuncs with implicit args to avoid breaking
   them in bpf-next when KF_IMPLICIT_ARGS is added (Mykyta Yatsenko);
   a caller-side sketch follows this list
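
A minimal caller-side sketch of that last item (hypothetical BPF program, not
part of the series; the extern declaration mirrors the header update further
down in this diff, and the trailing aux__prog parameter is the implicit
argument that callers leave as NULL):

/* Hypothetical sketch only: calling the renamed kfunc directly. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str,
				   const void *args, __u32 len__sz,
				   void *aux__prog) __weak __ksym;

char _license[] SEC("license") = "GPL";

SEC("syscall")
int stream_demo(void *ctx)
{
	__u64 param[1] = { 42 };

	/* 1 == BPF_STDOUT (assumed value); most programs go through the
	 * bpf_stream_printk() wrapper macro instead of the _impl name. */
	return bpf_stream_vprintk_impl(1, "value: %llu", param,
				       sizeof(param), NULL);
}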

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Test widen_imprecise_scalars() with different stack depth
  bpf: account for current allocated stack depth in widen_imprecise_scalars()
  bpf: Add bpf_prog_run_data_pointers()
  selftests/bpf: Add mptcp test with sockmap
  mptcp: Fix proto fallback detection with BPF
  mptcp: Disallow MPTCP subflows from sockmap
  selftests/bpf: Add stacktrace ips test for raw_tp
  selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi
  x86/fgraph,bpf: Fix stack ORC unwind from kprobe_multi return probe
  Revert "perf/x86: Always store regs->ip in perf_callchain_kernel()"
  bpf: add _impl suffix for bpf_stream_vprintk() kfunc
  bpf: add _impl suffix for bpf_task_work_schedule* kfuncs
  selftests/bpf: Add tests for livepatch + bpf trampoline
  ftrace: bpf: Fix IPMODIFY + DIRECT in modify_ftrace_direct()
  ftrace: Fix BPF fexit with livepatch
Linus Torvalds 2025-11-14 15:39:39 -08:00
commit cbba5d1b53
29 changed files with 761 additions and 83 deletions


@ -2789,13 +2789,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
return;
}
if (perf_hw_regs(regs)) {
if (perf_callchain_store(entry, regs->ip))
return;
if (perf_hw_regs(regs))
unwind_start(&state, current, regs, NULL);
else
} else {
unwind_start(&state, current, NULL, (void *)regs->sp);
}
for (; !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);


@ -56,6 +56,11 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
return &arch_ftrace_regs(fregs)->regs;
}
#define arch_ftrace_partial_regs(regs) do { \
regs->flags &= ~X86_EFLAGS_FIXED; \
regs->cs = __KERNEL_CS; \
} while (0)
#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
(_regs)->ip = arch_ftrace_regs(fregs)->regs.ip; \
(_regs)->sp = arch_ftrace_regs(fregs)->regs.sp; \


@ -354,12 +354,17 @@ SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
/* Restore return_to_handler value that got eaten by previous ret instruction. */
subq $8, %rsp
UNWIND_HINT_FUNC
/* Save ftrace_regs for function exit context */
subq $(FRAME_SIZE), %rsp
movq %rax, RAX(%rsp)
movq %rdx, RDX(%rsp)
movq %rbp, RBP(%rsp)
movq %rsp, RSP(%rsp)
movq %rsp, %rdi
call ftrace_return_to_handler
@ -368,7 +373,8 @@ SYM_CODE_START(return_to_handler)
movq RDX(%rsp), %rdx
movq RAX(%rsp), %rax
addq $(FRAME_SIZE), %rsp
addq $(FRAME_SIZE) + 8, %rsp
/*
* Jump back to the old return address. This cannot be JMP_NOSPEC rdi
* since IBT would demand that contain ENDBR, which simply isn't so for


@ -901,6 +901,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
static inline int bpf_prog_run_data_pointers(
const struct bpf_prog *prog,
struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
void *save_data_meta, *save_data_end;
int res;
save_data_meta = cb->data_meta;
save_data_end = cb->data_end;
bpf_compute_data_pointers(skb);
res = bpf_prog_run(prog, skb);
cb->data_meta = save_data_meta;
cb->data_end = save_data_end;
return res;
}
/* Similar to bpf_compute_data_pointers(), except that save orginal
* data in cb->data and cb->meta_data for restore.
*/
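
A brief usage sketch of the new helper (hypothetical kernel-side caller, not a
standalone program; the real conversions are the act_bpf and cls_bpf hunks
further down): it recomputes data_meta/data_end for this program run and then
restores the caller's cb values, so nested classifier invocations no longer
collide over the net_sched storage in skb->cb.

/* Hypothetical ingress-style call site, kernel context only. */
#include <linux/filter.h>
#include <linux/skbuff.h>

static int run_classifier(const struct bpf_prog *prog, struct sk_buff *skb)
{
	int act;

	__skb_push(skb, skb->mac_len);
	/* saves cb->data_meta/cb->data_end, recomputes, runs, restores */
	act = bpf_prog_run_data_pointers(prog, skb);
	__skb_pull(skb, skb->mac_len);
	return act;
}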


@ -193,6 +193,10 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
#ifndef arch_ftrace_partial_regs
#define arch_ftrace_partial_regs(regs) do {} while (0)
#endif
static __always_inline struct pt_regs *
ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
@ -202,7 +206,11 @@ ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
* Since arch_ftrace_get_regs() will check some members and may return
* NULL, we can not use it.
*/
return &arch_ftrace_regs(fregs)->regs;
regs = &arch_ftrace_regs(fregs)->regs;
/* Allow arch specific updates to regs. */
arch_ftrace_partial_regs(regs);
return regs;
}
#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */


@ -4169,7 +4169,8 @@ release_prog:
}
/**
* bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
* bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@ -4178,15 +4179,17 @@ release_prog:
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
void *map__map, bpf_task_work_callback_t callback,
__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
struct bpf_task_work *tw, void *map__map,
bpf_task_work_callback_t callback,
void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
}
/**
* bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
* bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@ -4195,8 +4198,9 @@ __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct b
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
void *map__map, bpf_task_work_callback_t callback,
__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
struct bpf_task_work *tw, void *map__map,
bpf_task_work_callback_t callback,
void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
@ -4376,9 +4380,9 @@ BTF_ID_FLAGS(func, bpf_strnstr);
#if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {


@ -355,7 +355,8 @@ __bpf_kfunc_start_defs();
* Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the
* enum in headers.
*/
__bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, u32 len__sz, void *aux__prog)
__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
u32 len__sz, void *aux__prog)
{
struct bpf_bprintf_data data = {
.get_bin_args = true,


@ -479,11 +479,6 @@ again:
* BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
* trampoline again, and retry register.
*/
/* reset fops->func and fops->trampoline for re-register */
tr->fops->func = NULL;
tr->fops->trampoline = 0;
/* free im memory and reallocate later */
bpf_tramp_image_free(im);
goto again;
}


@ -8866,7 +8866,7 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
struct bpf_verifier_state *cur)
{
struct bpf_func_state *fold, *fcur;
int i, fr;
int i, fr, num_slots;
reset_idmap_scratch(env);
for (fr = old->curframe; fr >= 0; fr--) {
@ -8879,7 +8879,9 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
&fcur->regs[i],
&env->idmap_scratch);
for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
num_slots = min(fold->allocated_stack / BPF_REG_SIZE,
fcur->allocated_stack / BPF_REG_SIZE);
for (i = 0; i < num_slots; i++) {
if (!is_spilled_reg(&fold->stack[i]) ||
!is_spilled_reg(&fcur->stack[i]))
continue;
@ -12259,8 +12261,8 @@ enum special_kfunc_type {
KF_bpf_res_spin_lock_irqsave,
KF_bpf_res_spin_unlock_irqrestore,
KF___bpf_trap,
KF_bpf_task_work_schedule_signal,
KF_bpf_task_work_schedule_resume,
KF_bpf_task_work_schedule_signal_impl,
KF_bpf_task_work_schedule_resume_impl,
};
BTF_ID_LIST(special_kfunc_list)
@ -12331,13 +12333,13 @@ BTF_ID(func, bpf_res_spin_unlock)
BTF_ID(func, bpf_res_spin_lock_irqsave)
BTF_ID(func, bpf_res_spin_unlock_irqrestore)
BTF_ID(func, __bpf_trap)
BTF_ID(func, bpf_task_work_schedule_signal)
BTF_ID(func, bpf_task_work_schedule_resume)
BTF_ID(func, bpf_task_work_schedule_signal_impl)
BTF_ID(func, bpf_task_work_schedule_resume_impl)
static bool is_task_work_add_kfunc(u32 func_id)
{
return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
}
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)


@ -1971,7 +1971,8 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
*/
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
struct ftrace_hash *old_hash,
struct ftrace_hash *new_hash)
struct ftrace_hash *new_hash,
bool update_target)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec, *end = NULL;
@ -2006,10 +2007,13 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
if (rec->flags & FTRACE_FL_DISABLED)
continue;
/* We need to update only differences of filter_hash */
/*
* Unless we are updating the target of a direct function,
* we only need to update differences of filter_hash
*/
in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
if (in_old == in_new)
if (!update_target && (in_old == in_new))
continue;
if (in_new) {
@ -2020,7 +2024,16 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
if (is_ipmodify)
goto rollback;
FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
/*
* If this is called by __modify_ftrace_direct()
* then it is only changing where the direct
* pointer is jumping to, and the record already
* points to a direct trampoline. If it isn't,
* then it is a bug to update ipmodify on a direct
* caller.
*/
FTRACE_WARN_ON(!update_target &&
(rec->flags & FTRACE_FL_DIRECT));
/*
* Another ops with IPMODIFY is already
@ -2076,7 +2089,7 @@ static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
if (ftrace_hash_empty(hash))
hash = NULL;
return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false);
}
/* Disabling always succeeds */
@ -2087,7 +2100,7 @@ static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
if (ftrace_hash_empty(hash))
hash = NULL;
__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false);
}
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
@ -2101,7 +2114,7 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
if (ftrace_hash_empty(new_hash))
new_hash = NULL;
return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false);
}
static void print_ip_ins(const char *fmt, const unsigned char *p)
@ -5953,6 +5966,17 @@ static void register_ftrace_direct_cb(struct rcu_head *rhp)
free_ftrace_hash(fhp);
}
static void reset_direct(struct ftrace_ops *ops, unsigned long addr)
{
struct ftrace_hash *hash = ops->func_hash->filter_hash;
remove_direct_functions_hash(hash, addr);
/* cleanup for possible another register call */
ops->func = NULL;
ops->trampoline = 0;
}
/**
* register_ftrace_direct - Call a custom trampoline directly
* for multiple functions registered in @ops
@ -6048,6 +6072,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
ops->direct_call = addr;
err = register_ftrace_function_nolock(ops);
if (err)
reset_direct(ops, addr);
out_unlock:
mutex_unlock(&direct_mutex);
@ -6080,7 +6106,6 @@ EXPORT_SYMBOL_GPL(register_ftrace_direct);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
bool free_filters)
{
struct ftrace_hash *hash = ops->func_hash->filter_hash;
int err;
if (check_direct_multi(ops))
@ -6090,13 +6115,9 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
mutex_lock(&direct_mutex);
err = unregister_ftrace_function(ops);
remove_direct_functions_hash(hash, addr);
reset_direct(ops, addr);
mutex_unlock(&direct_mutex);
/* cleanup for possible another register call */
ops->func = NULL;
ops->trampoline = 0;
if (free_filters)
ftrace_free_filter(ops);
return err;
@ -6106,7 +6127,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
static int
__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
struct ftrace_hash *hash;
struct ftrace_hash *hash = ops->func_hash->filter_hash;
struct ftrace_func_entry *entry, *iter;
static struct ftrace_ops tmp_ops = {
.func = ftrace_stub,
@ -6126,13 +6147,21 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
if (err)
return err;
/*
* Call __ftrace_hash_update_ipmodify() here, so that we can call
* ops->ops_func for the ops. This is needed because the above
* register_ftrace_function_nolock() worked on tmp_ops.
*/
err = __ftrace_hash_update_ipmodify(ops, hash, hash, true);
if (err)
goto out;
/*
* Now the ftrace_ops_list_func() is called to do the direct callers.
* We can safely change the direct functions attached to each entry.
*/
mutex_lock(&ftrace_lock);
hash = ops->func_hash->filter_hash;
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
@ -6147,6 +6176,7 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
mutex_unlock(&ftrace_lock);
out:
/* Removing the tmp_ops will add the updated direct callers to the functions */
unregister_ftrace_function(&tmp_ops);


@ -61,11 +61,13 @@ static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
unsigned short family = READ_ONCE(sk->sk_family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (sk->sk_prot == &tcpv6_prot)
if (family == AF_INET6)
return &inet6_stream_ops;
#endif
WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
WARN_ON_ONCE(family != AF_INET);
return &inet_stream_ops;
}


@ -2144,6 +2144,10 @@ void __init mptcp_subflow_init(void)
tcp_prot_override = tcp_prot;
tcp_prot_override.release_cb = tcp_release_cb_override;
tcp_prot_override.diag_destroy = tcp_abort_override;
#ifdef CONFIG_BPF_SYSCALL
/* Disable sockmap processing for subflows */
tcp_prot_override.psock_update_sk_prot = NULL;
#endif
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
@ -2180,6 +2184,10 @@ void __init mptcp_subflow_init(void)
tcpv6_prot_override = tcpv6_prot;
tcpv6_prot_override.release_cb = tcp_release_cb_override;
tcpv6_prot_override.diag_destroy = tcp_abort_override;
#ifdef CONFIG_BPF_SYSCALL
/* Disable sockmap processing for subflows */
tcpv6_prot_override.psock_update_sk_prot = NULL;
#endif
#endif
mptcp_diag_subflow_init(&subflow_ulp_ops);


@ -47,12 +47,10 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
filter_res = bpf_prog_run_data_pointers(filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(filter, skb);
filter_res = bpf_prog_run_data_pointers(filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;


@ -97,12 +97,10 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
} else if (at_ingress) {
/* It is safe to push/pull even if skb_shared() */
__skb_push(skb, skb->mac_len);
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(prog->filter, skb);
filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
bpf_compute_data_pointers(skb);
filter_res = bpf_prog_run(prog->filter, skb);
filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;


@ -182,7 +182,7 @@ bpftool prog tracelog
bpftool prog tracelog { stdout | stderr } *PROG*
Dump the BPF stream of the program. BPF programs can write to these streams
at runtime with the **bpf_stream_vprintk**\ () kfunc. The kernel may write
at runtime with the **bpf_stream_vprintk_impl**\ () kfunc. The kernel may write
error messages to the standard error stream. This facility should be used
only for debugging purposes.


@ -315,7 +315,7 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
__u32 len__sz, void *aux__prog) __weak __ksym;
#define bpf_stream_printk(stream_id, fmt, args...) \
@ -328,7 +328,7 @@ extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *a
___bpf_fill(___param, args); \
_Pragma("GCC diagnostic pop") \
\
bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param), NULL);\
bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \
})
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
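
Existing callers of the wrapper are unaffected by the rename; a minimal usage
sketch (hypothetical program body, assuming this header is included and
BPF_STDOUT is visible through vmlinux.h):

SEC("syscall")
int stream_macro_demo(void *ctx)
{
	/* expands to a bpf_stream_vprintk_impl(..., NULL) call as shown above */
	bpf_stream_printk(BPF_STDOUT, "val=%d", 42);
	return 0;
}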


@ -50,6 +50,7 @@ CONFIG_IPV6_SIT=y
CONFIG_IPV6_TUNNEL=y
CONFIG_KEYS=y
CONFIG_LIRC=y
CONFIG_LIVEPATCH=y
CONFIG_LWTUNNEL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SRCVERSION_ALL=y
@ -111,6 +112,8 @@ CONFIG_IP6_NF_FILTER=y
CONFIG_NF_NAT=y
CONFIG_PACKET=y
CONFIG_RC_CORE=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_LIVEPATCH=m
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SYN_COOKIES=y


@ -0,0 +1,107 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "testing_helpers.h"
#include "livepatch_trampoline.skel.h"
static int load_livepatch(void)
{
char path[4096];
/* CI will set KBUILD_OUTPUT */
snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko",
getenv("KBUILD_OUTPUT") ? : "../../../..");
return load_module(path, env_verbosity > VERBOSE_NONE);
}
static void unload_livepatch(void)
{
/* Disable the livepatch before unloading the module */
system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled");
unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE);
}
static void read_proc_cmdline(void)
{
char buf[4096];
int fd, ret;
fd = open("/proc/cmdline", O_RDONLY);
if (!ASSERT_OK_FD(fd, "open /proc/cmdline"))
return;
ret = read(fd, buf, sizeof(buf));
if (!ASSERT_GT(ret, 0, "read /proc/cmdline"))
goto out;
ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp");
out:
close(fd);
}
static void __test_livepatch_trampoline(bool fexit_first)
{
struct livepatch_trampoline *skel = NULL;
int err;
skel = livepatch_trampoline__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
goto out;
skel->bss->my_pid = getpid();
if (!fexit_first) {
/* fentry program is loaded first by default */
err = livepatch_trampoline__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
} else {
/* Manually load fexit program first. */
skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline);
if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit"))
goto out;
skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline);
if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry"))
goto out;
}
read_proc_cmdline();
ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit");
ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit");
out:
livepatch_trampoline__destroy(skel);
}
void test_livepatch_trampoline(void)
{
int retry_cnt = 0;
retry:
if (load_livepatch()) {
if (retry_cnt) {
ASSERT_OK(1, "load_livepatch");
goto out;
}
/*
* Something else (previous run of the same test?) loaded
* the KLP module. Unload the KLP module and retry.
*/
unload_livepatch();
retry_cnt++;
goto retry;
}
if (test__start_subtest("fentry_first"))
__test_livepatch_trampoline(false);
if (test__start_subtest("fexit_first"))
__test_livepatch_trampoline(true);
out:
unload_livepatch();
}


@ -6,11 +6,13 @@
#include <netinet/in.h>
#include <test_progs.h>
#include <unistd.h>
#include <errno.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_sock.skel.h"
#include "mptcpify.skel.h"
#include "mptcp_subflow.skel.h"
#include "mptcp_sockmap.skel.h"
#define NS_TEST "mptcp_ns"
#define ADDR_1 "10.0.1.1"
@ -436,6 +438,142 @@ close_cgroup:
close(cgroup_fd);
}
/* Test sockmap on MPTCP server handling non-mp-capable clients. */
static void test_sockmap_with_mptcp_fallback(struct mptcp_sockmap *skel)
{
int listen_fd = -1, client_fd1 = -1, client_fd2 = -1;
int server_fd1 = -1, server_fd2 = -1, sent, recvd;
char snd[9] = "123456789";
char rcv[10];
/* start server with MPTCP enabled */
listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
if (!ASSERT_OK_FD(listen_fd, "sockmap-fb:start_mptcp_server"))
return;
skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
skel->bss->sk_index = 0;
/* create client without MPTCP enabled */
client_fd1 = connect_to_fd_opts(listen_fd, NULL);
if (!ASSERT_OK_FD(client_fd1, "sockmap-fb:connect_to_fd"))
goto end;
server_fd1 = accept(listen_fd, NULL, 0);
skel->bss->sk_index = 1;
client_fd2 = connect_to_fd_opts(listen_fd, NULL);
if (!ASSERT_OK_FD(client_fd2, "sockmap-fb:connect_to_fd"))
goto end;
server_fd2 = accept(listen_fd, NULL, 0);
/* test normal redirect behavior: data sent by client_fd1 can be
* received by client_fd2
*/
skel->bss->redirect_idx = 1;
sent = send(client_fd1, snd, sizeof(snd), 0);
if (!ASSERT_EQ(sent, sizeof(snd), "sockmap-fb:send(client_fd1)"))
goto end;
/* try to recv more bytes to avoid truncation check */
recvd = recv(client_fd2, rcv, sizeof(rcv), 0);
if (!ASSERT_EQ(recvd, sizeof(snd), "sockmap-fb:recv(client_fd2)"))
goto end;
end:
if (client_fd1 >= 0)
close(client_fd1);
if (client_fd2 >= 0)
close(client_fd2);
if (server_fd1 >= 0)
close(server_fd1);
if (server_fd2 >= 0)
close(server_fd2);
close(listen_fd);
}
/* Test sockmap rejection of MPTCP sockets - both server and client sides. */
static void test_sockmap_reject_mptcp(struct mptcp_sockmap *skel)
{
int listen_fd = -1, server_fd = -1, client_fd1 = -1;
int err, zero = 0;
/* start server with MPTCP enabled */
listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
if (!ASSERT_OK_FD(listen_fd, "start_mptcp_server"))
return;
skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
skel->bss->sk_index = 0;
/* create client with MPTCP enabled */
client_fd1 = connect_to_fd(listen_fd, 0);
if (!ASSERT_OK_FD(client_fd1, "connect_to_fd client_fd1"))
goto end;
/* bpf_sock_map_update() called from sockops should reject MPTCP sk */
if (!ASSERT_EQ(skel->bss->helper_ret, -EOPNOTSUPP, "should reject"))
goto end;
server_fd = accept(listen_fd, NULL, 0);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
&zero, &server_fd, BPF_NOEXIST);
if (!ASSERT_EQ(err, -EOPNOTSUPP, "server should be disallowed"))
goto end;
/* MPTCP client should also be disallowed */
err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
&zero, &client_fd1, BPF_NOEXIST);
if (!ASSERT_EQ(err, -EOPNOTSUPP, "client should be disallowed"))
goto end;
end:
if (client_fd1 >= 0)
close(client_fd1);
if (server_fd >= 0)
close(server_fd);
close(listen_fd);
}
static void test_mptcp_sockmap(void)
{
struct mptcp_sockmap *skel;
struct netns_obj *netns;
int cgroup_fd, err;
cgroup_fd = test__join_cgroup("/mptcp_sockmap");
if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_sockmap"))
return;
skel = mptcp_sockmap__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_sockmap"))
goto close_cgroup;
skel->links.mptcp_sockmap_inject =
bpf_program__attach_cgroup(skel->progs.mptcp_sockmap_inject, cgroup_fd);
if (!ASSERT_OK_PTR(skel->links.mptcp_sockmap_inject, "attach sockmap"))
goto skel_destroy;
err = bpf_prog_attach(bpf_program__fd(skel->progs.mptcp_sockmap_redirect),
bpf_map__fd(skel->maps.sock_map),
BPF_SK_SKB_STREAM_VERDICT, 0);
if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
goto skel_destroy;
netns = netns_new(NS_TEST, true);
if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_sockmap"))
goto skel_destroy;
if (endpoint_init("subflow") < 0)
goto close_netns;
test_sockmap_with_mptcp_fallback(skel);
test_sockmap_reject_mptcp(skel);
close_netns:
netns_free(netns);
skel_destroy:
mptcp_sockmap__destroy(skel);
close_cgroup:
close(cgroup_fd);
}
void test_mptcp(void)
{
if (test__start_subtest("base"))
@ -444,4 +582,6 @@ void test_mptcp(void)
test_mptcpify();
if (test__start_subtest("subflow"))
test_subflow();
if (test__start_subtest("sockmap"))
test_mptcp_sockmap();
}


@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_ips.skel.h"
#ifdef __x86_64__
static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
{
__u64 ips[PERF_MAX_STACK_DEPTH];
struct ksyms *ksyms = NULL;
int i, err = 0;
va_list args;
/* sorted by addr */
ksyms = load_kallsyms_local();
if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
return -1;
/* unlikely, but... */
if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
return -1;
err = bpf_map_lookup_elem(fd, &key, ips);
if (err)
goto out;
/*
* Compare all symbols provided via arguments with stacktrace ips,
* and their related symbol addresses.
*/
va_start(args, cnt);
for (i = 0; i < cnt; i++) {
unsigned long val;
struct ksym *ksym;
val = va_arg(args, unsigned long);
ksym = ksym_search_local(ksyms, ips[i]);
if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
break;
ASSERT_EQ(ksym->addr, val, "stack_cmp");
}
va_end(args);
out:
free_kallsyms_local(ksyms);
return err;
}
static void test_stacktrace_ips_kprobe_multi(bool retprobe)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
.retprobe = retprobe
);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct stacktrace_ips *skel;
skel = stacktrace_ips__open_and_load();
if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
return;
if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
test__skip();
goto cleanup;
}
skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
skel->progs.kprobe_multi_test,
"bpf_testmod_stacktrace_test", &opts);
if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
trigger_module_test_read(1);
load_kallsyms();
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
ksym_get_addr("bpf_testmod_stacktrace_test_3"),
ksym_get_addr("bpf_testmod_stacktrace_test_2"),
ksym_get_addr("bpf_testmod_stacktrace_test_1"),
ksym_get_addr("bpf_testmod_test_read"));
cleanup:
stacktrace_ips__destroy(skel);
}
static void test_stacktrace_ips_raw_tp(void)
{
__u32 info_len = sizeof(struct bpf_prog_info);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_prog_info info = {};
struct stacktrace_ips *skel;
__u64 bpf_prog_ksym = 0;
int err;
skel = stacktrace_ips__open_and_load();
if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
return;
if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
test__skip();
goto cleanup;
}
skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
skel->progs.rawtp_test,
"bpf_testmod_test_read");
if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
goto cleanup;
/* get bpf program address */
info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
info.nr_jited_ksyms = 1;
err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
&info, &info_len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
goto cleanup;
trigger_module_test_read(1);
load_kallsyms();
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
bpf_prog_ksym,
ksym_get_addr("bpf_trace_run2"));
cleanup:
stacktrace_ips__destroy(skel);
}
static void __test_stacktrace_ips(void)
{
if (test__start_subtest("kprobe_multi"))
test_stacktrace_ips_kprobe_multi(false);
if (test__start_subtest("kretprobe_multi"))
test_stacktrace_ips_kprobe_multi(true);
if (test__start_subtest("raw_tp"))
test_stacktrace_ips_raw_tp();
}
#else
static void __test_stacktrace_ips(void)
{
test__skip();
}
#endif
void test_stacktrace_ips(void)
{
__test_stacktrace_ips();
}


@ -161,3 +161,56 @@ int simplest_loop(void *ctx)
return 0;
}
__used
static void iterator_with_diff_stack_depth(int x)
{
struct bpf_iter_num iter;
asm volatile (
"if r1 == 42 goto 0f;"
"*(u64 *)(r10 - 128) = 0;"
"0:"
/* create iterator */
"r1 = %[iter];"
"r2 = 0;"
"r3 = 10;"
"call %[bpf_iter_num_new];"
"1:"
/* consume next item */
"r1 = %[iter];"
"call %[bpf_iter_num_next];"
"if r0 == 0 goto 2f;"
"goto 1b;"
"2:"
/* destroy iterator */
"r1 = %[iter];"
"call %[bpf_iter_num_destroy];"
:
: __imm_ptr(iter), ITER_HELPERS
: __clobber_common, "r6"
);
}
SEC("socket")
__success
__naked int widening_stack_size_bug(void *ctx)
{
/*
* Depending on iterator_with_diff_stack_depth() parameter value,
* subprogram stack depth is either 8 or 128 bytes. Arrange values so
* that it is 128 on a first call and 8 on a second. This triggered a
* bug in verifier's widen_imprecise_scalars() logic.
*/
asm volatile (
"r6 = 0;"
"r1 = 0;"
"1:"
"call iterator_with_diff_stack_depth;"
"r1 = 42;"
"r6 += 1;"
"if r6 < 2 goto 1b;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
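
To make the numbers in the comment above concrete (illustration only, plain
userspace C, not part of the selftest): with the old loop bound taken from the
first, deeper state, the verifier indexed past the second state's smaller
stack array.

/* Standalone illustration of the out-of-bounds window closed by the
 * widen_imprecise_scalars() fix; values come from the comment above. */
#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	int old_allocated_stack = 128;	/* fold: first call, deep stack */
	int cur_allocated_stack = 8;	/* fcur: second call, shallow stack */
	int old_bound = old_allocated_stack / BPF_REG_SIZE;	/* 16 slots */
	int cur_slots = cur_allocated_stack / BPF_REG_SIZE;	/*  1 slot  */
	int num_slots = old_bound < cur_slots ? old_bound : cur_slots;

	printf("old loop bound: %d slots, but fcur->stack[] holds only %d\n",
	       old_bound, cur_slots);
	printf("clamped loop bound (min of both): %d slot(s)\n", num_slots);
	return 0;
}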


@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
int fentry_hit;
int fexit_hit;
int my_pid;
SEC("fentry/cmdline_proc_show")
int BPF_PROG(fentry_cmdline)
{
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
fentry_hit = 1;
return 0;
}
SEC("fexit/cmdline_proc_show")
int BPF_PROG(fexit_cmdline)
{
if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
fexit_hit = 1;
return 0;
}


@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
#include "bpf_tracing_net.h"
char _license[] SEC("license") = "GPL";
int sk_index;
int redirect_idx;
int trace_port;
int helper_ret;
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
__uint(max_entries, 100);
} sock_map SEC(".maps");
SEC("sockops")
int mptcp_sockmap_inject(struct bpf_sock_ops *skops)
{
struct bpf_sock *sk;
/* only accept specified connection */
if (skops->local_port != trace_port ||
skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
return 1;
sk = skops->sk;
if (!sk)
return 1;
/* update sk handler */
helper_ret = bpf_sock_map_update(skops, &sock_map, &sk_index, BPF_NOEXIST);
return 1;
}
SEC("sk_skb/stream_verdict")
int mptcp_sockmap_redirect(struct __sk_buff *skb)
{
/* redirect skb to the sk under sock_map[redirect_idx] */
return bpf_sk_redirect_map(skb, &sock_map, redirect_idx, 0);
}


@ -0,0 +1,49 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
extern bool CONFIG_UNWINDER_ORC __kconfig __weak;
/*
* This function is here to have CONFIG_UNWINDER_ORC
* used and added to object BTF.
*/
int unused(void)
{
return CONFIG_UNWINDER_ORC ? 0 : 1;
}
__u32 stack_key;
SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
stack_key = bpf_get_stackid(ctx, &stackmap, 0);
return 0;
}
SEC("raw_tp/bpf_testmod_test_read")
int rawtp_test(void *ctx)
{
/* Skip ebpf program entry in the stack. */
stack_key = bpf_get_stackid(ctx, &stackmap, 0);
return 0;
}
char _license[] SEC("license") = "GPL";


@ -10,7 +10,7 @@ SEC("syscall")
__failure __msg("Possibly NULL pointer passed")
int stream_vprintk_null_arg(void *ctx)
{
bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL);
bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL);
return 0;
}
@ -18,7 +18,7 @@ SEC("syscall")
__failure __msg("R3 type=scalar expected=")
int stream_vprintk_scalar_arg(void *ctx)
{
bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL);
bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL);
return 0;
}
@ -26,7 +26,7 @@ SEC("syscall")
__failure __msg("arg#1 doesn't point to a const string")
int stream_vprintk_string_arg(void *ctx)
{
bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL);
bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL);
return 0;
}


@ -66,7 +66,7 @@ int oncpu_hash_map(struct pt_regs *args)
if (!work)
return 0;
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@ -80,7 +80,7 @@ int oncpu_array_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL);
bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
return 0;
}
@ -102,6 +102,6 @@ int oncpu_lru_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&lrumap, &key);
if (!work || work->data[0])
return 0;
bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
return 0;
}


@ -53,7 +53,7 @@ int mismatch_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@ -65,7 +65,7 @@ int no_map_task_work(struct pt_regs *args)
struct bpf_task_work tw;
task = bpf_get_current_task_btf();
bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
return 0;
}
@ -76,7 +76,7 @@ int task_work_null(struct pt_regs *args)
struct task_struct *task;
task = bpf_get_current_task_btf();
bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
return 0;
}
@ -91,6 +91,6 @@ int map_null(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
return 0;
}


@ -51,7 +51,7 @@ int schedule_task_work(void *ctx)
if (!work)
return 0;
}
err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
process_work, NULL);
if (err)
__sync_fetch_and_add(&schedule_error, 1);


@ -417,6 +417,30 @@ noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
noinline void bpf_testmod_stacktrace_test(void)
{
/* used for stacktrace test as attach function */
asm volatile ("");
}
noinline void bpf_testmod_stacktrace_test_3(void)
{
bpf_testmod_stacktrace_test();
asm volatile ("");
}
noinline void bpf_testmod_stacktrace_test_2(void)
{
bpf_testmod_stacktrace_test_3();
asm volatile ("");
}
noinline void bpf_testmod_stacktrace_test_1(void)
{
bpf_testmod_stacktrace_test_2();
asm volatile ("");
}
int bpf_testmod_fentry_ok;
noinline ssize_t
@ -497,6 +521,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
21, 22, 23, 24, 25, 26) != 231)
goto out;
bpf_testmod_stacktrace_test_1();
bpf_testmod_fentry_ok = 1;
out:
return -EIO; /* always fail */