entry: Split up exit_to_user_mode_prepare()
exit_to_user_mode_prepare() is used for both interrupts and syscalls, but
there is extra rseq work, which is only required in the interrupt exit
case. Split up the function and provide wrappers for syscalls and
interrupts, which allows the rseq exit work to be separated in the next
step.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084307.782234789@linutronix.de
commit 70fe25a3bc
parent 3db6b38dfe
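For illustration only, a minimal standalone sketch (not kernel code) of the wrapper structure this patch introduces: a shared __exit_to_user_mode_prepare()/__exit_to_user_mode_validate() pair plus per-path wrappers. All helper bodies here are printf() stubs standing in for the real exit work; only the call structure mirrors the patch.

/* Standalone userspace sketch of the split; all helpers are stubs. */
#include <stdio.h>

struct pt_regs { unsigned long dummy; };

/* Stands in for the common exit work loop + arch hook. */
static inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
{
	(void)regs;
	printf("common exit work\n");
}

/* Stands in for the rseq exit handling the next step separates out. */
static inline void rseq_exit_to_user_mode(void)
{
	printf("rseq exit work\n");
}

/* Stands in for the sanity checks before returning to user space. */
static inline void __exit_to_user_mode_validate(void)
{
	printf("validate kernel state\n");
}

/* Syscall-exit wrapper: currently identical to the irqentry variant. */
static inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	__exit_to_user_mode_prepare(regs);
	rseq_exit_to_user_mode();
	__exit_to_user_mode_validate();
}

/* Interrupt-exit wrapper: its own entry point lets it diverge later. */
static inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	__exit_to_user_mode_prepare(regs);
	rseq_exit_to_user_mode();
	__exit_to_user_mode_validate();
}

int main(void)
{
	struct pt_regs regs = { 0 };

	syscall_exit_to_user_mode_prepare(&regs);	/* syscall return path */
	irqentry_exit_to_user_mode_prepare(&regs);	/* interrupt return path */
	return 0;
}

Both wrappers call the same three helpers today; the point of the split is that the next step can move or drop the rseq call in one path without touching the other.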
@@ -100,7 +100,7 @@ static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
 static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
 {
 	local_irq_disable();
-	exit_to_user_mode_prepare(regs);
+	exit_to_user_mode_prepare_legacy(regs);
 	local_daif_mask();
 	mte_check_tfsr_exit();
 	exit_to_user_mode();
@@ -156,7 +156,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
 	if (unlikely(work & SYSCALL_WORK_EXIT))
 		syscall_exit_work(regs, work);
 	local_irq_disable_exit_to_user();
-	exit_to_user_mode_prepare(regs);
+	syscall_exit_to_user_mode_prepare(regs);
 }
 
 /**
@@ -201,7 +201,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
 unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
 
 /**
- * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
  * @regs: Pointer to pt_regs on entry stack
  *
  * 1) check that interrupts are disabled
@@ -209,8 +209,10 @@ unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work
  * 3) call exit_to_user_mode_loop() if any flags from
  *    EXIT_TO_USER_MODE_WORK are set
  * 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly, use the syscall/irqentry_ prefixed variants below
  */
-static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
 {
 	unsigned long ti_work;
 
@@ -224,15 +226,52 @@ static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
 	ti_work = exit_to_user_mode_loop(regs, ti_work);
 
 	arch_exit_to_user_mode_prepare(regs, ti_work);
+}
 
-	rseq_exit_to_user_mode();
-
+static __always_inline void __exit_to_user_mode_validate(void)
+{
 	/* Ensure that kernel state is sane for a return to userspace */
 	kmap_assert_nomap();
 	lockdep_assert_irqs_disabled();
 	lockdep_sys_exit();
 }
 
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+	__exit_to_user_mode_prepare(regs);
+	rseq_exit_to_user_mode();
+	__exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	__exit_to_user_mode_prepare(regs);
+	rseq_exit_to_user_mode();
+	__exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	__exit_to_user_mode_prepare(regs);
+	rseq_exit_to_user_mode();
+	__exit_to_user_mode_validate();
+}
+
 /**
  * exit_to_user_mode - Fixup state when exiting to user mode
  *
@@ -297,7 +336,7 @@ static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
 static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
 {
 	instrumentation_begin();
-	exit_to_user_mode_prepare(regs);
+	irqentry_exit_to_user_mode_prepare(regs);
 	instrumentation_end();
 	exit_to_user_mode();
 }