Merge branch 'for-next/uprobes' into for-next/core
* for-next/uprobes:
  arm64: probes: Fix incorrect bl/blr address and register usage
  uprobes: uprobe_warn should use passed task
  arm64: Kconfig: Remove GCS restrictions on UPROBES
  arm64: uprobes: Add GCS support to uretprobes
  arm64: probes: Add GCS support to bl/blr/ret
  arm64: uaccess: Add additional userspace GCS accessors
  arm64: uaccess: Move existing GCS accessors definitions to gcs.h
  arm64: probes: Break ret out from bl/blr

commit 4e4e36dce3
arch/arm64/Kconfig

@@ -2213,7 +2213,6 @@ config ARM64_GCS
 	default y
 	select ARCH_HAS_USER_SHADOW_STACK
 	select ARCH_USES_HIGH_VMA_FLAGS
-	depends on !UPROBES
 	help
 	  Guarded Control Stack (GCS) provides support for a separate
 	  stack with restricted access which contains only return
arch/arm64/include/asm/gcs.h

@@ -21,7 +21,7 @@ static inline void gcsstr(u64 *addr, u64 val)
 	register u64 *_addr __asm__ ("x0") = addr;
 	register long _val __asm__ ("x1") = val;
 
-	/* GCSSTTR x1, x0 */
+	/* GCSSTTR x1, [x0] */
 	asm volatile(
 	".inst 0xd91f1c01\n"
 	:
@@ -81,6 +81,82 @@ static inline int gcs_check_locked(struct task_struct *task,
 	return 0;
 }
 
+static inline int gcssttr(unsigned long __user *addr, unsigned long val)
+{
+	register unsigned long __user *_addr __asm__ ("x0") = addr;
+	register unsigned long _val __asm__ ("x1") = val;
+	int err = 0;
+
+	/* GCSSTTR x1, [x0] */
+	asm volatile(
+	"1: .inst 0xd91f1c01\n"
+	"2: \n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
+	: "+r" (err)
+	: "rZ" (_val), "r" (_addr)
+	: "memory");
+
+	return err;
+}
+
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+				int *err)
+{
+	int ret;
+
+	if (!access_ok((char __user *)addr, sizeof(u64))) {
+		*err = -EFAULT;
+		return;
+	}
+
+	uaccess_ttbr0_enable();
+	ret = gcssttr(addr, val);
+	if (ret != 0)
+		*err = ret;
+	uaccess_ttbr0_disable();
+}
+
+static inline void push_user_gcs(unsigned long val, int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+
+	gcspr -= sizeof(u64);
+	put_user_gcs(val, (unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr, SYS_GCSPR_EL0);
+}
+
+/*
+ * Unlike put/push_user_gcs() above, get/pop_user_gcs() doesn't
+ * validate the GCS permission is set on the page being read. This
+ * differs from how the hardware works when it consumes data stored at
+ * GCSPR. Callers should ensure this is acceptable.
+ */
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+	unsigned long ret;
+	u64 load = 0;
+
+	/* Ensure previous GCS operations are visible before we read the page */
+	gcsb_dsync();
+	ret = copy_from_user(&load, addr, sizeof(load));
+	if (ret != 0)
+		*err = ret;
+	return load;
+}
+
+static inline u64 pop_user_gcs(int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+	u64 read_val;
+
+	read_val = get_user_gcs((__force unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);
+
+	return read_val;
+}
+
 #else
 
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
@@ -91,6 +167,10 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
 static inline void gcs_set_el0_mode(struct task_struct *task) { }
 static inline void gcs_free(struct task_struct *task) { }
 static inline void gcs_preserve_current_state(void) { }
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+				int *err) { }
+static inline void push_user_gcs(unsigned long val, int *err) { }
+
 static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 						   const struct kernel_clone_args *args)
 {
@@ -101,6 +181,15 @@ static inline int gcs_check_locked(struct task_struct *task,
 {
 	return 0;
 }
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+	*err = -EFAULT;
+	return 0;
+}
+static inline u64 pop_user_gcs(int *err)
+{
+	return 0;
+}
 
 #endif
 
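The accessors added above share one convention: failures come back through an int *err out-parameter, and GCSPR_EL0 is only moved once the access has actually succeeded. As a rough illustration only (plain user-space C, not kernel code; the array, its size and all names below are invented), the push/pop semantics amount to:

#include <stdint.h>
#include <stdio.h>

/* Toy shadow stack: 16 eight-byte slots; "gcspr" indexes the current top
 * and grows downward, mirroring "gcspr -= sizeof(u64)" in push_user_gcs(). */
static uint64_t gcs[16];
static size_t gcspr = 16;			/* empty: one past the last slot */

static void push_user_gcs_model(uint64_t val, int *err)
{
	if (gcspr == 0) {			/* the real accessor would fault instead */
		*err = -14;			/* stand-in for -EFAULT */
		return;
	}
	gcs[gcspr - 1] = val;			/* store first ... */
	gcspr--;				/* ... only then move the pointer down */
}

static uint64_t pop_user_gcs_model(int *err)
{
	if (gcspr == sizeof(gcs) / sizeof(gcs[0])) {
		*err = -14;
		return 0;
	}
	return gcs[gcspr++];			/* read the entry, then move the pointer up */
}

int main(void)
{
	int err = 0;
	uint64_t lr = 0x400123;			/* invented return address */

	push_user_gcs_model(lr, &err);		/* simulated BL: record the return address */
	if (!err && pop_user_gcs_model(&err) == lr && !err)
		puts("return address matches the shadow stack entry");
	return 0;
}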
arch/arm64/include/asm/uaccess.h

@@ -502,44 +502,4 @@ static inline size_t probe_subpage_writeable(const char __user *uaddr,
 
 #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
 
-#ifdef CONFIG_ARM64_GCS
-
-static inline int gcssttr(unsigned long __user *addr, unsigned long val)
-{
-	register unsigned long __user *_addr __asm__ ("x0") = addr;
-	register unsigned long _val __asm__ ("x1") = val;
-	int err = 0;
-
-	/* GCSSTTR x1, x0 */
-	asm volatile(
-	"1: .inst 0xd91f1c01\n"
-	"2: \n"
-	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
-	: "+r" (err)
-	: "rZ" (_val), "r" (_addr)
-	: "memory");
-
-	return err;
-}
-
-static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
-				int *err)
-{
-	int ret;
-
-	if (!access_ok((char __user *)addr, sizeof(u64))) {
-		*err = -EFAULT;
-		return;
-	}
-
-	uaccess_ttbr0_enable();
-	ret = gcssttr(addr, val);
-	if (ret != 0)
-		*err = ret;
-	uaccess_ttbr0_disable();
-}
-
-
-#endif /* CONFIG_ARM64_GCS */
-
 #endif /* __ASM_UACCESS_H */
arch/arm64/kernel/probes/decode-insn.c

@@ -108,9 +108,10 @@ arm_probe_decode_insn(u32 insn, struct arch_probe_insn *api)
 		   aarch64_insn_is_bl(insn)) {
 		api->handler = simulate_b_bl;
 	} else if (aarch64_insn_is_br(insn) ||
-		   aarch64_insn_is_blr(insn) ||
-		   aarch64_insn_is_ret(insn)) {
-		api->handler = simulate_br_blr_ret;
+		   aarch64_insn_is_blr(insn)) {
+		api->handler = simulate_br_blr;
+	} else if (aarch64_insn_is_ret(insn)) {
+		api->handler = simulate_ret;
 	} else {
 		/*
 		 * Instruction cannot be stepped out-of-line and we don't
arch/arm64/kernel/probes/simulate-insn.c

@@ -13,6 +13,7 @@
 #include <asm/traps.h>
 
 #include "simulate-insn.h"
+#include "asm/gcs.h"
 
 #define bbl_displacement(insn) \
 	sign_extend32(((insn) & 0x3ffffff) << 2, 27)
@@ -49,6 +50,21 @@ static inline u32 get_w_reg(struct pt_regs *regs, int reg)
 	return lower_32_bits(pt_regs_read_reg(regs, reg));
 }
 
+static inline int update_lr(struct pt_regs *regs, long addr)
+{
+	int err = 0;
+
+	if (user_mode(regs) && task_gcs_el0_enabled(current)) {
+		push_user_gcs(addr, &err);
+		if (err) {
+			force_sig(SIGSEGV);
+			return err;
+		}
+	}
+	procedure_link_pointer_set(regs, addr);
+	return err;
+}
+
 static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
 {
 	int xn = opcode & 0x1f;
@@ -107,9 +123,9 @@ simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs)
 {
 	int disp = bbl_displacement(opcode);
 
 	/* Link register is x30 */
 	if (opcode & (1 << 31))
-		set_x_reg(regs, 30, addr + 4);
+		if (update_lr(regs, addr + 4))
+			return;
 
 	instruction_pointer_set(regs, addr + disp);
 }
@@ -126,16 +142,34 @@ simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs)
 }
 
 void __kprobes
-simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs)
+simulate_br_blr(u32 opcode, long addr, struct pt_regs *regs)
 {
 	int xn = (opcode >> 5) & 0x1f;
+	u64 b_target = get_x_reg(regs, xn);
 
-	/* update pc first in case we're doing a "blr lr" */
-	instruction_pointer_set(regs, get_x_reg(regs, xn));
-
 	/* Link register is x30 */
 	if (((opcode >> 21) & 0x3) == 1)
-		set_x_reg(regs, 30, addr + 4);
+		if (update_lr(regs, addr + 4))
+			return;
+
+	instruction_pointer_set(regs, b_target);
 }
 
 void __kprobes
+simulate_ret(u32 opcode, long addr, struct pt_regs *regs)
+{
+	u64 ret_addr;
+	int err = 0;
+	int xn = (opcode >> 5) & 0x1f;
+	u64 r_target = get_x_reg(regs, xn);
+
+	if (user_mode(regs) && task_gcs_el0_enabled(current)) {
+		ret_addr = pop_user_gcs(&err);
+		if (err || ret_addr != r_target) {
+			force_sig(SIGSEGV);
+			return;
+		}
+	}
+	instruction_pointer_set(regs, r_target);
+}
+
+void __kprobes
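simulate_br_blr() and simulate_ret() above read the branch target register from bits [9:5] of the opcode and use bits [22:21] to tell BLR, the only linking variant, apart from BR and RET. A small stand-alone sketch of that field extraction; the two example opcodes are ordinary A64 encodings for "blr x3" and "ret" picked purely for illustration, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the field extraction used by simulate_br_blr()/simulate_ret():
 * Rn (the branch target register) is bits [9:5]; bits [22:21] are
 * 0 for BR, 1 for BLR (the linking form) and 2 for RET. */
static void decode(uint32_t insn)
{
	unsigned int xn = (insn >> 5) & 0x1f;
	unsigned int op = (insn >> 21) & 0x3;

	printf("x%-2u %s\n", xn, op == 0 ? "br" : op == 1 ? "blr" : "ret");
}

int main(void)
{
	decode(0xd63f0060);	/* blr x3 */
	decode(0xd65f03c0);	/* ret (defaults to x30) */
	return 0;
}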
arch/arm64/kernel/probes/simulate-insn.h

@@ -11,7 +11,8 @@
 void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs);
 void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs);
 void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs);
-void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_br_blr(u32 opcode, long addr, struct pt_regs *regs);
+void simulate_ret(u32 opcode, long addr, struct pt_regs *regs);
 void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs);
 void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs);
 void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs);
arch/arm64/kernel/probes/uprobes.c

@@ -6,6 +6,7 @@
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
 #include <asm/cacheflush.h>
+#include <asm/gcs.h>
 
 #include "decode-insn.h"
 
@@ -159,11 +160,43 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
 					  struct pt_regs *regs)
 {
 	unsigned long orig_ret_vaddr;
+	unsigned long gcs_ret_vaddr;
+	int err = 0;
+	u64 gcspr;
 
 	orig_ret_vaddr = procedure_link_pointer(regs);
+
+	if (task_gcs_el0_enabled(current)) {
+		gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+		gcs_ret_vaddr = get_user_gcs((__force unsigned long __user *)gcspr, &err);
+		if (err) {
+			force_sig(SIGSEGV);
+			goto out;
+		}
+
+		/*
+		 * If the LR and GCS return addr don't match, then some kind of PAC
+		 * signing or control flow occurred since entering the probed function.
+		 * Likely because the user is attempting to retprobe on an instruction
+		 * that isn't a function boundary or inside a leaf function. Explicitly
+		 * abort this retprobe because it will generate a GCS exception.
+		 */
+		if (gcs_ret_vaddr != orig_ret_vaddr) {
+			orig_ret_vaddr = -1;
+			goto out;
+		}
+
+		put_user_gcs(trampoline_vaddr, (__force unsigned long __user *)gcspr, &err);
+		if (err) {
+			force_sig(SIGSEGV);
+			goto out;
+		}
+	}
+
 	/* Replace the return addr with trampoline addr */
 	procedure_link_pointer_set(regs, trampoline_vaddr);
 
+out:
 	return orig_ret_vaddr;
 }
 
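The comment block above states the invariant this hunk relies on: the trampoline address is only written to the shadow stack when the top GCS entry still equals the LR value being hijacked, and the retprobe is aborted (-1) otherwise. A stripped-down user-space model of that decision, with every name and value below invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define ABORTED ((uint64_t)-1)

/* Model of the GCS path in arch_uretprobe_hijack_return_addr(): patch the
 * shadow-stack slot only if it still matches the LR, otherwise abort the
 * retprobe rather than set up a guaranteed GCS fault on return. */
static uint64_t hijack_model(uint64_t *gcs_top, uint64_t lr, uint64_t trampoline)
{
	if (*gcs_top != lr)		/* PAC signing or control flow intervened */
		return ABORTED;

	*gcs_top = trampoline;		/* mirrors put_user_gcs(trampoline_vaddr, ...) */
	return lr;			/* the caller also rewrites the LR itself */
}

int main(void)
{
	uint64_t slot = 0x400123;	/* invented return address on the shadow stack */
	uint64_t ret = hijack_model(&slot, 0x400123, 0xdead0000);

	printf("orig %#llx, slot now %#llx\n",
	       (unsigned long long)ret, (unsigned long long)slot);
	return 0;
}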
kernel/events/uprobes.c

@@ -121,7 +121,7 @@ struct xol_area {
 
 static void uprobe_warn(struct task_struct *t, const char *msg)
 {
-	pr_warn("uprobe: %s:%d failed to %s\n", current->comm, current->pid, msg);
+	pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
 }
 
 /*
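The one-line change above matters whenever uprobe_warn() is handed a task other than the caller, for example a child whose uprobe state failed to copy during fork: built from current, the message names the wrong process. A contrived stand-alone illustration (the struct, names and message below are made up):

#include <stdio.h>

/* Minimal stand-in for task_struct: just enough fields to show why the
 * message must name the task passed in, not the caller. */
struct task { const char *comm; int pid; };

static void uprobe_warn_model(const struct task *t, const char *msg)
{
	printf("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}

int main(void)
{
	struct task current_task = { "bash", 100 };		/* the caller */
	struct task child        = { "fork-child", 101 };	/* task being set up */

	/* Before the fix, the warning would have named current_task (bash:100)
	 * even though the failure concerns the child. */
	uprobe_warn_model(&child, "duplicate xol area");	/* hypothetical message */
	(void)current_task;
	return 0;
}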