ARM development for 6.19-rc1
ARM Development changes for 6.19-rc1:

 - disable jump label and high PTE for PREEMPT RT kernels
 - fix input operand modification in load_unaligned_zeropad()
 - fix hash_name() / fault path induced warnings
 - fix branch predictor hardening

The last three were only merged today after testing was complete.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEuNNh8scc2k/wOAE+9OeQG+StrGQFAmk5Z3cACgkQ9OeQG+St
rGT5/A//Tl81Ae4+Dj5CvC73Qzm3xsaPpTdZbw/TnmiWLGeps5h1qunZsdT5kA0K
PGGPEVEyS5F1hgl0gKEUtWyi6qs37tvB72pG/fPX2EBuryq0PLizWa2ejPw+z1J6
fKpX3C+5Fo+z8/jcL1uc0fZtw2ILnQTbxjIyKx5oJrX8yVvU+tKtfegBp6hZbQdj
z9QArtc0zMyT71eEsiL0O0/cHx8m1jbtZAb8VgIr6xtTR5pdvq38rovMmua0XgEG
v17pSRdfqxZldXcWNMHZlTFcmm1Zrq6twpkPMnks6TZ0u/YCqcIsY7gQLHqUCJH7
VqENeqIXN4mvufz8Hb5tQqME9NDmoJsKrDDMyyOHpe2sW4j+uVs6CAZTxmaGXbw0
Hf1xf9I4sonCh/TDloBB1++jcZWIIbCBpZ0/OuZJHEp9SJgwrk4wlFvMWdVfuzfZ
gZ2TEa6r7KzqJSFEcfrNoIDvJXBNlAw7mL+QIEPLEAuJvSceqTqhWC+Kx7VS/8JZ
0lAuOP42IG6uX2K+VZ/I/agYb/Gip8HcrgletQFzid4C8QMkB7z3Dq76gbe/0f0f
US1mZFBnzujq/kDC43IzPphVU+5WTyn8YWCz2LzoTZ3RN+4SlIkq5ZnFreOk/aGU
sQj+SkNhJX9qFHdijONmqef6f7miIH8iPMmp58WFYvulrdYnMHg=
=dtsd
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux

Pull ARM updates from Russell King:

 - disable jump label and high PTE for PREEMPT RT kernels
 - fix input operand modification in load_unaligned_zeropad()
 - fix hash_name() / fault path induced warnings
 - fix branch predictor hardening

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux:
  ARM: fix branch predictor hardening
  ARM: fix hash_name() fault
  ARM: allow __do_kernel_fault() to report execution of memory faults
  ARM: group is_permission_fault() with is_translation_fault()
  ARM: 9464/1: fix input-only operand modification in load_unaligned_zeropad()
  ARM: 9461/1: Disable HIGHPTE on PREEMPT_RT kernels
  ARM: 9459/1: Disable jump-label on PREEMPT_RT
commit 29ba26af9a
arch/arm/Kconfig

@@ -82,7 +82,7 @@ config ARM
         select HAS_IOPORT
         select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
         select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && (!PREEMPT_RT || !SMP)
         select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
         select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
         select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
@@ -1213,7 +1213,7 @@ config HIGHMEM

 config HIGHPTE
         bool "Allocate 2nd-level pagetables from highmem" if EXPERT
-        depends on HIGHMEM
+        depends on HIGHMEM && !PREEMPT_RT
         default y
         help
           The VM uses one page of physical memory for each page table.
arch/arm/include/asm/word-at-a-time.h

@@ -67,7 +67,7 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-        unsigned long ret, offset;
+        unsigned long ret, tmp;

         /* Load word from unaligned pointer addr */
         asm(
@@ -75,9 +75,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
         "2:\n"
         " .pushsection .text.fixup,\"ax\"\n"
         " .align 2\n"
-        "3: and %1, %2, #0x3\n"
-        " bic %2, %2, #0x3\n"
-        " ldr %0, [%2]\n"
+        "3: bic %1, %2, #0x3\n"
+        " ldr %0, [%1]\n"
+        " and %1, %2, #0x3\n"
         " lsl %1, %1, #0x3\n"
 #ifndef __ARMEB__
         " lsr %0, %0, %1\n"
@@ -90,7 +90,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
         " .align 3\n"
         " .long 1b, 3b\n"
         " .popsection"
-        : "=&r" (ret), "=&r" (offset)
+        : "=&r" (ret), "=&r" (tmp)
         : "r" (addr), "Qo" (*(unsigned long *)addr));

         return ret;
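The two word-at-a-time hunks above address a subtle inline-assembly bug: addr is passed as an input-only operand ("r" (addr), operand %2), so the compiler is entitled to assume its register still holds addr after the asm, yet the old fixup rewrote it with "bic %2, %2, #0x3". The corrected sequence keeps all scratch arithmetic in the dedicated output operand %1 (now named tmp). For illustration only, here is a small stand-alone C model of what the corrected little-endian fixup path computes; the names and example values are invented, only the arithmetic mirrors the assembly:

#include <stdio.h>

static unsigned long zeropad_fixup_model(unsigned long aligned_word,
                                         unsigned long addr)
{
        /*
         * "3: bic %1, %2, #0x3" / "ldr %0, [%1]": reload from the aligned
         * address (addr & ~3), which is known to be mapped, using the
         * scratch operand %1 (tmp) instead of rewriting the input %2 (addr).
         *
         * "and %1, %2, #0x3" / "lsl %1, %1, #0x3": byte offset times 8.
         *
         * "lsr %0, %0, %1" (little-endian case): shift the wanted bytes
         * down; the vacated high bytes become the zero padding.
         */
        return aligned_word >> ((addr & 3) * 8);
}

int main(void)
{
        /*
         * Hypothetical layout: the last mapped word of a page holds the
         * bytes 'x','a','b','c' and the caller asked for an unaligned load
         * one byte in (a string "abc" ending at the page boundary).
         */
        unsigned long aligned_word = 0x63626178UL;      /* "xabc", little-endian */
        unsigned long addr = 0x1001UL;                  /* aligned base + 1 */

        printf("%#lx\n", zeropad_fixup_model(aligned_word, addr));
        /* prints 0x636261: "abc" plus zero padding above it */
        return 0;
}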
arch/arm/mm/alignment.c

@@ -19,10 +19,11 @@
 #include <linux/init.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
+#include <linux/unaligned.h>

 #include <asm/cp15.h>
 #include <asm/system_info.h>
-#include <linux/unaligned.h>
+#include <asm/system_misc.h>
 #include <asm/opcodes.h>

 #include "fault.h"
@@ -809,6 +810,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         int thumb2_32b = 0;
         int fault;

+        if (addr >= TASK_SIZE && user_mode(regs))
+                harden_branch_predictor();
+
         if (interrupts_enabled(regs))
                 local_irq_enable();

arch/arm/mm/fault.c

@@ -128,6 +128,19 @@ static inline bool is_translation_fault(unsigned int fsr)
         return false;
 }

+static inline bool is_permission_fault(unsigned int fsr)
+{
+        int fs = fsr_fs(fsr);
+#ifdef CONFIG_ARM_LPAE
+        if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
+                return true;
+#else
+        if (fs == FS_L1_PERM || fs == FS_L2_PERM)
+                return true;
+#endif
+        return false;
+}
+
 static void die_kernel_fault(const char *msg, struct mm_struct *mm,
                              unsigned long addr, unsigned int fsr,
                              struct pt_regs *regs)
@@ -162,6 +175,8 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
          */
         if (addr < PAGE_SIZE) {
                 msg = "NULL pointer dereference";
+        } else if (is_permission_fault(fsr) && fsr & FSR_LNX_PF) {
+                msg = "execution of memory";
         } else {
                 if (is_translation_fault(fsr) &&
                     kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
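Together with the relocated is_permission_fault() helper, the hunk above lets __do_kernel_fault() report an attempt to execute non-executable memory instead of a generic paging error: a permission fault whose FSR carries the Linux-private prefetch-abort flag (FSR_LNX_PF) means the kernel jumped into memory it may not execute. Below is a rough stand-alone model of the resulting message selection; the boolean parameters stand in for the kernel's FSR decoding, PAGE_SIZE is assumed to be 4 KiB, and the kfence handling in the real else branch is omitted:

#include <stdio.h>
#include <stdbool.h>

/*
 * Sketch of the message selection in __do_kernel_fault() after this change.
 * The booleans are stand-ins for is_permission_fault() and the FSR_LNX_PF
 * prefetch-abort flag; the real decoding lives in arch/arm/mm/fault.c.
 */
static const char *kernel_fault_msg(unsigned long addr,
                                    bool permission_fault,
                                    bool prefetch_abort)
{
        if (addr < 4096)                        /* addr < PAGE_SIZE (assumed 4 KiB) */
                return "NULL pointer dereference";
        if (permission_fault && prefetch_abort) /* the new case */
                return "execution of memory";
        return "paging request";                /* existing fallback */
}

int main(void)
{
        printf("%s\n", kernel_fault_msg(0x00000010UL, false, false));
        printf("%s\n", kernel_fault_msg(0xc0800000UL, true,  true));
        printf("%s\n", kernel_fault_msg(0xc0800000UL, false, false));
        return 0;
}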
@@ -183,9 +198,6 @@ __do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
 {
         struct task_struct *tsk = current;

-        if (addr > TASK_SIZE)
-                harden_branch_predictor();
-
 #ifdef CONFIG_DEBUG_USER
         if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
             ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
@@ -225,19 +237,6 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 }

 #ifdef CONFIG_MMU
-static inline bool is_permission_fault(unsigned int fsr)
-{
-        int fs = fsr_fs(fsr);
-#ifdef CONFIG_ARM_LPAE
-        if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
-                return true;
-#else
-        if (fs == FS_L1_PERM || fs == FS_L2_PERM)
-                return true;
-#endif
-        return false;
-}
-
 #ifdef CONFIG_CPU_TTBR0_PAN
 static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
 {
@@ -259,6 +258,37 @@ static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
 }
 #endif

+static int __kprobes
+do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr,
+                             unsigned int fsr, struct pt_regs *regs)
+{
+        if (user_mode(regs)) {
+                /*
+                 * Fault from user mode for a kernel space address. User mode
+                 * should not be faulting in kernel space, which includes the
+                 * vector/khelper page. Handle the branch predictor hardening
+                 * while interrupts are still disabled, then send a SIGSEGV.
+                 */
+                harden_branch_predictor();
+                __do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
+        } else {
+                /*
+                 * Fault from kernel mode. Enable interrupts if they were
+                 * enabled in the parent context. Section (upper page table)
+                 * translation faults are handled via do_translation_fault(),
+                 * so we will only get here for a non-present kernel space
+                 * PTE or PTE permission fault. This may happen in exceptional
+                 * circumstances and need the fixup tables to be walked.
+                 */
+                if (interrupts_enabled(regs))
+                        local_irq_enable();
+
+                __do_kernel_fault(mm, addr, fsr, regs);
+        }
+
+        return 0;
+}
+
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
@@ -272,6 +302,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         if (kprobe_page_fault(regs, fsr))
                 return 0;

+        /*
+         * Handle kernel addresses faults separately, which avoids touching
+         * the mmap lock from contexts that are not able to sleep.
+         */
+        if (addr >= TASK_SIZE)
+                return do_kernel_address_page_fault(mm, addr, fsr, regs);
+
         /* Enable interrupts if they were enabled in the parent context. */
         if (interrupts_enabled(regs))
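With the early return above, do_page_fault() no longer reaches the mmap-lock path for a kernel address: everything at or above TASK_SIZE is diverted to do_kernel_address_page_fault(), which either hardens the branch predictor and delivers SIGSEGV (user mode) or walks the exception fixup tables (kernel mode) without sleeping. A minimal sketch of that routing follows; MODEL_TASK_SIZE is a placeholder, not the real configuration-dependent constant:

#include <stdio.h>

/* Sketch of the fault routing introduced above; the enum values stand in
 * for the two handlers.
 */
#define MODEL_TASK_SIZE 0xbf000000UL    /* placeholder user/kernel split */

enum fault_path {
        USER_VMA_PATH,          /* may sleep, takes the mmap lock */
        KERNEL_ADDRESS_PATH     /* do_kernel_address_page_fault(), no mmap lock */
};

static enum fault_path route_fault(unsigned long addr)
{
        if (addr >= MODEL_TASK_SIZE)
                return KERNEL_ADDRESS_PATH;
        return USER_VMA_PATH;
}

int main(void)
{
        printf("%d\n", route_fault(0x00400000UL));      /* 0: user address */
        printf("%d\n", route_fault(0xffff0000UL));      /* 1: kernel address */
        return 0;
}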
@@ -448,16 +484,20 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  * We enter here because the first level page table doesn't contain
  * a valid entry for the address.
  *
- * If the address is in kernel space (>= TASK_SIZE), then we are
- * probably faulting in the vmalloc() area.
+ * If this is a user address (addr < TASK_SIZE), we handle this as a
+ * normal page fault. This leaves the remainder of the function to handle
+ * kernel address translation faults.
  *
- * If the init_task's first level page tables contains the relevant
- * entry, we copy the it to this task. If not, we send the process
- * a signal, fixup the exception, or oops the kernel.
+ * Since user mode is not permitted to access kernel addresses, pass these
+ * directly to do_kernel_address_page_fault() to handle.
  *
- * NOTE! We MUST NOT take any locks for this case. We may be in an
- * interrupt or a critical region, and should only copy the information
- * from the master page table, nothing more.
+ * Otherwise, we're probably faulting in the vmalloc() area, so try to fix
+ * that up. Note that we must not take any locks or enable interrupts in
+ * this case.
+ *
+ * If vmalloc() fixup fails, that means the non-leaf page tables did not
+ * contain an entry for this address, so handle this via
+ * do_kernel_address_page_fault().
  */
 #ifdef CONFIG_MMU
 static int __kprobes
@@ -523,7 +563,8 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
         return 0;

 bad_area:
-        do_bad_area(addr, fsr, regs);
+        do_kernel_address_page_fault(current->mm, addr, fsr, regs);
+
         return 0;
 }
 #else /* CONFIG_MMU */
@@ -543,7 +584,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+        /*
+         * If this is a kernel address, but from user mode, then userspace
+         * is trying bad stuff. Invoke the branch predictor handling.
+         * Interrupts are disabled here.
+         */
+        if (addr >= TASK_SIZE && user_mode(regs))
+                harden_branch_predictor();
+
         do_bad_area(addr, fsr, regs);

         return 0;
 }
 #endif /* CONFIG_ARM_LPAE */