LoongArch changes for v6.19

1, Add basic LoongArch32 support;
2, Select HAVE_ARCH_BITREVERSE in Kconfig;
3, Fix build and boot for CONFIG_RANDSTRUCT;
4, Correct the calculation logic of thread_count;
5, Some bug fixes and other small changes.

Note: The build infrastructure for LoongArch32 is not enabled yet, because
we still need to adjust the irqchip drivers and wait for the GNU toolchain
support to land upstream first.
 -----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmk2pCUWHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImevedD/9glTZwBqWOnBIaw1ldxfmoEQS2
 PCFxxZPG+cHO3Kgg5LrMgz/2UI2bOnrcx5a7Ma0v4TDFyH9Cp/p4NGosndsq5b1v
 3xoPXytKjiKJMyWNAZy7ytP8FmYs0v6rIu3ZvW6N4GPyNm3VC96LdxXtAXnio134
 TruTALLZ6U/EDUz6VmyRgWAR8HXgS08v3Vuh4giZABRHQAWM7u8tUxGZNvjkLqKb
 TsQz8Ko42VEkbwD5MCMIQrviJBOReycANU9jlmJ1JNsJVKjqcjtSpCpO5cnJg3iL
 93M3wiTy/iWrIdQwjGeXgqco8iQItozVIf0FwVum82OLyoxx7G9qZAx5aQg4zjkA
 JvFB0Quecs8e3HaGJCPGytv2r56cMTh73KzarZBXhCPDLun2pssLtqVwYxw/7KOB
 MoVOekFQs6+8Un9Rffrj6PGeZQwflpsp7Z4khEpf4qULuI6BpRCFLojFhwk6e4V/
 DzoYmdpg1TnwNlp74aMv7gWDfnBBfKXDAswbtyfU/aY0+jrf0f5pNfT100Dd0PAw
 VNzNjkH/JGml/0iqf7T3p6h/XalR7/obsg4O4L39I6A1CL3gkZf35jtMtzf7RsIp
 cVYtumlFvrU7hzKpKcq0OXFji7/8HbAOgrIno5MQGjFfKPJPk+wb5XpgWKdhVtC1
 Y94az2s8PmYA4/v9QA==
 =G9hi
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - Add basic LoongArch32 support

   Note: The build infrastructure for LoongArch32 is not enabled yet,
   because we still need to adjust the irqchip drivers and wait for the
   GNU toolchain support to land upstream first.

 - Select HAVE_ARCH_BITREVERSE in Kconfig

 - Fix build and boot for CONFIG_RANDSTRUCT

 - Correct the calculation logic of thread_count

 - Some bug fixes and other small changes

* tag 'loongarch-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (22 commits)
  LoongArch: Adjust default config files for 32BIT/64BIT
  LoongArch: Adjust VDSO/VSYSCALL for 32BIT/64BIT
  LoongArch: Adjust misc routines for 32BIT/64BIT
  LoongArch: Adjust user accessors for 32BIT/64BIT
  LoongArch: Adjust system call for 32BIT/64BIT
  LoongArch: Adjust module loader for 32BIT/64BIT
  LoongArch: Adjust time routines for 32BIT/64BIT
  LoongArch: Adjust process management for 32BIT/64BIT
  LoongArch: Adjust memory management for 32BIT/64BIT
  LoongArch: Adjust boot & setup for 32BIT/64BIT
  LoongArch: Adjust common macro definitions for 32BIT/64BIT
  LoongArch: Add adaptive CSR accessors for 32BIT/64BIT
  LoongArch: Add atomic operations for 32BIT/64BIT
  LoongArch: Add new PCI ID for pci_fixup_vgadev()
  LoongArch: Add and use some macros for AVEC
  LoongArch: Correct the calculation logic of thread_count
  LoongArch: Use unsigned long for _end and _text
  LoongArch: Use __pmd()/__pte() for swap entry conversions
  LoongArch: Fix arch_dup_task_struct() for CONFIG_RANDSTRUCT
  LoongArch: Fix build errors for CONFIG_RANDSTRUCT
  ...
Branch: master
Linus Torvalds, 2025-12-13 05:44:03 +12:00
commit 9551a26f17
77 changed files with 3002 additions and 772 deletions


@ -115,6 +115,7 @@ config LOONGARCH
select GPIOLIB
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
@ -567,6 +568,10 @@ config ARCH_STRICT_ALIGN
to run kernel only on systems with h/w unaligned access support in
order to optimise for performance.
config CPU_HAS_AMO
bool
default 64BIT
config CPU_HAS_FPU
bool
default y


@ -5,7 +5,12 @@
boot := arch/loongarch/boot
KBUILD_DEFCONFIG := loongson3_defconfig
ifeq ($(shell uname -m),loongarch32)
KBUILD_DEFCONFIG := loongson32_defconfig
else
KBUILD_DEFCONFIG := loongson64_defconfig
endif
KBUILD_DTBS := dtbs
image-name-y := vmlinux

File diff suppressed because it is too large.


@ -435,7 +435,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
CONFIG_SYSFB_SIMPLEFB=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
@ -530,6 +529,7 @@ CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LLBITMAP=y
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
@ -738,6 +738,7 @@ CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_8250_LOONGSON=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_PRINTER=m
@ -801,6 +802,8 @@ CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_EFIDRM=y
CONFIG_DRM_SIMPLEDRM=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@ -811,9 +814,7 @@ CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
CONFIG_DRM_SIMPLEDRM=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y


@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
syscall-y += syscall_table_32.h
syscall-y += syscall_table_64.h
generated-y += orc_hash.h


@ -38,11 +38,20 @@ extern unsigned long vm_map_base;
#endif
#ifndef WRITECOMBINE_BASE
#ifdef CONFIG_32BIT
#define WRITECOMBINE_BASE CSR_DMW0_BASE
#else
#define WRITECOMBINE_BASE CSR_DMW2_BASE
#endif
#endif
#ifdef CONFIG_32BIT
#define DMW_PABITS 29
#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1)
#else
#define DMW_PABITS 48
#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1)
#endif
/*
* Memory above this physical address will be considered highmem.
@ -112,7 +121,11 @@ extern unsigned long vm_map_base;
/*
* Returns the physical address of a KPRANGEx / XKPRANGE address
*/
#ifdef CONFIG_32BIT
#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK)
#else
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
#endif
/*
* On LoongArch, the I/O port mapping is as follows:


@ -72,11 +72,11 @@
#define INT_SUB sub.w
#define INT_L ld.w
#define INT_S st.w
#define INT_SLL slli.w
#define INT_SLLI slli.w
#define INT_SLLV sll.w
#define INT_SRL srli.w
#define INT_SRLI srli.w
#define INT_SRLV srl.w
#define INT_SRA srai.w
#define INT_SRAI srai.w
#define INT_SRAV sra.w
#endif
@ -86,11 +86,11 @@
#define INT_SUB sub.d
#define INT_L ld.d
#define INT_S st.d
#define INT_SLL slli.d
#define INT_SLLI slli.d
#define INT_SLLV sll.d
#define INT_SRL srli.d
#define INT_SRLI srli.d
#define INT_SRLV srl.d
#define INT_SRA srai.d
#define INT_SRAI srai.d
#define INT_SRAV sra.d
#endif
@ -100,15 +100,23 @@
#if (__SIZEOF_LONG__ == 4)
#define LONG_ADD add.w
#define LONG_ADDI addi.w
#define LONG_ALSL alsl.w
#define LONG_BSTRINS bstrins.w
#define LONG_BSTRPICK bstrpick.w
#define LONG_SUB sub.w
#define LONG_L ld.w
#define LONG_LI li.w
#define LONG_LPTR ld.w
#define LONG_S st.w
#define LONG_SLL slli.w
#define LONG_SPTR st.w
#define LONG_SLLI slli.w
#define LONG_SLLV sll.w
#define LONG_SRL srli.w
#define LONG_SRLI srli.w
#define LONG_SRLV srl.w
#define LONG_SRA srai.w
#define LONG_SRAI srai.w
#define LONG_SRAV sra.w
#define LONG_ROTR rotr.w
#define LONG_ROTRI rotri.w
#ifdef __ASSEMBLER__
#define LONG .word
@ -121,15 +129,23 @@
#if (__SIZEOF_LONG__ == 8)
#define LONG_ADD add.d
#define LONG_ADDI addi.d
#define LONG_ALSL alsl.d
#define LONG_BSTRINS bstrins.d
#define LONG_BSTRPICK bstrpick.d
#define LONG_SUB sub.d
#define LONG_L ld.d
#define LONG_LI li.d
#define LONG_LPTR ldptr.d
#define LONG_S st.d
#define LONG_SLL slli.d
#define LONG_SPTR stptr.d
#define LONG_SLLI slli.d
#define LONG_SLLV sll.d
#define LONG_SRL srli.d
#define LONG_SRLI srli.d
#define LONG_SRLV srl.d
#define LONG_SRA srai.d
#define LONG_SRAI srai.d
#define LONG_SRAV sra.d
#define LONG_ROTR rotr.d
#define LONG_ROTRI rotri.d
#ifdef __ASSEMBLER__
#define LONG .dword
@ -145,16 +161,23 @@
#if (__SIZEOF_POINTER__ == 4)
#define PTR_ADD add.w
#define PTR_ADDI addi.w
#define PTR_ALSL alsl.w
#define PTR_BSTRINS bstrins.w
#define PTR_BSTRPICK bstrpick.w
#define PTR_SUB sub.w
#define PTR_L ld.w
#define PTR_S st.w
#define PTR_LI li.w
#define PTR_SLL slli.w
#define PTR_LPTR ld.w
#define PTR_S st.w
#define PTR_SPTR st.w
#define PTR_SLLI slli.w
#define PTR_SLLV sll.w
#define PTR_SRL srli.w
#define PTR_SRLI srli.w
#define PTR_SRLV srl.w
#define PTR_SRA srai.w
#define PTR_SRAI srai.w
#define PTR_SRAV sra.w
#define PTR_ROTR rotr.w
#define PTR_ROTRI rotri.w
#define PTR_SCALESHIFT 2
@ -168,16 +191,23 @@
#if (__SIZEOF_POINTER__ == 8)
#define PTR_ADD add.d
#define PTR_ADDI addi.d
#define PTR_ALSL alsl.d
#define PTR_BSTRINS bstrins.d
#define PTR_BSTRPICK bstrpick.d
#define PTR_SUB sub.d
#define PTR_L ld.d
#define PTR_S st.d
#define PTR_LI li.d
#define PTR_SLL slli.d
#define PTR_LPTR ldptr.d
#define PTR_S st.d
#define PTR_SPTR stptr.d
#define PTR_SLLI slli.d
#define PTR_SLLV sll.d
#define PTR_SRL srli.d
#define PTR_SRLI srli.d
#define PTR_SRLV srl.d
#define PTR_SRA srai.d
#define PTR_SRAI srai.d
#define PTR_SRAV sra.d
#define PTR_ROTR rotr.d
#define PTR_ROTRI rotri.d
#define PTR_SCALESHIFT 3
@ -190,10 +220,17 @@
/* Annotate a function as being unsuitable for kprobes. */
#ifdef CONFIG_KPROBES
#ifdef CONFIG_32BIT
#define _ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
.long name; \
.popsection
#else
#define _ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
.quad name; \
.popsection
#endif
#else
#define _ASM_NOKPROBE(name)
#endif


@ -5,43 +5,55 @@
#ifndef _ASM_ASMMACRO_H
#define _ASM_ASMMACRO_H
#include <linux/sizes.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
#ifdef CONFIG_64BIT
#define TASK_STRUCT_OFFSET 0
#else
#define TASK_STRUCT_OFFSET 2000
#endif
.macro cpu_save_nonscratch thread
stptr.d s0, \thread, THREAD_REG23
stptr.d s1, \thread, THREAD_REG24
stptr.d s2, \thread, THREAD_REG25
stptr.d s3, \thread, THREAD_REG26
stptr.d s4, \thread, THREAD_REG27
stptr.d s5, \thread, THREAD_REG28
stptr.d s6, \thread, THREAD_REG29
stptr.d s7, \thread, THREAD_REG30
stptr.d s8, \thread, THREAD_REG31
stptr.d sp, \thread, THREAD_REG03
stptr.d fp, \thread, THREAD_REG22
LONG_SPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
LONG_SPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
LONG_SPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
LONG_SPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
LONG_SPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
LONG_SPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
LONG_SPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
LONG_SPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
LONG_SPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
LONG_SPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
LONG_SPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
LONG_SPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro cpu_restore_nonscratch thread
ldptr.d s0, \thread, THREAD_REG23
ldptr.d s1, \thread, THREAD_REG24
ldptr.d s2, \thread, THREAD_REG25
ldptr.d s3, \thread, THREAD_REG26
ldptr.d s4, \thread, THREAD_REG27
ldptr.d s5, \thread, THREAD_REG28
ldptr.d s6, \thread, THREAD_REG29
ldptr.d s7, \thread, THREAD_REG30
ldptr.d s8, \thread, THREAD_REG31
ldptr.d ra, \thread, THREAD_REG01
ldptr.d sp, \thread, THREAD_REG03
ldptr.d fp, \thread, THREAD_REG22
LONG_LPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET)
LONG_LPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET)
LONG_LPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET)
LONG_LPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET)
LONG_LPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET)
LONG_LPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET)
LONG_LPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET)
LONG_LPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET)
LONG_LPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET)
LONG_LPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET)
LONG_LPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET)
LONG_LPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET)
.endm
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
#ifdef CONFIG_32BIT
st.w \tmp, \thread, THREAD_FCSR
#else
stptr.w \tmp, \thread, THREAD_FCSR
#endif
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
andi \tmp, \tmp, FPU_CSR_TM
@ -56,7 +68,11 @@
.endm
.macro fpu_restore_csr thread tmp0 tmp1
#ifdef CONFIG_32BIT
ld.w \tmp0, \thread, THREAD_FCSR
#else
ldptr.w \tmp0, \thread, THREAD_FCSR
#endif
movgr2fcsr fcsr0, \tmp0
#ifdef CONFIG_CPU_HAS_LBT
/* TM bit is always 0 if LBT not supported */
@ -88,9 +104,52 @@
#endif
.endm
#ifdef CONFIG_32BIT
.macro fpu_save_cc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.w \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
bstrins.w \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc3
bstrins.w \tmp1, \tmp0, 31, 24
st.w \tmp1, \thread, THREAD_FCC
movcf2gr \tmp0, $fcc4
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc5
bstrins.w \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc6
bstrins.w \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc7
bstrins.w \tmp1, \tmp0, 31, 24
st.w \tmp1, \thread, (THREAD_FCC + 4)
.endm
.macro fpu_restore_cc thread tmp0 tmp1
ld.w \tmp0, \thread, THREAD_FCC
bstrpick.w \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.w \tmp1, \tmp0, 15, 8
movgr2cf $fcc1, \tmp1
bstrpick.w \tmp1, \tmp0, 23, 16
movgr2cf $fcc2, \tmp1
bstrpick.w \tmp1, \tmp0, 31, 24
movgr2cf $fcc3, \tmp1
ld.w \tmp0, \thread, (THREAD_FCC + 4)
bstrpick.w \tmp1, \tmp0, 7, 0
movgr2cf $fcc4, \tmp1
bstrpick.w \tmp1, \tmp0, 15, 8
movgr2cf $fcc5, \tmp1
bstrpick.w \tmp1, \tmp0, 23, 16
movgr2cf $fcc6, \tmp1
bstrpick.w \tmp1, \tmp0, 31, 24
movgr2cf $fcc7, \tmp1
.endm
#else
.macro fpu_save_cc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.d \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
@ -109,7 +168,7 @@
.endm
.macro fpu_restore_cc thread tmp0 tmp1
ldptr.d \tmp0, \thread, THREAD_FCC
ldptr.d \tmp0, \thread, THREAD_FCC
bstrpick.d \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.d \tmp1, \tmp0, 15, 8
@ -127,6 +186,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
#endif
.macro fpu_save_double thread tmp
li.w \tmp, THREAD_FPR0
@ -606,12 +666,14 @@
766:
lu12i.w \reg, 0
ori \reg, \reg, 0
#ifdef CONFIG_64BIT
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
#endif
.pushsection ".la_abs", "aw", %progbits
.p2align 3
.dword 766b
.dword \sym
.p2align PTRLOG
PTR 766b
PTR \sym
.popsection
#endif
.endm


@ -0,0 +1,206 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atomic operations (AMO).
*
* Copyright (C) 2020-2025 Loongson Technology Corporation Limited
*/
#ifndef _ASM_ATOMIC_AMO_H
#define _ASM_ATOMIC_AMO_H
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op".w" " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC_OPS(op, I, asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_add_return_acquire arch_atomic_add_return
#define arch_atomic_add_return_release arch_atomic_add_return
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_sub_return_acquire arch_atomic_sub_return
#define arch_atomic_sub_return_release arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
#define arch_atomic_fetch_and_release arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
#define arch_atomic_fetch_or_release arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#ifdef CONFIG_64BIT
#define ATOMIC64_OP(op, I, asm_op) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op".d " " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
{ \
long result; \
__asm__ __volatile__( \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
{ \
long result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC64_OPS(op, I, asm_op, c_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)
#define arch_atomic64_add_return arch_atomic64_add_return
#define arch_atomic64_add_return_acquire arch_atomic64_add_return
#define arch_atomic64_add_return_release arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
#define arch_atomic64_sub_return_release arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, I, asm_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#endif
#endif /* _ASM_ATOMIC_AMO_H */


@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atomic operations (LLSC).
*
* Copyright (C) 2024-2025 Loongson Technology Corporation Limited
*/
#ifndef _ASM_ATOMIC_LLSC_H
#define _ASM_ATOMIC_LLSC_H
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int temp; \
\
__asm__ __volatile__( \
"1: ll.w %0, %1 #atomic_" #op " \n" \
" " #asm_op " %0, %0, %2 \n" \
" sc.w %0, %1 \n" \
" beq %0, $r0, 1b \n" \
:"=&r" (temp) , "+ZC"(v->counter) \
:"r" (I) \
); \
}
#define ATOMIC_OP_RETURN(op, I, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
int result, temp; \
\
__asm__ __volatile__( \
"1: ll.w %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc.w %0, %2 \n" \
" beq %0, $r0 ,1b \n" \
" " #asm_op " %0, %1, %3 \n" \
: "=&r" (result), "=&r" (temp), "+ZC"(v->counter) \
: "r" (I)); \
\
return result; \
}
#define ATOMIC_FETCH_OP(op, I, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
int result, temp; \
\
__asm__ __volatile__( \
"1: ll.w %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc.w %0, %2 \n" \
" beq %0, $r0 ,1b \n" \
" add.w %0, %1 ,$r0 \n" \
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter) \
: "r" (I)); \
\
return result; \
}
#define ATOMIC_OPS(op,I ,asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_OP_RETURN(op, I , asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_OPS(add, i , add.w ,+=)
ATOMIC_OPS(sub, -i , add.w ,+=)
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#ifdef CONFIG_64BIT
#error "64-bit LLSC atomic operations are not supported"
#endif
#endif /* _ASM_ATOMIC_LLSC_H */


@ -11,6 +11,16 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#ifdef CONFIG_CPU_HAS_AMO
#include <asm/atomic-amo.h>
#else
#include <asm/atomic-llsc.h>
#endif
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
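/*
 * CONFIG_CPU_HAS_AMO defaults to 64BIT (see the Kconfig change earlier in
 * this diff): LoongArch64 keeps the native AM*-based atomics, LoongArch32
 * falls back to the LL/SC implementations, and 64-bit atomics on LoongArch32
 * come from the generic (lock-based) atomic64 code when
 * CONFIG_GENERIC_ATOMIC64 is enabled.
 */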
#if __SIZEOF_LONG__ == 4
#define __LL "ll.w "
#define __SC "sc.w "
@ -34,100 +44,6 @@
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op".w" " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC_OPS(op, I, asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_add_return_acquire arch_atomic_add_return
#define arch_atomic_add_return_release arch_atomic_add_return
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_sub_return_acquire arch_atomic_sub_return
#define arch_atomic_sub_return_release arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
#define arch_atomic_fetch_and_release arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
#define arch_atomic_fetch_or_release arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@ -194,99 +110,6 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC64_OP(op, I, asm_op) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op".d " " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
{ \
long result; \
__asm__ __volatile__( \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
{ \
long result; \
\
__asm__ __volatile__( \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC64_OPS(op, I, asm_op, c_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)
#define arch_atomic64_add_return arch_atomic64_add_return
#define arch_atomic64_add_return_acquire arch_atomic64_add_return
#define arch_atomic64_add_return_release arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
#define arch_atomic64_sub_return_release arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, I, asm_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;


@ -13,11 +13,22 @@
#include <asm/barrier.h>
#ifdef CONFIG_32BIT_REDUCED
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#else /* CONFIG_32BIT_STANDARD || CONFIG_64BIT */
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-fls.h>
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#endif
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>


@ -11,7 +11,7 @@ static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
{
u32 ret;
asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x)));
asm("bitrev.w %0, %1" : "=r"(ret) : "r"(x));
return ret;
}
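/*
 * In the hunk above, the removed line byte-swapped the word (__swab32())
 * and then reversed the bits within each byte (bitrev.4b), which together
 * reverse all 32 bits; the replacement does the same with the single
 * bitrev.w instruction.
 */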


@ -9,6 +9,8 @@
#include <linux/bitops.h>
#include <linux/in6.h>
#ifdef CONFIG_64BIT
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
@ -61,6 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
#endif
#include <asm-generic/checksum.h>
#endif /* __ASM_CHECKSUM_H */


@ -9,17 +9,33 @@
#include <linux/build_bug.h>
#include <asm/barrier.h>
#define __xchg_asm(amswap_db, m, val) \
#define __xchg_amo_asm(amswap_db, m, val) \
({ \
__typeof(val) __ret; \
__typeof(val) __ret; \
\
__asm__ __volatile__ ( \
" "amswap_db" %1, %z2, %0 \n" \
: "+ZB" (*m), "=&r" (__ret) \
: "Jr" (val) \
: "memory"); \
__asm__ __volatile__ ( \
" "amswap_db" %1, %z2, %0 \n" \
: "+ZB" (*m), "=&r" (__ret) \
: "Jr" (val) \
: "memory"); \
\
__ret; \
__ret; \
})
#define __xchg_llsc_asm(ld, st, m, val) \
({ \
__typeof(val) __ret, __tmp; \
\
asm volatile ( \
"1: ll.w %0, %3 \n" \
" move %1, %z4 \n" \
" sc.w %1, %2 \n" \
" beqz %1, 1b \n" \
: "=&r" (__ret), "=&r" (__tmp), "=ZC" (*m) \
: "ZC" (*m), "Jr" (val) \
: "memory"); \
\
__ret; \
})
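/*
 * In the LL/SC fallback above, ll.w returns the old value in __ret and
 * starts the reservation; the new value is copied into __tmp first because
 * sc.w overwrites its source register with the success flag, and the loop
 * retries until the store-conditional succeeds.
 */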
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
@ -67,13 +83,23 @@ __arch_xchg(volatile void *ptr, unsigned long x, int size)
switch (size) {
case 1:
case 2:
return __xchg_small(ptr, x, size);
return __xchg_small((volatile void *)ptr, x, size);
case 4:
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
#ifdef CONFIG_CPU_HAS_AMO
return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
#else
return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x);
#endif /* CONFIG_CPU_HAS_AMO */
#ifdef CONFIG_64BIT
case 8:
return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
#ifdef CONFIG_CPU_HAS_AMO
return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
#else
return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x);
#endif /* CONFIG_CPU_HAS_AMO */
#endif /* CONFIG_64BIT */
default:
BUILD_BUG();


@ -20,16 +20,13 @@
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
#ifdef CONFIG_32BIT
# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31
# define cpu_pabits 31
#endif
#ifdef CONFIG_64BIT
# define cpu_has_64bits 1
# define cpu_vabits cpu_data[0].vabits
# define cpu_pabits cpu_data[0].pabits
# define __NEED_ADDRBITS_PROBE
#endif
/*


@ -12,7 +12,7 @@
#define dmi_early_unmap(x, l) dmi_unmap(x)
#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
static inline void *dmi_remap(u64 phys_addr, unsigned long size)
static inline void *dmi_remap(phys_addr_t phys_addr, unsigned long size)
{
return ((void *)TO_CACHE(phys_addr));
}


@ -120,6 +120,36 @@
#define R_LARCH_ADD_ULEB128 107
#define R_LARCH_SUB_ULEB128 108
#define R_LARCH_64_PCREL 109
#define R_LARCH_CALL36 110
#define R_LARCH_TLS_DESC_PC_HI20 111
#define R_LARCH_TLS_DESC_PC_LO12 112
#define R_LARCH_TLS_DESC64_PC_LO20 113
#define R_LARCH_TLS_DESC64_PC_HI12 114
#define R_LARCH_TLS_DESC_HI20 115
#define R_LARCH_TLS_DESC_LO12 116
#define R_LARCH_TLS_DESC64_LO20 117
#define R_LARCH_TLS_DESC64_HI12 118
#define R_LARCH_TLS_DESC_LD 119
#define R_LARCH_TLS_DESC_CALL 120
#define R_LARCH_TLS_LE_HI20_R 121
#define R_LARCH_TLS_LE_ADD_R 122
#define R_LARCH_TLS_LE_LO12_R 123
#define R_LARCH_TLS_LD_PCREL20_S2 124
#define R_LARCH_TLS_GD_PCREL20_S2 125
#define R_LARCH_TLS_DESC_PCREL20_S2 126
#define R_LARCH_CALL30 127
#define R_LARCH_PCADD_HI20 128
#define R_LARCH_PCADD_LO12 129
#define R_LARCH_GOT_PCADD_HI20 130
#define R_LARCH_GOT_PCADD_LO12 131
#define R_LARCH_TLS_IE_PCADD_HI20 132
#define R_LARCH_TLS_IE_PCADD_LO12 133
#define R_LARCH_TLS_LD_PCADD_HI20 134
#define R_LARCH_TLS_LD_PCADD_LO12 135
#define R_LARCH_TLS_GD_PCADD_HI20 136
#define R_LARCH_TLS_GD_PCADD_LO12 137
#define R_LARCH_TLS_DESC_PCADD_HI20 138
#define R_LARCH_TLS_DESC_PCADD_LO12 139
#ifndef ELF_ARCH
@ -156,6 +186,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs);
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#ifdef CONFIG_32BIT


@ -438,8 +438,10 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
/* st.d $ra, $sp, offset */
return ip->reg2i12_format.opcode == std_op &&
const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? stw_op : std_op;
/* st.w / st.d $ra, $sp, offset */
return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
@ -447,8 +449,10 @@ static inline bool is_ra_save_ins(union loongarch_instruction *ip)
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
/* addi.d $sp, $sp, -imm */
return ip->reg2i12_format.opcode == addid_op &&
const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op;
/* addi.w / addi.d $sp, $sp, -imm */
return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);


@ -50,10 +50,22 @@ void spurious_interrupt(void);
#define NR_LEGACY_VECTORS 16
#define IRQ_MATRIX_BITS NR_VECTORS
#define AVEC_IRQ_SHIFT 4
#define AVEC_IRQ_BIT 8
#define AVEC_IRQ_MASK GENMASK(AVEC_IRQ_BIT - 1, 0)
#define AVEC_CPU_SHIFT 12
#define AVEC_CPU_BIT 16
#define AVEC_CPU_MASK GENMASK(AVEC_CPU_BIT - 1, 0)
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
#ifdef CONFIG_32BIT
#define MAX_IO_PICS 1
#else
#define MAX_IO_PICS 8
#endif
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {


@ -10,15 +10,23 @@
#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#define JUMP_LABEL_NOP_SIZE 4
#ifdef CONFIG_32BIT
#define JUMP_LABEL_TYPE ".long "
#else
#define JUMP_LABEL_TYPE ".quad "
#endif
/* This macro is also expanded on the Rust side. */
#define JUMP_TABLE_ENTRY(key, label) \
".pushsection __jump_table, \"aw\" \n\t" \
".align 3 \n\t" \
".align " __stringify(PTRLOG) " \n\t" \
".long 1b - ., " label " - . \n\t" \
".quad " key " - . \n\t" \
JUMP_LABEL_TYPE key " - . \n\t" \
".popsection \n\t"
#define ARCH_STATIC_BRANCH_ASM(key, label) \


@ -8,6 +8,7 @@
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/asm.h>
#include <asm/cmpxchg.h>
typedef struct {
@ -27,6 +28,7 @@ typedef struct {
/*
* Same as above, but return the result value
*/
#ifdef CONFIG_CPU_HAS_AMO
static inline long local_add_return(long i, local_t *l)
{
unsigned long result;
@ -55,6 +57,41 @@ static inline long local_sub_return(long i, local_t *l)
return result;
}
#else
static inline long local_add_return(long i, local_t *l)
{
unsigned long result, temp;
__asm__ __volatile__(
"1:" __LL "%1, %2 # local_add_return \n"
__stringify(LONG_ADD) " %0, %1, %3 \n"
__SC "%0, %2 \n"
" beq %0, $r0, 1b \n"
__stringify(LONG_ADD) " %0, %1, %3 \n"
: "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter)
: "r" (i), "ZC" (l->a.counter)
: "memory");
return result;
}
static inline long local_sub_return(long i, local_t *l)
{
unsigned long result, temp;
__asm__ __volatile__(
"1:" __LL "%1, %2 # local_sub_return \n"
__stringify(LONG_SUB) " %0, %1, %3 \n"
__SC "%0, %2 \n"
" beq %0, $r0, 1b \n"
__stringify(LONG_SUB) " %0, %1, %3 \n"
: "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter)
: "r" (i), "ZC" (l->a.counter)
: "memory");
return result;
}
#endif
static inline long local_cmpxchg(local_t *l, long old, long new)
{


@ -182,6 +182,16 @@
#define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg)
#define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg)
#ifdef CONFIG_32BIT
#define csr_read(reg) csr_read32(reg)
#define csr_write(val, reg) csr_write32(val, reg)
#define csr_xchg(val, mask, reg) csr_xchg32(val, mask, reg)
#else
#define csr_read(reg) csr_read64(reg)
#define csr_write(val, reg) csr_write64(val, reg)
#define csr_xchg(val, mask, reg) csr_xchg64(val, mask, reg)
#endif
/* IOCSR */
#define iocsr_read32(reg) __iocsrrd_w(reg)
#define iocsr_read64(reg) __iocsrrd_d(reg)
@ -904,6 +914,26 @@
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
/* Direct Map window 0/1/2/3 */
#ifdef CONFIG_32BIT
#define CSR_DMW0_PLV0 (1 << 0)
#define CSR_DMW0_VSEG (0x4)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
#define CSR_DMW1_PLV0 (1 << 0)
#define CSR_DMW1_MAT (1 << 4)
#define CSR_DMW1_VSEG (0x5)
#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
#define CSR_DMW2_INIT 0x0
#define CSR_DMW3_INIT 0x0
#else
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
#define CSR_DMW0_VSEG _CONST64_(0x8000)
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@ -923,6 +953,8 @@
#define CSR_DMW3_INIT 0x0
#endif
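/*
 * With DMW_PABITS = 29 on LoongArch32, CSR_DMW0_BASE = 0x4 << 29 =
 * 0x80000000 and CSR_DMW1_BASE = 0x5 << 29 = 0xa0000000, i.e. the
 * "LA32: 0x8xxx xxxx" and "LA32: 0xaxxx xxxx" windows mentioned in the
 * SETUP_DMWINS comments later in this diff.
 */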
/* Performance Counter registers */
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@ -1208,7 +1240,35 @@
#ifndef __ASSEMBLER__
static __always_inline u64 drdtime(void)
#ifdef CONFIG_32BIT
static __always_inline u32 rdtime_h(void)
{
u32 val = 0;
__asm__ __volatile__(
"rdtimeh.w %0, $zero\n\t"
: "=r"(val)
:
);
return val;
}
static __always_inline u32 rdtime_l(void)
{
u32 val = 0;
__asm__ __volatile__(
"rdtimel.w %0, $zero\n\t"
: "=r"(val)
:
);
return val;
}
#else
static __always_inline u64 rdtime_d(void)
{
u64 val = 0;
@ -1220,11 +1280,14 @@ static __always_inline u64 drdtime(void)
return val;
}
#endif
static inline unsigned int get_csr_cpuid(void)
{
return csr_read32(LOONGARCH_CSR_CPUID);
}
#ifdef CONFIG_64BIT
static inline void csr_any_send(unsigned int addr, unsigned int data,
unsigned int data_mask, unsigned int cpu)
{
@ -1236,6 +1299,7 @@ static inline void csr_any_send(unsigned int addr, unsigned int data,
val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT);
iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND);
}
#endif
static inline unsigned int read_csr_excode(void)
{
@ -1259,22 +1323,22 @@ static inline void write_csr_pagesize(unsigned int size)
static inline unsigned int read_csr_tlbrefill_pagesize(void)
{
return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
return (csr_read(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT;
}
static inline void write_csr_tlbrefill_pagesize(unsigned int size)
{
csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
csr_xchg(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI);
}
#define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID)
#define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID)
#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI)
#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI)
#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0)
#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0)
#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1)
#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1)
#define read_csr_entryhi() csr_read(LOONGARCH_CSR_TLBEHI)
#define write_csr_entryhi(val) csr_write(val, LOONGARCH_CSR_TLBEHI)
#define read_csr_entrylo0() csr_read(LOONGARCH_CSR_TLBELO0)
#define write_csr_entrylo0(val) csr_write(val, LOONGARCH_CSR_TLBELO0)
#define read_csr_entrylo1() csr_read(LOONGARCH_CSR_TLBELO1)
#define write_csr_entrylo1(val) csr_write(val, LOONGARCH_CSR_TLBELO1)
#define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG)
#define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG)
#define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT)
@ -1284,20 +1348,20 @@ static inline void write_csr_tlbrefill_pagesize(unsigned int size)
#define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN)
#define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN)
#define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID)
#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1)
#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1)
#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2)
#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2)
#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3)
#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3)
#define read_csr_prcfg1() csr_read(LOONGARCH_CSR_PRCFG1)
#define write_csr_prcfg1(val) csr_write(val, LOONGARCH_CSR_PRCFG1)
#define read_csr_prcfg2() csr_read(LOONGARCH_CSR_PRCFG2)
#define write_csr_prcfg2(val) csr_write(val, LOONGARCH_CSR_PRCFG2)
#define read_csr_prcfg3() csr_read(LOONGARCH_CSR_PRCFG3)
#define write_csr_prcfg3(val) csr_write(val, LOONGARCH_CSR_PRCFG3)
#define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE)
#define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE)
#define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG)
#define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG)
#define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR)
#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2)
#define read_csr_impctl1() csr_read(LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl1(val) csr_write(val, LOONGARCH_CSR_IMPCTL1)
#define write_csr_impctl2(val) csr_write(val, LOONGARCH_CSR_IMPCTL2)
#define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0)
#define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0)
@ -1378,8 +1442,10 @@ __BUILD_CSR_OP(tlbidx)
#define ENTRYLO_C_SHIFT 4
#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
#define ENTRYLO_G (_ULCAST_(1) << 6)
#ifdef CONFIG_64BIT
#define ENTRYLO_NR (_ULCAST_(1) << 61)
#define ENTRYLO_NX (_ULCAST_(1) << 62)
#endif
/* Values for PageSize register */
#define PS_4K 0x0000000c


@ -38,8 +38,10 @@ struct got_entry {
struct plt_entry {
u32 inst_lu12iw;
#ifdef CONFIG_64BIT
u32 inst_lu32id;
u32 inst_lu52id;
#endif
u32 inst_jirl;
};
@ -57,6 +59,14 @@ static inline struct got_entry emit_got_entry(Elf_Addr val)
static inline struct plt_entry emit_plt_entry(unsigned long val)
{
#ifdef CONFIG_32BIT
u32 lu12iw, jirl;
lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
return (struct plt_entry) { lu12iw, jirl };
#else
u32 lu12iw, lu32id, lu52id, jirl;
lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW));
@ -65,6 +75,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val)
jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI));
return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
#endif
}
static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)


@ -10,7 +10,7 @@
#include <vdso/page.h>
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)


@ -13,7 +13,7 @@
* the loading address of main kernel image, but far from where the modules are
* loaded. Tell the compiler this fact when using explicit relocs.
*/
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) && defined(CONFIG_64BIT)
# if __has_attribute(model)
# define PER_CPU_ATTRIBUTES __attribute__((model("extreme")))
# else
@ -27,7 +27,7 @@ register unsigned long __my_cpu_offset __asm__("$r21");
static inline void set_my_cpu_offset(unsigned long off)
{
__my_cpu_offset = off;
csr_write64(off, PERCPU_BASE_KS);
csr_write(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset \
@ -36,6 +36,8 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset; \
})
#ifdef CONFIG_CPU_HAS_AMO
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
@ -68,25 +70,9 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
case 2:
return __xchg_small((volatile void *)ptr, val, size);
#endif
case 4:
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
case 8:
return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
default:
BUILD_BUG();
}
return 0;
}
#ifdef CONFIG_64BIT
#define __pcpu_op_1(op) op ".b "
#define __pcpu_op_2(op) op ".h "
@ -115,6 +101,10 @@ do { \
: "memory"); \
} while (0)
#endif
#define __percpu_xchg __arch_xchg
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@ -135,6 +125,8 @@ do { \
__retval; \
})
#ifdef CONFIG_CPU_HAS_AMO
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
@ -146,9 +138,6 @@ do { \
#define _percpu_or(pcp, val) \
_pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
@ -161,6 +150,10 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
#endif
#ifdef CONFIG_64BIT
#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
@ -171,6 +164,11 @@ do { \
#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#endif
#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)


@ -6,6 +6,26 @@
#define _ASM_PGTABLE_BITS_H
/* Page table bits */
#ifdef CONFIG_32BIT
#define _PAGE_VALID_SHIFT 0
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT 1
#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
#define _CACHE_SHIFT 4 /* 4~5, two bits */
#define _PAGE_GLOBAL_SHIFT 6
#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
#define _PAGE_PRESENT_SHIFT 7
#define _PAGE_PFN_SHIFT 8
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_SWP_EXCLUSIVE_SHIFT 13
#define _PAGE_PFN_END_SHIFT 28
#define _PAGE_WRITE_SHIFT 29
#define _PAGE_MODIFIED_SHIFT 30
#define _PAGE_PRESENT_INVALID_SHIFT 31
#endif
#ifdef CONFIG_64BIT
#define _PAGE_VALID_SHIFT 0
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT 1
@ -18,14 +38,15 @@
#define _PAGE_MODIFIED_SHIFT 9
#define _PAGE_PROTNONE_SHIFT 10
#define _PAGE_SPECIAL_SHIFT 11
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_PFN_SHIFT 12
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
#define _PAGE_RPLV_SHIFT 63
#endif
/* Used by software */
#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
@ -33,10 +54,15 @@
#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#ifdef CONFIG_32BIT
#define _PAGE_PROTNONE 0
#define _PAGE_SPECIAL 0
#else
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
#endif
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
/* Used by TLB hardware (placed in EntryLo*) */
@ -46,9 +72,15 @@
#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
#ifdef CONFIG_32BIT
#define _PAGE_NO_READ 0
#define _PAGE_NO_EXEC 0
#define _PAGE_RPLV 0
#else
#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
#endif
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)


@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
@ -23,37 +24,45 @@
#endif
#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
#ifdef CONFIG_32BIT
#define VA_BITS 32
#else
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
#define PTRS_PER_PGD (PAGE_SIZE >> 3)
#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD (PAGE_SIZE >> 3)
#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD (PAGE_SIZE >> 3)
#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
#endif
#define PTRS_PER_PTE (PAGE_SIZE >> 3)
#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
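/*
 * Each page-table level holds PAGE_SIZE / sizeof(pointer) entries, so the
 * per-level shift is PAGE_SHIFT - PTRLOG (PTRLOG is 2 on 32-bit and 3 on
 * 64-bit), which is what the PTRLOG substitutions above express.
 */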
#ifdef CONFIG_32BIT
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#else
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#endif
#ifndef __ASSEMBLER__
@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
/*
* TLB refill handlers may also map the vmalloc area into xkvrange.
* Avoid the first couple of pages so NULL pointer dereferences will
* still reliably trap.
*/
#ifdef CONFIG_32BIT
#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
#endif
#ifdef CONFIG_64BIT
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
#endif
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr);
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
*
* Format of swap PTEs:
* Format of 32bit swap PTEs:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* <------------ offset -------------> E <- type -> <-- zeroes -->
*
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT.
*
* Format of 64bit swap PTEs:
*
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
@ -290,16 +314,27 @@ extern void kernel_pte_init(void *addr);
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
*/
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
#define __swp_type(x) (((x).val >> 16) & 0x7f)
#define __swp_offset(x) ((x).val >> 24)
#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
return pte;
}
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __swp_entry_to_pte(x) __pte((x).val)
#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
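
A minimal user-space round-trip check of the 32-bit swap-PTE layout above (illustration only; the 5/8/14 values are taken straight from the __SWP_TYPE_BITS/__SWP_TYPE_SHIFT/__SWP_OFFSET_SHIFT definitions for CONFIG_32BIT):

#include <assert.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_TYPE_SHIFT		8
#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	(SWP_TYPE_BITS + SWP_TYPE_SHIFT + 1)	/* 14: bit 13 is the E marker */

int main(void)
{
	unsigned long type = 3, offset = 0x1234;

	/* mk_swap_pte() */
	unsigned long val = ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) |
			    (offset << SWP_OFFSET_SHIFT);

	/* __swp_type() and __swp_offset() recover the fields */
	assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
	assert((val >> SWP_OFFSET_SHIFT) == offset);
	printf("swap pte value: 0x%lx\n", val);
	return 0;
}

Bits 0-7 stay zero, covering _PAGE_PRESENT as described in the 32-bit diagram above.
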
static inline bool pte_swp_exclusive(pte_t pte)
{


@ -38,22 +38,42 @@
cfi_restore \reg \offset \docfi
.endm
.macro SETUP_TWINS temp
pcaddi t0, 0
PTR_LI t1, ~TO_PHYS_MASK
and t0, t0, t1
ori t0, t0, (1 << 4 | 1)
csrwr t0, LOONGARCH_CSR_DMWIN0
PTR_LI t0, CSR_DMW1_INIT
csrwr t0, LOONGARCH_CSR_DMWIN1
.endm
.macro SETUP_MODES temp
/* Enable PG */
li.w \temp, 0xb0 # PLV=0, IE=0, PG=1
csrwr \temp, LOONGARCH_CSR_CRMD
li.w \temp, 0x04 # PLV=0, PIE=1, PWE=0
csrwr \temp, LOONGARCH_CSR_PRMD
li.w \temp, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr \temp, LOONGARCH_CSR_EUEN
.endm
.macro SETUP_DMWINS temp
li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx
PTR_LI \temp, CSR_DMW0_INIT # SUC, PLV0, LA32: 0x8xxx xxxx, LA64: 0x8000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN0
li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx
PTR_LI \temp, CSR_DMW1_INIT # CAC, PLV0, LA32: 0xaxxx xxxx, LA64: 0x9000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN1
li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx
PTR_LI \temp, CSR_DMW2_INIT # WUC, PLV0, LA32: unavailable, LA64: 0xa000 xxxx xxxx xxxx
csrwr \temp, LOONGARCH_CSR_DMWIN2
li.d \temp, CSR_DMW3_INIT # 0x0, unused
PTR_LI \temp, CSR_DMW3_INIT # 0x0, unused
csrwr \temp, LOONGARCH_CSR_DMWIN3
.endm
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
PTR_LI \temp1, CACHE_BASE
pcaddi \temp2, 0
bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
PTR_BSTRINS \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
@ -171,7 +191,7 @@
andi t0, t0, 0x3 /* extract pplv bit */
beqz t0, 9f
li.d tp, ~_THREAD_MASK
LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
cfi_st u0, PT_R21, \docfi
csrrd u0, PERCPU_BASE_KS


@ -5,6 +5,7 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
#ifdef CONFIG_64BIT
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
extern void *__memset(void *__s, int __c, size_t __count);
@ -16,6 +17,7 @@ extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
#endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)


@ -18,7 +18,38 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
return drdtime();
#ifdef CONFIG_32BIT
return rdtime_l();
#else
return rdtime_d();
#endif
}
#ifdef CONFIG_32BIT
#define get_cycles_hi get_cycles_hi
static inline cycles_t get_cycles_hi(void)
{
return rdtime_h();
}
#endif
static inline u64 get_cycles64(void)
{
#ifdef CONFIG_32BIT
u32 hi, lo;
do {
hi = rdtime_h();
lo = rdtime_l();
} while (hi != rdtime_h());
return ((u64)hi << 32) | lo;
#else
return rdtime_d();
#endif
}
#endif /* __KERNEL__ */
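
The 32-bit get_cycles64() above uses the usual split-counter read: sample the high word, then the low word, and retry if the high word changed in between, which can only happen when the low word wrapped. A self-contained sketch of the same pattern (read_hi()/read_lo() simulate the rdtime_h()/rdtime_l() reads; not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Simulated 64-bit counter; the low word is about to wrap. */
static uint64_t fake_counter = 0xffffffff0ULL;

static uint32_t read_hi(void) { return fake_counter >> 32; }
/* Advance the simulated time a little on each low-word sample. */
static uint32_t read_lo(void) { fake_counter += 0x20; return (uint32_t)fake_counter; }

static uint64_t read_counter64(void)
{
	uint32_t hi, lo;

	do {
		hi = read_hi();
		lo = read_lo();
		/* If hi changed, lo wrapped between the two reads: retry. */
	} while (hi != read_hi());

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("counter: 0x%llx\n", (unsigned long long)read_counter64());
	return 0;
}
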


@ -19,10 +19,16 @@
#include <asm/asm-extable.h>
#include <asm-generic/access_ok.h>
#define __LSW 0
#define __MSW 1
extern u64 __ua_limit;
#define __UA_ADDR ".dword"
#ifdef CONFIG_64BIT
#define __UA_LIMIT __ua_limit
#else
#define __UA_LIMIT 0x80000000UL
#endif
/*
* get_user: - Get a simple variable from user space.
@ -126,6 +132,7 @@ extern u64 __ua_limit;
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
@ -146,7 +153,7 @@ do { \
case 1: __get_data_asm(val, "ld.b", ptr); break; \
case 2: __get_data_asm(val, "ld.h", ptr); break; \
case 4: __get_data_asm(val, "ld.w", ptr); break; \
case 8: __get_data_asm(val, "ld.d", ptr); break; \
case 8: __get_data_asm_8(val, ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@ -167,13 +174,39 @@ do { \
(val) = (__typeof__(*(ptr))) __gu_tmp; \
}
#ifdef CONFIG_64BIT
#define __get_data_asm_8(val, ptr) \
__get_data_asm(val, "ld.d", ptr)
#else /* !CONFIG_64BIT */
#define __get_data_asm_8(val, ptr) \
{ \
u32 __lo, __hi; \
u32 __user *__ptr = (u32 __user *)(ptr); \
\
__asm__ __volatile__ ( \
"1:\n" \
" ld.w %1, %3 \n" \
"2:\n" \
" ld.w %2, %4 \n" \
"3:\n" \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
: "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \
: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
if (__gu_err) \
__hi = 0; \
(val) = (__typeof__(val))((__typeof__((val)-(val))) \
((((u64)__hi << 32) | __lo))); \
}
#endif /* CONFIG_64BIT */
#define __put_user_common(ptr, size) \
do { \
switch (size) { \
case 1: __put_data_asm("st.b", ptr); break; \
case 2: __put_data_asm("st.h", ptr); break; \
case 4: __put_data_asm("st.w", ptr); break; \
case 8: __put_data_asm("st.d", ptr); break; \
case 8: __put_data_asm_8(ptr); break; \
default: BUILD_BUG(); break; \
} \
} while (0)
@ -190,6 +223,30 @@ do { \
: "Jr" (__pu_val)); \
}
#ifdef CONFIG_64BIT
#define __put_data_asm_8(ptr) \
__put_data_asm("st.d", ptr)
#else /* !CONFIG_64BIT */
#define __put_data_asm_8(ptr) \
{ \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \
\
__asm__ __volatile__ ( \
"1:\n" \
" st.w %z3, %1 \n" \
"2:\n" \
" st.w %z4, %2 \n" \
"3:\n" \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
: "+r" (__pu_err), \
"=m" (__ptr[__LSW]), \
"=m" (__ptr[__MSW]) \
: "rJ" (__x), "rJ" (__x >> 32)); \
}
#endif /* CONFIG_64BIT */
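
On LA32 the 8-byte cases above are split into two 32-bit word accesses, indexed through __LSW/__MSW (0 and 1, since LoongArch is little-endian). A plain-C sketch of the same split, leaving out the exception-table fixups:

#include <stdint.h>
#include <stdio.h>

#define LSW 0	/* low word first, as __LSW/__MSW above */
#define MSW 1

/* Store a 64-bit value as two 32-bit words, mirroring __put_data_asm_8. */
static void put_u64_split(uint32_t *dst, uint64_t x)
{
	dst[LSW] = (uint32_t)x;
	dst[MSW] = (uint32_t)(x >> 32);
}

/* Reassemble it, mirroring __get_data_asm_8. */
static uint64_t get_u64_split(const uint32_t *src)
{
	return ((uint64_t)src[MSW] << 32) | src[LSW];
}

int main(void)
{
	uint32_t buf[2];

	put_u64_split(buf, 0x1122334455667788ULL);
	printf("0x%llx\n", (unsigned long long)get_u64_split(buf));
	return 0;
}
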
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
int __gu_err = 0; \


@ -12,6 +12,8 @@
#include <asm/unistd.h>
#include <asm/vdso/vdso.h>
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline long gettimeofday_fallback(
@ -89,6 +91,8 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */


@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
syscall-y += unistd_32.h
syscall-y += unistd_64.h


@ -61,8 +61,13 @@ struct user_lbt_state {
struct user_watch_state {
__u64 dbg_info;
struct {
#if __BITS_PER_LONG == 32
__u32 addr;
__u32 mask;
#else
__u64 addr;
__u64 mask;
#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[8];
@ -71,8 +76,13 @@ struct user_watch_state {
struct user_watch_state_v2 {
__u64 dbg_info;
struct {
#if __BITS_PER_LONG == 32
__u32 addr;
__u32 mask;
#else
__u64 addr;
__u64 mask;
#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[14];


@ -1,3 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm/bitsperlong.h>
#if __BITS_PER_LONG == 32
#include <asm/unistd_32.h>
#else
#include <asm/unistd_64.h>
#endif


@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# No special ABIs on loongarch so far
syscall_abis_32 +=
syscall_abis_64 +=


@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base);
static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
{
#ifdef __NEED_ADDRBITS_PROBE
#ifdef CONFIG_32BIT
c->pabits = cpu_pabits;
c->vabits = cpu_vabits;
vm_map_base = KVRANGE;
#else
c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
vm_map_base = 0UL - (1UL << c->vabits);
@ -298,8 +302,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
return;
}
#ifdef CONFIG_64BIT
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
#else
*vendor = iocsr_read32(LOONGARCH_IOCSR_VENDOR) |
(u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32;
*cpuname = iocsr_read32(LOONGARCH_IOCSR_CPUNAME) |
(u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32;
#endif
if (!__cpu_full_name[cpu]) {
if (((char *)vendor)[0] == 0)


@ -9,7 +9,11 @@
.macro __EFI_PE_HEADER
.long IMAGE_NT_SIGNATURE
.Lcoff_header:
#ifdef CONFIG_32BIT
.short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */
#else
.short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
#endif
.short .Lsection_count /* NumberOfSections */
.long 0 /* TimeDateStamp */
.long 0 /* PointerToSymbolTable */


@ -115,7 +115,9 @@ void __init efi_init(void)
efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
set_bit(EFI_64BIT, &efi.flags);
if (IS_ENABLED(CONFIG_64BIT))
set_bit(EFI_64BIT, &efi.flags);
efi_nr_tables = efi_systab->nr_tables;
efi_config_table = (unsigned long)efi_systab->tables;


@ -23,24 +23,24 @@ SYM_CODE_START(handle_syscall)
UNWIND_HINT_UNDEFINED
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
PTR_ADD t1, t1, t0
move t2, sp
ld.d sp, t1, 0
PTR_L sp, t1, 0
addi.d sp, sp, -PT_SIZE
PTR_ADDI sp, sp, -PT_SIZE
cfi_st t2, PT_R3
cfi_rel_offset sp, PT_R3
st.d zero, sp, PT_R0
LONG_S zero, sp, PT_R0
csrrd t2, LOONGARCH_CSR_PRMD
st.d t2, sp, PT_PRMD
LONG_S t2, sp, PT_PRMD
csrrd t2, LOONGARCH_CSR_CRMD
st.d t2, sp, PT_CRMD
LONG_S t2, sp, PT_CRMD
csrrd t2, LOONGARCH_CSR_EUEN
st.d t2, sp, PT_EUEN
LONG_S t2, sp, PT_EUEN
csrrd t2, LOONGARCH_CSR_ECFG
st.d t2, sp, PT_ECFG
LONG_S t2, sp, PT_ECFG
csrrd t2, LOONGARCH_CSR_ESTAT
st.d t2, sp, PT_ESTAT
LONG_S t2, sp, PT_ESTAT
cfi_st ra, PT_R1
cfi_st a0, PT_R4
cfi_st a1, PT_R5
@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall)
cfi_st a6, PT_R10
cfi_st a7, PT_R11
csrrd ra, LOONGARCH_CSR_ERA
st.d ra, sp, PT_ERA
LONG_S ra, sp, PT_ERA
cfi_rel_offset ra, PT_ERA
cfi_st tp, PT_R2
@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall)
#endif
move u0, t0
li.d tp, ~_THREAD_MASK
LONG_LI tp, ~_THREAD_MASK
and tp, tp, sp
move a0, sp


@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void)
clk = of_clk_get(np, 0);
of_node_put(np);
cpu_clock_freq = 200 * 1000 * 1000;
if (IS_ERR(clk))
if (IS_ERR(clk)) {
pr_warn("No valid CPU clock freq, assume 200MHz.\n");
return -ENODEV;
}
cpu_clock_freq = clk_get_rate(clk);
clk_put(clk);


@ -96,6 +96,49 @@
EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
.endm
#ifdef CONFIG_32BIT
.macro sc_save_fcc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.w \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
bstrins.w \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc3
bstrins.w \tmp1, \tmp0, 31, 24
EX st.w \tmp1, \thread, THREAD_FCC
movcf2gr \tmp0, $fcc4
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc5
bstrins.w \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc6
bstrins.w \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc7
bstrins.w \tmp1, \tmp0, 31, 24
EX st.w \tmp1, \thread, (THREAD_FCC + 4)
.endm
.macro sc_restore_fcc thread tmp0 tmp1
EX ld.w \tmp0, \thread, THREAD_FCC
bstrpick.w \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.w \tmp1, \tmp0, 15, 8
movgr2cf $fcc1, \tmp1
bstrpick.w \tmp1, \tmp0, 23, 16
movgr2cf $fcc2, \tmp1
bstrpick.w \tmp1, \tmp0, 31, 24
movgr2cf $fcc3, \tmp1
EX ld.w \tmp0, \thread, (THREAD_FCC + 4)
bstrpick.w \tmp1, \tmp0, 7, 0
movgr2cf $fcc4, \tmp1
bstrpick.w \tmp1, \tmp0, 15, 8
movgr2cf $fcc5, \tmp1
bstrpick.w \tmp1, \tmp0, 23, 16
movgr2cf $fcc6, \tmp1
bstrpick.w \tmp1, \tmp0, 31, 24
movgr2cf $fcc7, \tmp1
.endm
#else
.macro sc_save_fcc base, tmp0, tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
@ -135,6 +178,7 @@
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
#endif
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu)
li.w t1, -1 # SNaN
#ifdef CONFIG_32BIT
movgr2fr.w $f0, t1
movgr2frh.w $f0, t1
movgr2fr.w $f1, t1
movgr2frh.w $f1, t1
movgr2fr.w $f2, t1
movgr2frh.w $f2, t1
movgr2fr.w $f3, t1
movgr2frh.w $f3, t1
movgr2fr.w $f4, t1
movgr2frh.w $f4, t1
movgr2fr.w $f5, t1
movgr2frh.w $f5, t1
movgr2fr.w $f6, t1
movgr2frh.w $f6, t1
movgr2fr.w $f7, t1
movgr2frh.w $f7, t1
movgr2fr.w $f8, t1
movgr2frh.w $f8, t1
movgr2fr.w $f9, t1
movgr2frh.w $f9, t1
movgr2fr.w $f10, t1
movgr2frh.w $f10, t1
movgr2fr.w $f11, t1
movgr2frh.w $f11, t1
movgr2fr.w $f12, t1
movgr2frh.w $f12, t1
movgr2fr.w $f13, t1
movgr2frh.w $f13, t1
movgr2fr.w $f14, t1
movgr2frh.w $f14, t1
movgr2fr.w $f15, t1
movgr2frh.w $f15, t1
movgr2fr.w $f16, t1
movgr2frh.w $f16, t1
movgr2fr.w $f17, t1
movgr2frh.w $f17, t1
movgr2fr.w $f18, t1
movgr2frh.w $f18, t1
movgr2fr.w $f19, t1
movgr2frh.w $f19, t1
movgr2fr.w $f20, t1
movgr2frh.w $f20, t1
movgr2fr.w $f21, t1
movgr2frh.w $f21, t1
movgr2fr.w $f22, t1
movgr2frh.w $f22, t1
movgr2fr.w $f23, t1
movgr2frh.w $f23, t1
movgr2fr.w $f24, t1
movgr2frh.w $f24, t1
movgr2fr.w $f25, t1
movgr2frh.w $f25, t1
movgr2fr.w $f26, t1
movgr2frh.w $f26, t1
movgr2fr.w $f27, t1
movgr2frh.w $f27, t1
movgr2fr.w $f28, t1
movgr2frh.w $f28, t1
movgr2fr.w $f29, t1
movgr2frh.w $f29, t1
movgr2fr.w $f30, t1
movgr2frh.w $f30, t1
movgr2fr.w $f31, t1
movgr2frh.w $f31, t1
#else
movgr2fr.d $f0, t1
movgr2fr.d $f1, t1
movgr2fr.d $f2, t1
@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu)
movgr2fr.d $f29, t1
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
#endif
jr ra
SYM_FUNC_END(_init_fpu)


@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize);
SYM_CODE_START(kernel_entry) # kernel entry point
/* Config direct window and set PG */
SETUP_DMWINS t0
SETUP_TWINS
SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
csrwr t0, LOONGARCH_CSR_PRMD
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
SETUP_DMWINS t0
la.pcrel t0, __bss_start # clear .bss
st.d zero, t0, 0
LONG_S zero, t0, 0
la.pcrel t1, __bss_stop - LONGSIZE
1:
addi.d t0, t0, LONGSIZE
st.d zero, t0, 0
PTR_ADDI t0, t0, LONGSIZE
LONG_S zero, t0, 0
bne t0, t1, 1b
la.pcrel t0, fw_arg0
st.d a0, t0, 0 # firmware arguments
PTR_S a0, t0, 0 # firmware arguments
la.pcrel t0, fw_arg1
st.d a1, t0, 0
PTR_S a1, t0, 0
la.pcrel t0, fw_arg2
st.d a2, t0, 0
PTR_S a2, t0, 0
#ifdef CONFIG_PAGE_SIZE_4KB
li.d t0, 0
li.d t1, CSR_STFILL
LONG_LI t0, 0
LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* KSave3 used for percpu base, initialized as 0 */
@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
/* Jump to the new kernel: new_pc = current_pc + random_offset */
pcaddi t0, 0
add.d t0, t0, a0
PTR_ADD t0, t0, a0
jirl zero, t0, 0xc
#endif /* CONFIG_RANDOMIZE_BASE */
@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry)
*/
SYM_CODE_START(smpboot_entry)
SETUP_DMWINS t0
SETUP_TWINS
SETUP_MODES t0
JUMP_VIRT_ADDR t0, t1
SETUP_DMWINS t0
#ifdef CONFIG_PAGE_SIZE_4KB
li.d t0, 0
li.d t1, CSR_STFILL
LONG_LI t0, 0
LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* Enable PG */


@ -93,6 +93,7 @@ static void count_max_entries(Elf_Rela *relas, int num,
(*plts)++;
break;
case R_LARCH_GOT_PC_HI20:
case R_LARCH_GOT_PCADD_HI20:
(*gots)++;
break;
default:


@ -22,72 +22,89 @@
#include <asm/inst.h>
#include <asm/unwind.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
/*
* reloc_rela_handler() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @v: the value of the reloc, with addend for RELA-style
* @rela_stack: the stack used for store relocation info, LOCAL to THIS module
* @rela_stac_top: where the stack operation(pop/push) applies to
*
* Return: 0 upon success, else -ERRNO
*/
typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
long *rela_stack, size_t *rela_stack_top, unsigned int type);
static int rela_stack_push(long stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top >= RELA_STACK_DEPTH)
return -ENOEXEC;
rela_stack[(*rela_stack_top)++] = stack_value;
pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
pr_debug("%s stack_value = 0x%lx\n", __func__, stack_value);
return 0;
}
static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top == 0)
return -ENOEXEC;
*stack_value = rela_stack[--(*rela_stack_top)];
pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
pr_debug("%s stack_value = 0x%lx\n", __func__, *stack_value);
return 0;
}
static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return 0;
}
static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
return -EINVAL;
}
static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*location = v;
return 0;
}
#ifdef CONFIG_32BIT
#define apply_r_larch_64 apply_r_larch_error
#else
static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*(Elf_Addr *)location = v;
return 0;
}
#endif
static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1;
long opr1;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
@ -104,7 +121,7 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@ -118,10 +135,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
}
static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1, opr2, opr3;
long opr1, opr2, opr3;
if (type == R_LARCH_SOP_IF_ELSE) {
err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
@ -164,10 +181,10 @@ static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
}
static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1;
long opr1;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
@ -244,31 +261,33 @@ static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Ad
}
overflow:
pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
pr_err("module %s: opr1 = 0x%lx overflow! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
unaligned:
pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
pr_err("module %s: opr1 = 0x%lx unaligned! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
}
static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
switch (type) {
case R_LARCH_ADD32:
*(s32 *)location += v;
return 0;
case R_LARCH_ADD64:
*(s64 *)location += v;
return 0;
case R_LARCH_SUB32:
*(s32 *)location -= v;
return 0;
#ifdef CONFIG_64BIT
case R_LARCH_ADD64:
*(s64 *)location += v;
return 0;
case R_LARCH_SUB64:
*(s64 *)location -= v;
#endif
return 0;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
@ -278,7 +297,7 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_b26(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
@ -310,15 +329,40 @@ static int apply_r_larch_b26(struct module *mod,
return 0;
}
static int apply_r_larch_pcadd(struct module *mod, u32 *location, Elf_Addr v,
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 for a sign-extension deliberately. */
s32 offset_hi20 = (void *)((v + 0x800)) - (void *)((Elf_Addr)location);
switch (type) {
case R_LARCH_PCADD_LO12:
insn->reg2i12_format.immediate = v & 0xfff;
break;
case R_LARCH_PCADD_HI20:
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return 0;
}
static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 for a sign-extension deliberately. */
s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
(void *)((Elf_Addr)location & ~0xfff);
#ifdef CONFIG_64BIT
Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
ptrdiff_t offset_rem = (void *)v - (void *)anchor;
#endif
switch (type) {
case R_LARCH_PCALA_LO12:
@ -328,6 +372,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
#ifdef CONFIG_64BIT
case R_LARCH_PCALA64_LO20:
v = offset_rem >> 32;
insn->reg1i20_format.immediate = v & 0xfffff;
@ -336,6 +381,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
v = offset_rem >> 52;
insn->reg2i12_format.immediate = v & 0xfff;
break;
#endif
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
@ -346,30 +392,43 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
static int apply_r_larch_got_pc(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
reloc_rela_handler got_handler;
if (!got)
return -EINVAL;
if (type != R_LARCH_GOT_PCADD_LO12) {
v = module_emit_got_entry(mod, sechdrs, v);
if (!v)
return -EINVAL;
}
switch (type) {
case R_LARCH_GOT_PC_LO12:
type = R_LARCH_PCALA_LO12;
got_handler = apply_r_larch_pcala;
break;
case R_LARCH_GOT_PC_HI20:
type = R_LARCH_PCALA_HI20;
got_handler = apply_r_larch_pcala;
break;
case R_LARCH_GOT_PCADD_LO12:
type = R_LARCH_PCADD_LO12;
got_handler = apply_r_larch_pcadd;
break;
case R_LARCH_GOT_PCADD_HI20:
type = R_LARCH_PCADD_HI20;
got_handler = apply_r_larch_pcadd;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
return got_handler(mod, location, v, rela_stack, rela_stack_top, type);
}
static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
@ -377,31 +436,22 @@ static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
return 0;
}
#ifdef CONFIG_32BIT
#define apply_r_larch_64_pcrel apply_r_larch_error
#else
static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
long *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
*(u64 *)location = offset;
return 0;
}
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @v: the value of the reloc, with addend for RELA-style
* @rela_stack: the stack used for store relocation info, LOCAL to THIS module
* @rela_stac_top: where the stack operation(pop/push) applies to
*
* Return: 0 upon success, else -ERRNO
*/
typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
#endif
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
[R_LARCH_NONE ... R_LARCH_TLS_DESC_PCADD_LO12] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32,
@ -414,7 +464,8 @@ static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
[R_LARCH_PCADD_HI20 ... R_LARCH_PCADD_LO12] = apply_r_larch_pcadd,
[R_LARCH_PCALA_HI20 ... R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
[R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
[R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
};
@ -423,9 +474,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *mod)
{
int i, err;
unsigned int type;
s64 rela_stack[RELA_STACK_DEPTH];
int err;
unsigned int i, idx, type;
unsigned int num_relocations;
long rela_stack[RELA_STACK_DEPTH];
size_t rela_stack_top = 0;
reloc_rela_handler handler;
void *location;
@ -436,8 +488,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
sechdrs[relsec].sh_info);
idx = 0;
rela_stack_top = 0;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
for (i = 0; i < num_relocations; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
/* This is the symbol it is referring to */
@ -462,17 +516,59 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return -EINVAL;
}
pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
pr_debug("type %d st_value %lx r_addend %lx loc %lx\n",
(int)ELF_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)location);
(unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (unsigned long)location);
v = sym->st_value + rel[i].r_addend;
if (type == R_LARCH_PCADD_LO12 || type == R_LARCH_GOT_PCADD_LO12) {
bool found = false;
unsigned int j = idx;
do {
u32 hi20_type = ELF_R_TYPE(rel[j].r_info);
unsigned long hi20_location =
sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset;
/* Find the corresponding HI20 relocation entry */
if ((hi20_location == sym->st_value) && (hi20_type == type - 1)) {
s32 hi20, lo12;
Elf_Sym *hi20_sym =
(Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[j].r_info);
unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend;
/* Calculate LO12 offset */
size_t offset = hi20_sym_val - hi20_location;
if (hi20_type == R_LARCH_GOT_PCADD_HI20) {
offset = module_emit_got_entry(mod, sechdrs, hi20_sym_val);
offset = offset - hi20_location;
}
hi20 = (offset + 0x800) & 0xfffff000;
v = lo12 = offset - hi20;
found = true;
break;
}
j = (j + 1) % num_relocations;
} while (idx != j);
if (!found) {
pr_err("%s: Can not find HI20 relocation information\n", mod->name);
return -EINVAL;
}
idx = j; /* Record the previous j-loop end index */
}
switch (type) {
case R_LARCH_B26:
err = apply_r_larch_b26(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12:
case R_LARCH_GOT_PCADD_HI20...R_LARCH_GOT_PCADD_LO12:
err = apply_r_larch_got_pc(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
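
The PCADD/PCALA handling in this file relies on the standard hi20/lo12 split: lo12 is the low 12 bits treated as a signed value, and hi20 is (offset + 0x800) rounded down to a 4 KiB boundary, so that hi20 plus the sign-extended lo12 reproduces the original offset. A small self-check of that identity, using the same expressions as the HI20 pairing code above (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Sample pc-relative offsets, including ones with bit 11 set. */
	int32_t offsets[] = { 0x12345, 0x12fff, -0x2800, 0x7ff, 0x800 };

	for (unsigned int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		int32_t offset = offsets[i];
		int32_t hi20 = (offset + 0x800) & 0xfffff000;	/* as in the pairing code */
		int32_t lo12 = offset - hi20;			/* always within [-0x800, 0x7ff] */

		assert(lo12 >= -0x800 && lo12 <= 0x7ff);
		assert(hi20 + lo12 == offset);
		printf("offset=%d -> hi20=%d lo12=%d\n", offset, hi20, lo12);
	}
	return 0;
}
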


@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned int prid = cpu_data[n].processor_id;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
u64 freq = cpu_clock_freq, bogomips = lpj_fine * cpu_clock_freq;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
do_div(freq, 10000);
do_div(bogomips, const_clock_freq * (5000/HZ));
/*
* For the first processor also print the system type
@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
(lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 100);
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomips % 100);
seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
cpu_pabits + 1, cpu_vabits + 1);
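
The rewritten CPU MHz/BogoMIPS output above pre-scales both values with do_div() so only 32-bit arithmetic is left at print time: freq ends up in units of 10 kHz, so freq / 100 is whole MHz and freq % 100 the two decimal places, and bogomips follows the same pattern. A worked example of the MHz half (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_clock_freq = 2500000000ULL;	/* 2.5 GHz, example value */
	uint64_t freq = cpu_clock_freq / 10000;		/* do_div(freq, 10000) in the kernel */

	/* Mirrors: seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", freq / 100, freq % 100) */
	printf("CPU MHz: %u.%02u\n",
	       (unsigned int)(freq / 100), (unsigned int)(freq % 100));
	return 0;
}

This prints "CPU MHz: 2500.00" for the example frequency.
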


@ -130,6 +130,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
preempt_enable();
if (IS_ENABLED(CONFIG_RANDSTRUCT)) {
memcpy(dst, src, sizeof(struct task_struct));
return 0;
}
if (!used_math())
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
else
@ -377,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
#ifdef CONFIG_64BIT
#ifdef CONFIG_32BIT
void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs)
#else
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
#endif
{
unsigned int i;
@ -395,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */


@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
#ifdef CONFIG_32BIT
if ((unsigned long)addr >= KPRANGE0)
return -EINVAL;
#else
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
#endif
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))


@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random_offset)
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
uint32_t lu12iw, ori, lu32id, lu52id;
uint32_t lu12iw, ori;
#ifdef CONFIG_64BIT
uint32_t lu32id, lu52id;
#endif
union loongarch_instruction *insn = (void *)p->pc;
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
#ifdef CONFIG_64BIT
lu32id = (v >> 32) & 0xfffff;
lu52id = v >> 52;
#endif
insn[0].reg1i20_format.immediate = lu12iw;
insn[1].reg2i12_format.immediate = ori;
#ifdef CONFIG_64BIT
insn[2].reg1i20_format.immediate = lu32id;
insn[3].reg2i12_format.immediate = lu52id;
#endif
}
}
@ -183,7 +190,7 @@ static inline void __init *determine_relocation_address(void)
if (kaslr_disabled())
return destination;
kernel_length = (long)_end - (long)_text;
kernel_length = (unsigned long)_end - (unsigned long)_text;
random_offset = get_random_boot() << 16;
random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
@ -232,7 +239,7 @@ unsigned long __init relocate_kernel(void)
early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
kernel_length = (unsigned long)(_end) - (unsigned long)(_text);
/* Copy the kernel to it's new location */
memcpy(location_new, _text, kernel_length);


@ -56,6 +56,7 @@
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E
#define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
@ -126,7 +127,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) {
/* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */
loongson_sysconf.cores_per_package =
*(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET);
}
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}


@ -16,18 +16,23 @@
*/
.align 5
SYM_FUNC_START(__switch_to)
csrrd t1, LOONGARCH_CSR_PRMD
stptr.d t1, a0, THREAD_CSRPRMD
#ifdef CONFIG_32BIT
PTR_ADDI a0, a0, TASK_STRUCT_OFFSET
PTR_ADDI a1, a1, TASK_STRUCT_OFFSET
#endif
csrrd t1, LOONGARCH_CSR_PRMD
LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
cpu_save_nonscratch a0
stptr.d ra, a0, THREAD_REG01
stptr.d a3, a0, THREAD_SCHED_RA
stptr.d a4, a0, THREAD_SCHED_CFA
LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET)
LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET)
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
la t7, __stack_chk_guard
LONG_L t8, a1, TASK_STACK_CANARY
LONG_S t8, t7, 0
la t7, __stack_chk_guard
LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET)
LONG_SPTR t8, t7, 0
#endif
move tp, a2
cpu_restore_nonscratch a1
@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to)
PTR_ADD t0, t0, tp
set_saved_sp t0, t1, t2
ldptr.d t1, a1, THREAD_CSRPRMD
csrwr t1, LOONGARCH_CSR_PRMD
LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
csrwr t1, LOONGARCH_CSR_PRMD
#ifdef CONFIG_32BIT
PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET
#endif
jr ra
SYM_FUNC_END(__switch_to)


@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long,
prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
{
if (offset & (~PAGE_MASK >> 12))
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - 12));
}
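
The sys_mmap2() added above takes its file offset in fixed 4096-byte units regardless of the kernel page size, which is why the final shift is PAGE_SHIFT - 12 rather than PAGE_SHIFT. The masking check rejects offsets that are not a whole number of pages: with 16 KiB pages, ~PAGE_MASK >> 12 is 0x3, so only multiples of 4 units pass. A quick check of that arithmetic (a sketch, not kernel code):

#include <stdio.h>

static int mmap2_offset_ok(unsigned long offset, int page_shift)
{
	unsigned long page_mask = ~((1UL << page_shift) - 1);

	/* Same test as: if (offset & (~PAGE_MASK >> 12)) return -EINVAL; */
	return !(offset & (~page_mask >> 12));
}

int main(void)
{
	/* 16 KiB pages: only offsets that are multiples of 4 units are accepted. */
	printf("16K pages, offset 3: %s\n", mmap2_offset_ok(3, 14) ? "ok" : "rejected");
	printf("16K pages, offset 4: %s\n", mmap2_offset_ok(4, 14) ? "ok" : "rejected");
	/* 4 KiB pages: every unit is already a full page. */
	printf("4K pages,  offset 3: %s\n", mmap2_offset_ok(3, 12) ? "ok" : "rejected");
	return 0;
}
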
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#ifdef CONFIG_32BIT
#include <asm/syscall_table_32.h>
#else
#include <asm/syscall_table_64.h>
#endif
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
@ -75,7 +88,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
*
* The resulting 6 bits of entropy is seen in SP[9:4].
*/
choose_random_kstack_offset(drdtime());
choose_random_kstack_offset(get_cycles());
syscall_exit_to_user_mode(regs);
}


@ -18,6 +18,7 @@
#include <asm/loongarch.h>
#include <asm/paravirt.h>
#include <asm/time.h>
#include <asm/timex.h>
u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
@ -50,10 +51,10 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config |= CSR_TCFG_EN;
timer_config &= ~CSR_TCFG_PERIOD;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@ -62,15 +63,15 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
static int constant_set_state_periodic(struct clock_event_device *evt)
{
unsigned long period;
unsigned long timer_config;
u64 period = const_clock_freq;
raw_spin_lock(&state_lock);
period = const_clock_freq / HZ;
do_div(period, HZ);
timer_config = period & CSR_TCFG_VAL;
timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@ -83,9 +84,9 @@ static int constant_set_state_shutdown(struct clock_event_device *evt)
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config = csr_read(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
csr_write(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
@ -98,7 +99,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
delta &= CSR_TCFG_VAL;
timer_config = delta | CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
csr_write(timer_config, LOONGARCH_CSR_TCFG);
return 0;
}
@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu)
static unsigned long get_loops_per_jiffy(void)
{
unsigned long lpj = (unsigned long)const_clock_freq;
u64 lpj = const_clock_freq;
do_div(lpj, HZ);
@ -131,13 +132,13 @@ static long init_offset;
void save_counter(void)
{
init_offset = drdtime();
init_offset = get_cycles();
}
void sync_counter(void)
{
/* Ensure counter begin at 0 */
csr_write64(init_offset, LOONGARCH_CSR_CNTC);
csr_write(init_offset, LOONGARCH_CSR_CNTC);
}
int constant_clockevent_init(void)
@ -197,12 +198,12 @@ int constant_clockevent_init(void)
static u64 read_const_counter(struct clocksource *clk)
{
return drdtime();
return get_cycles64();
}
static noinstr u64 sched_clock_read(void)
{
return drdtime();
return get_cycles64();
}
static struct clocksource clocksource_const = {
@ -211,7 +212,9 @@ static struct clocksource clocksource_const = {
.read = read_const_counter,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
#endif
};
int __init constant_clocksource_init(void)
@ -235,7 +238,7 @@ void __init time_init(void)
else
const_clock_freq = calc_const_freq();
init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
init_offset = -(get_cycles() - csr_read(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();


@ -625,7 +625,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
unsigned long badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs *regs)
asmlinkage void cache_parity_error(void)
{
u32 merrctl = csr_read32(LOONGARCH_CSR_MERRCTL);
unsigned long merrera = csr_read(LOONGARCH_CSR_MERRERA);
/* For the moment, report the problem and hang. */
pr_err("Cache error exception:\n");
pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
pr_err("csr_merrctl == %08x\n", merrctl);
pr_err("csr_merrera == %016lx\n", merrera);
panic("Can't handle the cache error!");
}
@ -1130,9 +1133,9 @@ static void configure_exception_vector(void)
eentry = (unsigned long)exception_handlers;
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
csr_write(eentry, LOONGARCH_CSR_EENTRY);
csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY);
csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY);
}
void per_cpu_trap_init(int cpu)


@ -27,12 +27,21 @@ static u32 unaligned_instructions_user;
static u32 unaligned_instructions_kernel;
#endif
static inline unsigned long read_fpr(unsigned int idx)
static inline u64 read_fpr(unsigned int idx)
{
#ifdef CONFIG_64BIT
#define READ_FPR(idx, __value) \
__asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
unsigned long __value;
#else
#define READ_FPR(idx, __value) \
{ \
u32 __value_lo, __value_hi; \
__asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \
__asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \
__value = (__value_lo | ((u64)__value_hi << 32)); \
}
#endif
u64 __value;
switch (idx) {
case 0:
@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx)
return __value;
}
static inline void write_fpr(unsigned int idx, unsigned long value)
static inline void write_fpr(unsigned int idx, u64 value)
{
#ifdef CONFIG_64BIT
#define WRITE_FPR(idx, value) \
__asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
#else
#define WRITE_FPR(idx, value) \
{ \
u32 value_lo = value; \
u32 value_hi = value >> 32; \
__asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \
__asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \
}
#endif
switch (idx) {
case 0:
WRITE_FPR(0, value);
@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned i
bool sign, write;
bool user = user_mode(regs);
unsigned int res, size = 0;
unsigned long value = 0;
u64 value = 0;
union loongarch_instruction insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);


@ -9,6 +9,7 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/timex.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@ -814,7 +815,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
case KVM_REG_LOONGARCH_COUNTER:
*v = drdtime() + vcpu->kvm->arch.time_offset;
*v = get_cycles() + vcpu->kvm->arch.time_offset;
break;
case KVM_REG_LOONGARCH_DEBUG_INST:
*v = INSN_HVCL | KVM_HCALL_SWDBG;
@ -909,7 +910,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
* only set for the first time for smp system
*/
if (vcpu->vcpu_id == 0)
vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
vcpu->arch.st.guest_addr = 0;


@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
/* To silence -Wmissing-prototypes. */
unsigned long long __bswapdi2(unsigned long long u);
unsigned long long notrace __bswapdi2(unsigned long long u)
{
return ___constant_swab64(u);
}
EXPORT_SYMBOL(__bswapdi2);


@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
/* To silence -Wmissing-prototypes. */
unsigned int __bswapsi2(unsigned int u);
unsigned int notrace __bswapsi2(unsigned int u)
{
return ___constant_swab32(u);
}
EXPORT_SYMBOL(__bswapsi2);


@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__clear_user)
#ifdef CONFIG_32BIT
b __clear_user_generic
#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __clear_user_generic", \
"b __clear_user_fast", CPU_FEATURE_UAL
#endif
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user)
* a1: size
*/
SYM_FUNC_START(__clear_user_generic)
beqz a1, 2f
beqz a1, 2f
1: st.b zero, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, -1
bgtz a1, 1b
1: st.b zero, a0, 0
PTR_ADDI a0, a0, 1
PTR_ADDI a1, a1, -1
bgtz a1, 1b
2: move a0, a1
jr ra
2: move a0, a1
jr ra
_asm_extable 1b, 2b
_asm_extable 1b, 2b
SYM_FUNC_END(__clear_user_generic)
#ifdef CONFIG_64BIT
/*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast)
SYM_FUNC_END(__clear_user_fast)
STACK_FRAME_NON_STANDARD __clear_user_fast
#endif


@ -13,11 +13,15 @@
#include <asm/unwind_hints.h>
SYM_FUNC_START(__copy_user)
#ifdef CONFIG_32BIT
b __copy_user_generic
#else
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __copy_user_generic", \
"b __copy_user_fast", CPU_FEATURE_UAL
#endif
SYM_FUNC_END(__copy_user)
EXPORT_SYMBOL(__copy_user)
@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user)
* a2: n
*/
SYM_FUNC_START(__copy_user_generic)
beqz a2, 3f
beqz a2, 3f
1: ld.b t0, a1, 0
2: st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgtz a2, 1b
1: ld.b t0, a1, 0
2: st.b t0, a0, 0
PTR_ADDI a0, a0, 1
PTR_ADDI a1, a1, 1
PTR_ADDI a2, a2, -1
bgtz a2, 1b
3: move a0, a2
jr ra
3: move a0, a2
jr ra
_asm_extable 1b, 3b
_asm_extable 2b, 3b
_asm_extable 1b, 3b
_asm_extable 2b, 3b
SYM_FUNC_END(__copy_user_generic)
#ifdef CONFIG_64BIT
/*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast)
SYM_FUNC_END(__copy_user_fast)
STACK_FRAME_NON_STANDARD __copy_user_fast
#endif


@ -20,9 +20,9 @@ void dump_tlb_regs(void)
pr_info("Index : 0x%0x\n", read_csr_tlbidx());
pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi());
pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0());
pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1());
pr_info("EntryHi : 0x%0*lx\n", field, (unsigned long)read_csr_entryhi());
pr_info("EntryLo0 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo0());
pr_info("EntryLo1 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
@ -73,12 +73,16 @@ static void dump_tlb(int first, int last)
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
#ifdef CONFIG_64BIT
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
#endif
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
@ -86,11 +90,15 @@ static void dump_tlb(int first, int last)
(entrylo0 & ENTRYLO_G) ? 1 : 0,
(entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
/* NR/NX are in awkward places, so mask them off separately */
#ifdef CONFIG_64BIT
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
#endif
pa = pa & PAGE_MASK;
#ifdef CONFIG_64BIT
pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
#endif
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,


@ -24,35 +24,35 @@
* a3: sign
*/
SYM_FUNC_START(unaligned_read)
beqz a2, 5f
beqz a2, 5f
li.w t2, 0
addi.d t0, a2, -1
slli.d t1, t0, 3
add.d a0, a0, t0
li.w t2, 0
LONG_ADDI t0, a2, -1
PTR_SLLI t1, t0, LONGLOG
PTR_ADD a0, a0, t0
beqz a3, 2f
1: ld.b t3, a0, 0
b 3f
beqz a3, 2f
1: ld.b t3, a0, 0
b 3f
2: ld.bu t3, a0, 0
3: sll.d t3, t3, t1
or t2, t2, t3
addi.d t1, t1, -8
addi.d a0, a0, -1
addi.d a2, a2, -1
bgtz a2, 2b
4: st.d t2, a1, 0
2: ld.bu t3, a0, 0
3: LONG_SLLV t3, t3, t1
or t2, t2, t3
LONG_ADDI t1, t1, -8
PTR_ADDI a0, a0, -1
PTR_ADDI a2, a2, -1
bgtz a2, 2b
4: LONG_S t2, a1, 0
move a0, a2
jr ra
move a0, a2
jr ra
5: li.w a0, -EFAULT
jr ra
5: li.w a0, -EFAULT
jr ra
_asm_extable 1b, .L_fixup_handle_unaligned
_asm_extable 2b, .L_fixup_handle_unaligned
_asm_extable 4b, .L_fixup_handle_unaligned
_asm_extable 1b, .L_fixup_handle_unaligned
_asm_extable 2b, .L_fixup_handle_unaligned
_asm_extable 4b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_read)
/*
@ -63,21 +63,21 @@ SYM_FUNC_END(unaligned_read)
* a2: n
*/
SYM_FUNC_START(unaligned_write)
beqz a2, 3f
beqz a2, 3f
li.w t0, 0
1: srl.d t1, a1, t0
2: st.b t1, a0, 0
addi.d t0, t0, 8
addi.d a2, a2, -1
addi.d a0, a0, 1
bgtz a2, 1b
li.w t0, 0
1: LONG_SRLV t1, a1, t0
2: st.b t1, a0, 0
LONG_ADDI t0, t0, 8
PTR_ADDI a2, a2, -1
PTR_ADDI a0, a0, 1
bgtz a2, 1b
move a0, a2
jr ra
move a0, a2
jr ra
3: li.w a0, -EFAULT
jr ra
3: li.w a0, -EFAULT
jr ra
_asm_extable 2b, .L_fixup_handle_unaligned
_asm_extable 2b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_write)


@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
#ifdef CONFIG_EXECMEM
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
static struct execmem_info execmem_info __ro_after_init;
struct execmem_info __init *execmem_arch_setup(void)
@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void)
return &execmem_info;
}
#endif /* CONFIG_EXECMEM */
#endif /* CONFIG_EXECMEM && MODULES_VADDR */


@ -10,75 +10,75 @@
.align 5
SYM_FUNC_START(clear_page)
lu12i.w t0, 1 << (PAGE_SHIFT - 12)
add.d t0, t0, a0
lu12i.w t0, 1 << (PAGE_SHIFT - 12)
PTR_ADD t0, t0, a0
1:
st.d zero, a0, 0
st.d zero, a0, 8
st.d zero, a0, 16
st.d zero, a0, 24
st.d zero, a0, 32
st.d zero, a0, 40
st.d zero, a0, 48
st.d zero, a0, 56
addi.d a0, a0, 128
st.d zero, a0, -64
st.d zero, a0, -56
st.d zero, a0, -48
st.d zero, a0, -40
st.d zero, a0, -32
st.d zero, a0, -24
st.d zero, a0, -16
st.d zero, a0, -8
bne t0, a0, 1b
LONG_S zero, a0, (LONGSIZE * 0)
LONG_S zero, a0, (LONGSIZE * 1)
LONG_S zero, a0, (LONGSIZE * 2)
LONG_S zero, a0, (LONGSIZE * 3)
LONG_S zero, a0, (LONGSIZE * 4)
LONG_S zero, a0, (LONGSIZE * 5)
LONG_S zero, a0, (LONGSIZE * 6)
LONG_S zero, a0, (LONGSIZE * 7)
PTR_ADDI a0, a0, (LONGSIZE * 16)
LONG_S zero, a0, -(LONGSIZE * 8)
LONG_S zero, a0, -(LONGSIZE * 7)
LONG_S zero, a0, -(LONGSIZE * 6)
LONG_S zero, a0, -(LONGSIZE * 5)
LONG_S zero, a0, -(LONGSIZE * 4)
LONG_S zero, a0, -(LONGSIZE * 3)
LONG_S zero, a0, -(LONGSIZE * 2)
LONG_S zero, a0, -(LONGSIZE * 1)
bne t0, a0, 1b
jr ra
jr ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
.align 5
SYM_FUNC_START(copy_page)
lu12i.w t8, 1 << (PAGE_SHIFT - 12)
add.d t8, t8, a0
lu12i.w t8, 1 << (PAGE_SHIFT - 12)
PTR_ADD t8, t8, a0
1:
ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
ld.d t4, a1, 32
ld.d t5, a1, 40
ld.d t6, a1, 48
ld.d t7, a1, 56
LONG_L t0, a1, (LONGSIZE * 0)
LONG_L t1, a1, (LONGSIZE * 1)
LONG_L t2, a1, (LONGSIZE * 2)
LONG_L t3, a1, (LONGSIZE * 3)
LONG_L t4, a1, (LONGSIZE * 4)
LONG_L t5, a1, (LONGSIZE * 5)
LONG_L t6, a1, (LONGSIZE * 6)
LONG_L t7, a1, (LONGSIZE * 7)
st.d t0, a0, 0
st.d t1, a0, 8
ld.d t0, a1, 64
ld.d t1, a1, 72
st.d t2, a0, 16
st.d t3, a0, 24
ld.d t2, a1, 80
ld.d t3, a1, 88
st.d t4, a0, 32
st.d t5, a0, 40
ld.d t4, a1, 96
ld.d t5, a1, 104
st.d t6, a0, 48
st.d t7, a0, 56
ld.d t6, a1, 112
ld.d t7, a1, 120
addi.d a0, a0, 128
addi.d a1, a1, 128
LONG_S t0, a0, (LONGSIZE * 0)
LONG_S t1, a0, (LONGSIZE * 1)
LONG_L t0, a1, (LONGSIZE * 8)
LONG_L t1, a1, (LONGSIZE * 9)
LONG_S t2, a0, (LONGSIZE * 2)
LONG_S t3, a0, (LONGSIZE * 3)
LONG_L t2, a1, (LONGSIZE * 10)
LONG_L t3, a1, (LONGSIZE * 11)
LONG_S t4, a0, (LONGSIZE * 4)
LONG_S t5, a0, (LONGSIZE * 5)
LONG_L t4, a1, (LONGSIZE * 12)
LONG_L t5, a1, (LONGSIZE * 13)
LONG_S t6, a0, (LONGSIZE * 6)
LONG_S t7, a0, (LONGSIZE * 7)
LONG_L t6, a1, (LONGSIZE * 14)
LONG_L t7, a1, (LONGSIZE * 15)
PTR_ADDI a0, a0, (LONGSIZE * 16)
PTR_ADDI a1, a1, (LONGSIZE * 16)
st.d t0, a0, -64
st.d t1, a0, -56
st.d t2, a0, -48
st.d t3, a0, -40
st.d t4, a0, -32
st.d t5, a0, -24
st.d t6, a0, -16
st.d t7, a0, -8
LONG_S t0, a0, -(LONGSIZE * 8)
LONG_S t1, a0, -(LONGSIZE * 7)
LONG_S t2, a0, -(LONGSIZE * 6)
LONG_S t3, a0, -(LONGSIZE * 5)
LONG_S t4, a0, -(LONGSIZE * 4)
LONG_S t5, a0, -(LONGSIZE * 3)
LONG_S t6, a0, -(LONGSIZE * 2)
LONG_S t7, a0, -(LONGSIZE * 1)
bne t8, a0, 1b
jr ra
bne t8, a0, 1b
jr ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)


@ -229,11 +229,11 @@ static void setup_ptwalker(void)
if (cpu_has_ptw)
pwctl1 |= CSR_PWCTL1_PTW;
csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
csr_write(pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write(pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
csr_write((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
csr_write((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}
static void output_pgtable_bits_defines(void)
@ -251,8 +251,10 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
#ifdef CONFIG_64BIT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}


@ -11,10 +11,18 @@
#define INVTLB_ADDR_GFALSE_AND_ASID 5
#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG)
#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG)
#ifdef CONFIG_32BIT
#define PTE_LL ll.w
#define PTE_SC sc.w
#else
#define PTE_LL ll.d
#define PTE_SC sc.d
#endif
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
@ -60,52 +68,61 @@ SYM_CODE_START(handle_tlb_load)
vmalloc_done_load:
/* Get PGD offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#ifdef CONFIG_32BIT
PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
#else
PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
PTR_ALSL t1, ra, t1, _PGD_T_LOG2
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
ld.d ra, t1, 0
PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_load
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_load:
ll.d t0, t1, 0
PTE_LL t0, t1, 0
#else
ld.d t0, t1, 0
PTR_L t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT
beqz ra, nopage_tlb_load
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_load
#else
st.d t0, t1, 0
PTR_S t0, t1, 0
#endif
tlbsrch
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
PTR_L t0, t1, 0
PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@ -115,30 +132,28 @@ smp_pgtable_change_load:
csrrd ra, EXCEPTION_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_load:
la_abs t1, swapper_pg_dir
b vmalloc_done_load
#endif
/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
PTE_LL ra, t1, 0
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
ori t0, ra, _PAGE_VALID
st.d t0, t1, 0
PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@ -158,27 +173,27 @@ tlb_huge_update_load:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
PTR_ADDI t1, zero, 1
PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@ -216,53 +231,71 @@ SYM_CODE_START(handle_tlb_store)
vmalloc_done_store:
/* Get PGD offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#ifdef CONFIG_32BIT
PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
#else
PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
PTR_ALSL t1, ra, t1, _PGD_T_LOG2
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
ld.d ra, t1, 0
PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_store
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
ll.d t0, t1, 0
PTE_LL t0, t1, 0
#else
ld.d t0, t1, 0
PTR_L t0, t1, 0
#endif
#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
#else
PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE
and ra, ra, t0
nor ra, ra, zero
#endif
bnez ra, nopage_tlb_store
#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
or t0, ra, t0
#endif
#ifdef CONFIG_SMP
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_store
#else
st.d t0, t1, 0
PTR_S t0, t1, 0
#endif
tlbsrch
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
PTR_L t0, t1, 0
PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@ -272,31 +305,42 @@ smp_pgtable_change_store:
csrrd ra, EXCEPTION_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_store:
la_abs t1, swapper_pg_dir
b vmalloc_done_store
#endif
/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
PTE_LL ra, t1, 0
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
#else
PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE
and t0, t0, ra
nor t0, t0, zero
#endif
bnez t0, nopage_tlb_store
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#else
PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
or t0, ra, t0
#endif
PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@ -316,28 +360,28 @@ tlb_huge_update_store:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
PTR_ADDI t1, zero, 1
PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@ -375,52 +419,69 @@ SYM_CODE_START(handle_tlb_modify)
vmalloc_done_modify:
/* Get PGD offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#ifdef CONFIG_32BIT
PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
#else
PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
#endif
PTR_ALSL t1, ra, t1, _PGD_T_LOG2
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
PTR_L t1, t1, 0
PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
ld.d ra, t1, 0
PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_modify
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
ll.d t0, t1, 0
PTE_LL t0, t1, 0
#else
ld.d t0, t1, 0
PTR_L t0, t1, 0
#endif
#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_WRITE
#else
PTR_LI ra, _PAGE_WRITE
and ra, t0, ra
#endif
beqz ra, nopage_tlb_modify
#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
or t0, ra, t0
#endif
#ifdef CONFIG_SMP
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_modify
#else
st.d t0, t1, 0
PTR_S t0, t1, 0
#endif
tlbsrch
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
PTR_L t0, t1, 0
PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
@ -430,30 +491,40 @@ smp_pgtable_change_modify:
csrrd ra, EXCEPTION_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_modify:
la_abs t1, swapper_pg_dir
b vmalloc_done_modify
#endif
/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
PTE_LL ra, t1, 0
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_WRITE
#else
PTR_LI t0, _PAGE_WRITE
and t0, ra, t0
#endif
beqz t0, nopage_tlb_modify
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
sc.d t0, t1, 0
PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0
#else
PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
or t0, ra, t0
#endif
PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
@ -473,28 +544,28 @@ tlb_huge_update_modify:
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
PTR_ADDI t1, zero, 1
PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
@ -517,6 +588,44 @@ SYM_CODE_START(handle_tlb_modify_ptw)
jr t0
SYM_CODE_END(handle_tlb_modify_ptw)
#ifdef CONFIG_32BIT
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
li.w ra, 0x1fffffff
csrrd t0, LOONGARCH_CSR_PGD
csrrd t1, LOONGARCH_CSR_TLBRBADV
srli.w t1, t1, PGDIR_SHIFT
slli.w t1, t1, 0x2
add.w t0, t0, t1
and t0, t0, ra
ld.w t0, t0, 0
csrrd t1, LOONGARCH_CSR_TLBRBADV
slli.w t1, t1, (32 - PGDIR_SHIFT)
srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)
slli.w t1, t1, (0x2 + 1)
add.w t0, t0, t1
and t0, t0, ra
ld.w t1, t0, 0x0
csrwr t1, LOONGARCH_CSR_TLBRELO0
ld.w t1, t0, 0x4
csrwr t1, LOONGARCH_CSR_TLBRELO1
tlbfill
csrrd t0, EXCEPTION_KS0
csrrd t1, EXCEPTION_KS1
csrrd ra, EXCEPTION_KS2
ertn
SYM_CODE_END(handle_tlb_refill)
#endif
#ifdef CONFIG_64BIT
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
@ -534,3 +643,4 @@ SYM_CODE_START(handle_tlb_refill)
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
SYM_CODE_END(handle_tlb_refill)
#endif
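
The CONFIG_32BIT handle_tlb_refill added above performs a two-level software
page walk with word-sized operations only. The following C rendering is an
editor's illustration of that walk, not code from the series: the helper name
is made up, csr_read()/csr_write() stand in for the adaptive CSR accessors
introduced elsewhere in this pull, and the real handler is the assembly in the
hunk, which runs untranslated, without a stack or a C environment.

/*
 * Illustrative only: mirrors the arithmetic of the 32-bit refill handler.
 */
static void tlb_refill_walk32(void)
{
	unsigned long pgd  = csr_read(LOONGARCH_CSR_PGD);
	unsigned long badv = csr_read(LOONGARCH_CSR_TLBRBADV);
	unsigned int *slot, *pair;

	/*
	 * Level 1: the PGD slot for this address (4-byte entries), masked
	 * to the low 29 bits (0x1fffffff) so it can be dereferenced while
	 * address translation is disabled.
	 */
	slot = (unsigned int *)((pgd + (badv >> PGDIR_SHIFT) * 4) & 0x1fffffff);

	/*
	 * Level 2: the even/odd PTE pair (two 4-byte entries) covering this
	 * address inside the page table pointed to by *slot.
	 */
	pair = (unsigned int *)((*slot +
		(((badv << (32 - PGDIR_SHIFT)) >>
		  (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)) << 3)) & 0x1fffffff);

	csr_write(pair[0], LOONGARCH_CSR_TLBRELO0);	/* even page */
	csr_write(pair[1], LOONGARCH_CSR_TLBRELO1);	/* odd page */
	/* a tlbfill then installs the entry */
}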


@ -14,6 +14,7 @@
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
@ -97,3 +98,4 @@ static void pci_fixup_vgadev(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev);


@ -10,7 +10,7 @@ static u32 saved_crmd;
static u32 saved_prmd;
static u32 saved_euen;
static u32 saved_ecfg;
static u64 saved_pcpu_base;
static unsigned long saved_pcpu_base;
struct pt_regs saved_regs;
void save_processor_state(void)
@ -20,7 +20,7 @@ void save_processor_state(void)
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_pcpu_base = csr_read64(PERCPU_BASE_KS);
saved_pcpu_base = csr_read(PERCPU_BASE_KS);
if (is_fpu_owner())
save_fp(current);
@ -33,7 +33,7 @@ void restore_processor_state(void)
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
csr_write64(saved_pcpu_base, PERCPU_BASE_KS);
csr_write(saved_pcpu_base, PERCPU_BASE_KS);
if (is_fpu_owner())
restore_fp(current);
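
The csr_read()/csr_write() calls that replace csr_read64()/csr_write64() here
(and in the suspend and EFI-stub hunks further down) come from the adaptive
CSR accessors added elsewhere in this series; their definition is not part of
these hunks. A minimal sketch of the assumed shape, dispatching on the
configured word size:

/* Assumed shape only -- the real definitions come from the "Add adaptive
 * CSR accessors for 32BIT/64BIT" patch, not from this hunk. */
#ifdef CONFIG_64BIT
#define csr_read(csr)		csr_read64(csr)
#define csr_write(val, csr)	csr_write64(val, csr)
#else
#define csr_read(csr)		csr_read32(csr)
#define csr_write(val, csr)	csr_write32(val, csr)
#endif

With accessors like these, callers keep CSR contents in unsigned long, which
is what the u64 -> unsigned long conversions in these hunks rely on.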


@ -72,10 +72,10 @@ static int __init loongson3_acpi_suspend_init(void)
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
pr_info("ACPI S3 supported with hardware register default\n");
loongson_sysconf.suspend_addr = (u64)default_suspend_addr;
loongson_sysconf.suspend_addr = (unsigned long)default_suspend_addr;
} else {
pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n");
loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr));
loongson_sysconf.suspend_addr = (unsigned long)phys_to_virt(PHYSADDR(suspend_addr));
}
#endif
return 0;


@ -20,24 +20,24 @@ u64 loongarch_suspend_addr;
struct saved_registers {
u32 ecfg;
u32 euen;
u64 pgd;
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
u64 pcpu_base;
unsigned long pgd;
unsigned long kpgd;
unsigned long pcpu_base;
};
static struct saved_registers saved_regs;
void loongarch_common_suspend(void)
{
save_counter();
saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL);
saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH);
saved_regs.pgd = csr_read(LOONGARCH_CSR_PGDL);
saved_regs.kpgd = csr_read(LOONGARCH_CSR_PGDH);
saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0);
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
saved_regs.pcpu_base = csr_read(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@ -46,17 +46,17 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
csr_write(eentry, LOONGARCH_CSR_EENTRY);
csr_write(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL);
csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write(saved_regs.pgd, LOONGARCH_CSR_PGDL);
csr_write(saved_regs.kpgd, LOONGARCH_CSR_PGDH);
csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0);
csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1);
csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG);
csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN);
csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
csr_write(saved_regs.pcpu_base, PERCPU_BASE_KS);
}
int loongarch_acpi_suspend(void)


@ -14,41 +14,41 @@
/* preparatory stuff */
.macro SETUP_SLEEP
addi.d sp, sp, -PT_SIZE
st.d $r1, sp, PT_R1
st.d $r2, sp, PT_R2
st.d $r3, sp, PT_R3
st.d $r4, sp, PT_R4
st.d $r21, sp, PT_R21
st.d $r22, sp, PT_R22
st.d $r23, sp, PT_R23
st.d $r24, sp, PT_R24
st.d $r25, sp, PT_R25
st.d $r26, sp, PT_R26
st.d $r27, sp, PT_R27
st.d $r28, sp, PT_R28
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
PTR_ADDI sp, sp, -PT_SIZE
REG_S $r1, sp, PT_R1
REG_S $r2, sp, PT_R2
REG_S $r3, sp, PT_R3
REG_S $r4, sp, PT_R4
REG_S $r21, sp, PT_R21
REG_S $r22, sp, PT_R22
REG_S $r23, sp, PT_R23
REG_S $r24, sp, PT_R24
REG_S $r25, sp, PT_R25
REG_S $r26, sp, PT_R26
REG_S $r27, sp, PT_R27
REG_S $r28, sp, PT_R28
REG_S $r29, sp, PT_R29
REG_S $r30, sp, PT_R30
REG_S $r31, sp, PT_R31
.endm
.macro SETUP_WAKEUP
ld.d $r1, sp, PT_R1
ld.d $r2, sp, PT_R2
ld.d $r3, sp, PT_R3
ld.d $r4, sp, PT_R4
ld.d $r21, sp, PT_R21
ld.d $r22, sp, PT_R22
ld.d $r23, sp, PT_R23
ld.d $r24, sp, PT_R24
ld.d $r25, sp, PT_R25
ld.d $r26, sp, PT_R26
ld.d $r27, sp, PT_R27
ld.d $r28, sp, PT_R28
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
addi.d sp, sp, PT_SIZE
REG_L $r1, sp, PT_R1
REG_L $r2, sp, PT_R2
REG_L $r3, sp, PT_R3
REG_L $r4, sp, PT_R4
REG_L $r21, sp, PT_R21
REG_L $r22, sp, PT_R22
REG_L $r23, sp, PT_R23
REG_L $r24, sp, PT_R24
REG_L $r25, sp, PT_R25
REG_L $r26, sp, PT_R26
REG_L $r27, sp, PT_R27
REG_L $r28, sp, PT_R28
REG_L $r29, sp, PT_R29
REG_L $r30, sp, PT_R30
REG_L $r31, sp, PT_R31
PTR_ADDI sp, sp, PT_SIZE
.endm
.text
@ -59,15 +59,15 @@ SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
la.pcrel t0, acpi_saved_sp
st.d sp, t0, 0
REG_S sp, t0, 0
bl __flush_cache_all
/* Pass RA and SP to BIOS */
addi.d a1, sp, 0
PTR_ADDI a1, sp, 0
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
REG_L t0, t0, 0
jirl ra, t0, 0 /* Call BIOS's STR sleep routine */
/*
@ -83,7 +83,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
csrwr t0, LOONGARCH_CSR_CRMD
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
REG_L sp, t0, 0
SETUP_WAKEUP
jr ra
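
REG_S/REG_L and PTR_ADDI, like the PTR_L/PTR_S/PTR_ALSL family in the TLB
handlers earlier, are width-adaptive assembler macros: the same .S source
assembles to the .w forms on LoongArch32 and the .d forms on LoongArch64.
Their definitions are not shown in this pull; a sketch of the assumed
pattern, as it would appear in an asm header included by .S files:

/* Assumed pattern only -- the real macros live in the arch asm headers. */
#if __SIZEOF_LONG__ == 4
#define REG_L		ld.w
#define REG_S		st.w
#define PTR_ADDI	addi.w
#else /* __SIZEOF_LONG__ == 8 */
#define REG_L		ld.d
#define REG_S		st.d
#define PTR_ADDI	addi.d
#endif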


@ -4,8 +4,9 @@
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile.include
obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
obj-vdso-y := elf.o vgetcpu.o vgetrandom.o \
vgetrandom-chacha.o sigreturn.o
obj-vdso-$(CONFIG_GENERIC_GETTIMEOFDAY) += vgettimeofday.o
# Common compiler flags between ABIs.
ccflags-vdso := \
@ -16,6 +17,10 @@ ccflags-vdso := \
$(CLANG_FLAGS) \
-D__VDSO__
ifdef CONFIG_32BIT
ccflags-vdso += -DBUILD_VDSO32
endif
cflags-vdso := $(ccflags-vdso) \
-isystem $(shell $(CC) -print-file-name=include) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \


@ -7,8 +7,6 @@
#include <generated/asm-offsets.h>
#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
OUTPUT_ARCH(loongarch)
SECTIONS
@ -63,9 +61,11 @@ VERSION
LINUX_5.10 {
global:
__vdso_getcpu;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
#endif
__vdso_getrandom;
__vdso_rt_sigreturn;
local: *;


@ -10,11 +10,19 @@ static __always_inline int read_cpu_id(void)
{
int cpu_id;
#ifdef CONFIG_64BIT
__asm__ __volatile__(
" rdtime.d $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
#else
__asm__ __volatile__(
" rdtimel.w $zero, %0\n"
: "=r" (cpu_id)
:
: "memory");
#endif
return cpu_id;
}


@ -72,10 +72,10 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
desc_ver, priv.runtime_map);
/* Config Direct Mapping */
csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
csr_write(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
csr_write(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
csr_write(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image);


@ -209,8 +209,9 @@ static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
msg->address_hi = 0x0;
msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
| ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
msg->address_lo = (loongarch_avec.msi_base_addr |
(adata->vec & AVEC_IRQ_MASK) << AVEC_IRQ_SHIFT) |
((cpu_logical_map(adata->cpu & AVEC_CPU_MASK)) << AVEC_CPU_SHIFT);
msg->data = 0x0;
}
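
The AVEC_* macros used in the rewritten address_lo computation are introduced
by the "Add and use some macros for AVEC" patch and are not defined in this
excerpt. Reading the values back from the literal constants in the removed
lines, the refactor is presumably value-preserving:

/* Presumed values, inferred from the removed literals above. */
#define AVEC_IRQ_SHIFT		4	/* vector goes into bits 4-11 */
#define AVEC_IRQ_MASK		0xff
#define AVEC_CPU_SHIFT		12	/* logical CPU from bit 12 up */
#define AVEC_CPU_MASK		0xffff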