Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
  arm64: mm: Test for pmd_sect() in vmemmap_check_pmd()
  arm64/mm: Replace open encodings with PXD_TABLE_BIT
  arm64/mm: Rename pte_mkpresent() as pte_mkvalid()
  arm64: Kconfig: force ARM64_PAN=y when enabling TTBR0 sw PAN
  arm64/kvm: Avoid invalid physical addresses to signal owner updates
  arm64/kvm: Configure HYP TCR.PS/DS based on host stage1
  arm64/mm: Override PARange for !LPA2 and use it consistently
  arm64/mm: Reduce PA space to 48 bits when LPA2 is not enabled
commit 602ffd4ce3

@@ -1379,7 +1379,6 @@ config ARM64_VA_BITS_48
 
 config ARM64_VA_BITS_52
 	bool "52-bit"
-	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
 	help
 	  Enable 52-bit virtual addressing for userspace when explicitly
 	  requested via a hint to mmap(). The kernel will also use 52-bit
@@ -1431,7 +1430,6 @@ config ARM64_PA_BITS_48
 config ARM64_PA_BITS_52
 	bool "52-bit"
 	depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
-	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
 	help
 	  Enable support for a 52-bit physical address space, introduced as
 	  part of the ARMv8.2-LPA extension.
@@ -1681,6 +1679,7 @@ config RODATA_FULL_DEFAULT_ENABLED
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	depends on !KCSAN
+	select ARM64_PAN
 	help
 	  Enabling this option prevents the kernel from accessing
 	  user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1937,7 +1936,6 @@ config ARM64_RAS_EXTN
 config ARM64_CNP
 	bool "Enable support for Common Not Private (CNP) translations"
 	default y
-	depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
 	help
 	  Common Not Private (CNP) allows translation table entries to
 	  be shared between different PEs in the same inner shareable
@@ -2132,7 +2130,7 @@ config ARM64_MTE
 	depends on AS_HAS_ARMV8_5
 	depends on AS_HAS_LSE_ATOMICS
 	# Required for tag checking in the uaccess routines
-	depends on ARM64_PAN
+	select ARM64_PAN
 	select ARCH_HAS_SUBPAGE_FAULTS
 	select ARCH_USES_HIGH_VMA_FLAGS
 	select ARCH_USES_PG_ARCH_2

@@ -343,6 +343,11 @@ alternative_cb_end
 	// Narrow PARange to fit the PS field in TCR_ELx
 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
 	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
+#ifdef CONFIG_ARM64_LPA2
+alternative_if_not ARM64_HAS_VA52
+	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
+alternative_else_nop_endif
+#endif
 	cmp	\tmp0, \tmp1
 	csel	\tmp0, \tmp1, \tmp0, hi
 	bfi	\tcr, \tmp0, \pos, #3
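
Aside (not part of the diff): the added lines cap the PARange value programmed into TCR_ELx.PS at the 48-bit encoding when the kernel is built with LPA2 support but 52-bit addressing turned out to be unavailable. Below is a minimal user-space sketch of the same clamp-and-insert logic; PARANGE_MAX, the 3-bit extraction, and the field position are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real encodings live in the arm64 headers. */
#define PARANGE_48	0x5	/* ID_AA64MMFR0_EL1.PARange encoding for 48 bits */
#define PARANGE_MAX	0x6	/* assumed maximum the kernel will program */

/* Mirror of the asm sequence: clamp PARange and insert it at 'pos' in tcr. */
static uint64_t compute_pa_size(uint64_t mmfr0, uint64_t tcr,
				unsigned int parange_shift, unsigned int pos,
				int lpa2_built_in, int has_va52)
{
	uint64_t parange = (mmfr0 >> parange_shift) & 0x7;	/* ubfx ..., #3 */
	uint64_t limit = PARANGE_MAX;

	/* With LPA2 built in but 52-bit VA unavailable, cap at 48 bits. */
	if (lpa2_built_in && !has_va52)
		limit = PARANGE_48;

	if (parange > limit)			/* cmp + csel ..., hi */
		parange = limit;

	tcr &= ~(UINT64_C(0x7) << pos);		/* bfi: insert 3 bits at 'pos' */
	tcr |= parange << pos;
	return tcr;
}

int main(void)
{
	/* Pretend PARange reads back as 0x6 (52 bits) but 52-bit VA is absent. */
	uint64_t tcr = compute_pa_size(0x6, 0, 0, 32, 1, 0);

	printf("programmed PS/IPS encoding: %llx\n",
	       (unsigned long long)((tcr >> 32) & 0x7));
	return 0;
}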

@@ -222,12 +222,6 @@
  */
 #define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)
 
-/*
- * Highest possible physical address supported.
- */
-#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
-#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
-
 #define TTBR_CNP_BIT		(UL(1) << 0)
 
 /*

@@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared;
 #define lpa2_is_enabled()	false
 #define PTE_MAYBE_SHARED	PTE_SHARED
 #define PMD_MAYBE_SHARED	PMD_SECT_S
+#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
 #else
 static inline bool __pure lpa2_is_enabled(void)
 {
@@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void)
 
 #define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
 #define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
 #endif
 
+/*
+ * Highest possible physical address supported.
+ */
+#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
+
 /*
  * If we have userspace only BTI we don't want to mark kernel pages
  * guarded even if the system does support BTI.
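
Aside (not part of the diff): the net effect of moving these definitions is that PHYS_MASK_SHIFT, and hence PHYS_MASK, now follows the runtime LPA2 decision instead of CONFIG_ARM64_PA_BITS alone. A small standalone sketch of that behaviour, using stand-in names rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for CONFIG_ARM64_PA_BITS; illustrative only. */
#define CONFIG_PA_BITS	52

static int lpa2_enabled;	/* toggled for the demo */

static unsigned int phys_mask_shift(void)
{
	/* Mirrors the new definition: fall back to 48 bits without LPA2. */
	return lpa2_enabled ? CONFIG_PA_BITS : 48;
}

static uint64_t phys_mask(void)
{
	return (UINT64_C(1) << phys_mask_shift()) - 1;
}

int main(void)
{
	lpa2_enabled = 0;
	printf("no LPA2:   PHYS_MASK = 0x%016llx\n", (unsigned long long)phys_mask());
	lpa2_enabled = 1;
	printf("with LPA2: PHYS_MASK = 0x%016llx\n", (unsigned long long)phys_mask());
	return 0;
}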

@@ -273,7 +273,7 @@ static inline pte_t pte_mknoncont(pte_t pte)
 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
 }
 
-static inline pte_t pte_mkpresent(pte_t pte)
+static inline pte_t pte_mkvalid(pte_t pte)
 {
 	return set_pte_bit(pte, __pgprot(PTE_VALID));
 }

@@ -5,7 +5,10 @@
 #ifndef __ASM_SPARSEMEM_H
 #define __ASM_SPARSEMEM_H
 
-#define MAX_PHYSMEM_BITS		CONFIG_ARM64_PA_BITS
+#include <asm/pgtable-prot.h>
+
+#define MAX_PHYSMEM_BITS		PHYS_MASK_SHIFT
+#define MAX_POSSIBLE_PHYSMEM_BITS	(52)
 
 /*
  * Section size must be at least 512MB for 64K base

@@ -3509,7 +3509,7 @@ static void verify_hyp_capabilities(void)
 		return;
 
 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 
 	/* Verify VMID bits */

@@ -83,6 +83,15 @@ static bool __init mmfr2_varange_filter(u64 val)
 		id_aa64mmfr0_override.val |=
 			(ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
 		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+
+		/*
+		 * Override PARange to 48 bits - the override will just be
+		 * ignored if the actual PARange is smaller, but this is
+		 * unlikely to be the case for LPA2 capable silicon.
+		 */
+		id_aa64mmfr0_override.val |=
+			ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
+		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
 	}
 #endif
 	return true;
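
Aside (not part of the diff): id_aa64mmfr0_override is a val/mask pair, and conceptually an override is applied as (reg & ~mask) | (val & mask) during feature sanitisation. A minimal sketch of forcing the 4-bit PARange field (assumed here to sit at bits [3:0]) down to the 48-bit encoding, mirroring the pattern in the hunk; the helper names are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: ID_AA64MMFR0_EL1.PARange occupies bits [3:0]. */
#define PARANGE_SHIFT	0
#define PARANGE_48	UINT64_C(0x5)	/* encoding for a 48-bit PA range */

struct reg_override {
	uint64_t val;
	uint64_t mask;
};

/* Conceptual application of an override: masked bits come from 'val'. */
static uint64_t apply_override(uint64_t reg, const struct reg_override *ovr)
{
	return (reg & ~ovr->mask) | (ovr->val & ovr->mask);
}

int main(void)
{
	struct reg_override ovr = { 0, 0 };
	uint64_t mmfr0 = UINT64_C(0x6) << PARANGE_SHIFT;	/* hardware reports 52 bits */

	/* Same pattern as the hunk above: force PARange down to 48 bits. */
	ovr.val  |= PARANGE_48 << PARANGE_SHIFT;
	ovr.mask |= UINT64_C(0xf) << PARANGE_SHIFT;

	printf("PARange after override: 0x%llx\n",
	       (unsigned long long)(apply_override(mmfr0, &ovr) & 0xf));
	return 0;
}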

@@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
 {
 	u64 sctlr = read_sysreg(sctlr_el1);
 	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
+	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
+					ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+	tcr &= ~TCR_IPS_MASK;
+	tcr |= parange << TCR_IPS_SHIFT;
 
 	asm("	msr	sctlr_el1, %0	;"
 	    "	isb			;"

@@ -1990,8 +1990,7 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	unsigned long tcr;
+	unsigned long tcr, ips;
 
 	/*
 	 * Calculate the raw per-cpu offset without a translation from the
@@ -2005,6 +2004,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 	params->mair_el2 = read_sysreg(mair_el1);
 
 	tcr = read_sysreg(tcr_el1);
+	ips = FIELD_GET(TCR_IPS_MASK, tcr);
 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
 		tcr |= TCR_EPD1_MASK;
 	} else {
@@ -2014,8 +2014,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 	tcr &= ~TCR_T0SZ_MASK;
 	tcr |= TCR_T0SZ(hyp_va_bits);
 	tcr &= ~TCR_EL2_PS_MASK;
-	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
-	if (kvm_lpa2_is_enabled())
+	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
+	if (lpa2_is_enabled())
 		tcr |= TCR_EL2_DS;
 	params->tcr_el2 = tcr;
 
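
Aside (not part of the diff): instead of re-deriving the hypervisor's output size from ID_AA64MMFR0_EL1, the hunk copies the host's TCR_EL1.IPS straight into TCR_EL2.PS. A minimal sketch of that FIELD_GET/FIELD_PREP move, with the field positions (IPS at bits [34:32], PS at bits [18:16]) stated as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions: TCR_EL1.IPS bits [34:32], TCR_EL2.PS bits [18:16]. */
#define TCR_EL1_IPS_SHIFT	32
#define TCR_EL1_IPS_MASK	(UINT64_C(0x7) << TCR_EL1_IPS_SHIFT)
#define TCR_EL2_PS_SHIFT	16
#define TCR_EL2_PS_MASK		(UINT64_C(0x7) << TCR_EL2_PS_SHIFT)

/*
 * Equivalent of: ips = FIELD_GET(TCR_IPS_MASK, tcr_el1);
 *                tcr_el2 |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
 */
static uint64_t propagate_ips_to_ps(uint64_t tcr_el1, uint64_t tcr_el2)
{
	uint64_t ips = (tcr_el1 & TCR_EL1_IPS_MASK) >> TCR_EL1_IPS_SHIFT;

	tcr_el2 &= ~TCR_EL2_PS_MASK;
	tcr_el2 |= ips << TCR_EL2_PS_SHIFT;
	return tcr_el2;
}

int main(void)
{
	uint64_t tcr_el1 = UINT64_C(0x5) << TCR_EL1_IPS_SHIFT;	/* 48-bit IPS */
	uint64_t tcr_el2 = propagate_ips_to_ps(tcr_el1, 0);

	printf("TCR_EL2.PS = %llx\n",
	       (unsigned long long)((tcr_el2 >> TCR_EL2_PS_SHIFT) & 0x7));
	return 0;
}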

@@ -35,14 +35,6 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
 	return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
 }
 
-static bool kvm_phys_is_valid(u64 phys)
-{
-	u64 parange_max = kvm_get_parange_max();
-	u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
-
-	return phys < BIT(shift);
-}
-
 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
 {
 	u64 granule = kvm_granule_size(ctx->level);
@@ -53,7 +45,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
 	if (granule > (ctx->end - ctx->addr))
 		return false;
 
-	if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
+	if (!IS_ALIGNED(phys, granule))
 		return false;
 
 	return IS_ALIGNED(ctx->addr, granule);
@@ -587,6 +579,9 @@ struct stage2_map_data {
 
 	/* Force mappings to page granularity */
 	bool force_pte;
+
+	/* Walk should update owner_id only */
+	bool annotation;
 };
 
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
@@ -885,18 +880,7 @@ static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
 {
 	u64 phys = data->phys;
 
-	/*
-	 * Stage-2 walks to update ownership data are communicated to the map
-	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
-	 * which could overflow and make the address valid again.
-	 */
-	if (!kvm_phys_is_valid(phys))
-		return phys;
-
-	/*
-	 * Otherwise, work out the correct PA based on how far the walk has
-	 * gotten.
-	 */
+	/* Work out the correct PA based on how far the walk has gotten */
 	return phys + (ctx->addr - ctx->start);
 }
 
@@ -908,6 +892,9 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 	if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
 		return false;
 
+	if (data->annotation)
+		return true;
+
 	return kvm_block_mapping_supported(ctx, phys);
 }
 
@@ -923,7 +910,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 	if (!stage2_leaf_mapping_allowed(ctx, data))
 		return -E2BIG;
 
-	if (kvm_phys_is_valid(phys))
+	if (!data->annotation)
 		new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 	else
 		new = kvm_init_invalid_leaf_owner(data->owner_id);
@@ -1085,11 +1072,11 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
 {
 	int ret;
 	struct stage2_map_data map_data = {
-		.phys		= KVM_PHYS_INVALID,
 		.mmu		= pgt->mmu,
 		.memcache	= mc,
 		.owner_id	= owner_id,
 		.force_pte	= true,
+		.annotation	= true,
 	};
 	struct kvm_pgtable_walker walker = {
 		.cb		= stage2_map_walker,
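
Aside (not part of the diff): owner-update ("annotation") walks used to be signalled by passing an out-of-range PA (KVM_PHYS_INVALID); the series replaces that sentinel with an explicit flag in stage2_map_data. A toy model of the resulting control flow, with simplified names that are not the kernel's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct stage2_map_data. */
struct map_data {
	uint64_t phys;
	uint8_t  owner_id;
	bool     annotation;	/* walk updates owner_id only */
};

/* Rough analogue of the try_leaf decision after the change. */
static void try_leaf(const struct map_data *d)
{
	if (!d->annotation)
		printf("install valid leaf at PA 0x%llx\n",
		       (unsigned long long)d->phys);
	else
		printf("install invalid annotation PTE, owner %u\n",
		       (unsigned)d->owner_id);
}

int main(void)
{
	struct map_data map   = { .phys = 0x80000000, .annotation = false };
	struct map_data owner = { .owner_id = 2, .annotation = true };

	try_leaf(&map);		/* normal mapping path */
	try_leaf(&owner);	/* ownership annotation path, no sentinel PA needed */
	return 0;
}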

@@ -279,7 +279,12 @@ void __init arm64_memblock_init(void)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
-		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+
+		/*
+		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
+		 * map randomization can be enabled by shrinking the IPA space.
+		 */
+		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 		int parange = cpuid_feature_extract_unsigned_field(
 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 		s64 range = linear_region_size -

@@ -1169,7 +1169,8 @@ int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 			       unsigned long addr, unsigned long next)
 {
 	vmemmap_verify((pte_t *)pmdp, node, addr, next);
-	return 1;
+
+	return pmd_sect(READ_ONCE(*pmdp));
 }
 
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
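
Aside (not part of the diff): vmemmap_check_pmd() now reports whether the PMD actually holds a section (block) mapping instead of unconditionally returning 1. A rough model of the descriptor check it relies on, assuming the classic encoding where bits [1:0] are 0b01 for a block and 0b11 for a table (LPA2 variations ignored):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_MASK	UINT64_C(0x3)	/* descriptor bits [1:0] */
#define PMD_TYPE_SECT	UINT64_C(0x1)	/* valid block (section) mapping */
#define PMD_TYPE_TABLE	UINT64_C(0x3)	/* valid table pointer */

/* Rough model of pmd_sect(): true only for a block descriptor. */
static bool pmd_is_section(uint64_t pmdval)
{
	return (pmdval & PMD_TYPE_MASK) == PMD_TYPE_SECT;
}

int main(void)
{
	printf("block entry: %d\n", pmd_is_section(0x40000000 | PMD_TYPE_SECT));
	printf("table entry: %d\n", pmd_is_section(0x40000000 | PMD_TYPE_TABLE));
	return 0;
}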

@@ -57,7 +57,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 		 */
 		BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-		__set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
+		__set_pte(dst_ptep, pte_mkvalid(pte_mkwrite_novma(pte)));
 	}
 }
 