Merge tag 'kvmarm-fixes-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.18, take #2

* Core fixes

  - Fix trapping regression when no in-kernel irqchip is present
    (20251021094358.1963807-1-sascha.bischoff@arm.com)

  - Check host-provided, untrusted ranges and offsets in pKVM
    (20251016164541.3771235-1-vdonnefort@google.com)
    (20251017075710.2605118-1-sebastianene@google.com)

  - Fix regression restoring the ID_PFR1_EL1 register
    (20251030122707.2033690-1-maz@kernel.org)

  - Fix vgic ITS locking issues when LPIs are not directly injected
    (20251107184847.1784820-1-oupton@kernel.org)

* Test fixes

  - Correct target CPU programming in vgic_lpi_stress selftest
    (20251020145946.48288-1-mdittgen@amazon.de)

  - Fix exposure of SCTLR2_EL2 and ZCR_EL2 in get-reg-list selftest
    (20251023-b4-kvm-arm64-get-reg-list-sctlr-el2-v1-1-088f88ff992a@kernel.org)
    (20251024-kvm-arm64-get-reg-list-zcr-el2-v1-1-0cd0ff75e22f@kernel.org)

* Misc

  - Update Oliver's email address
    (20251107012830.1708225-1-oupton@kernel.org)
Paolo Bonzini 2025-11-09 08:07:55 +01:00
commit ca00c3af8e
12 changed files with 137 additions and 64 deletions

@@ -605,7 +605,8 @@ Oleksij Rempel <o.rempel@pengutronix.de>
Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
Oliver Hartkopp <socketcan@hartkopp.net> <oliver.hartkopp@volkswagen.de>
Oliver Hartkopp <socketcan@hartkopp.net> <oliver@hartkopp.net>
Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
Oliver Upton <oupton@kernel.org> <oupton@google.com>
Oliver Upton <oupton@kernel.org> <oliver.upton@linux.dev>
Ondřej Jirman <megi@xff.cz> <megous@megous.com>
Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>

@@ -13656,7 +13656,7 @@ F: virt/kvm/*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Marc Zyngier <maz@kernel.org>
M: Oliver Upton <oliver.upton@linux.dev>
M: Oliver Upton <oupton@kernel.org>
R: Joey Gouly <joey.gouly@arm.com>
R: Suzuki K Poulose <suzuki.poulose@arm.com>
R: Zenghui Yu <yuzenghui@huawei.com>

@@ -479,7 +479,7 @@ static void __do_ffa_mem_xfer(const u64 func_id,
struct ffa_mem_region_attributes *ep_mem_access;
struct ffa_composite_mem_region *reg;
struct ffa_mem_region *buf;
u32 offset, nr_ranges;
u32 offset, nr_ranges, checked_offset;
int ret = 0;
if (addr_mbz || npages_mbz || fraglen > len ||
@@ -516,7 +516,12 @@ static void __do_ffa_mem_xfer(const u64 func_id,
goto out_unlock;
}
if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
if (check_add_overflow(offset, sizeof(struct ffa_composite_mem_region), &checked_offset)) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
if (fraglen < checked_offset) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
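
The fix above rejects untrusted, host-provided offsets whose sum would wrap, instead of letting a wrapped value slip past the fraglen bound. A standalone sketch of the same pattern, using the compiler builtin that the kernel's check_add_overflow() wraps (validate_fragment() is a made-up name for illustration):

#include <stdint.h>
#include <stdio.h>

static int validate_fragment(uint32_t fraglen, uint32_t offset, uint32_t region_sz)
{
	uint32_t checked_offset;

	/* Reject if offset + region_sz wraps past UINT32_MAX... */
	if (__builtin_add_overflow(offset, region_sz, &checked_offset))
		return -1;

	/* ...then bounds-check against the known-good sum */
	if (fraglen < checked_offset)
		return -1;

	return 0;
}

int main(void)
{
	/* The naive "offset + size" wraps to 0x10 and would pass a fraglen check */
	printf("wrapped sum: 0x%x\n", (uint32_t)(0xfffffff0u + 0x20u));
	/* The checked variant refuses the same hostile offset */
	printf("checked: %d\n", validate_fragment(0x100, 0xfffffff0u, 0x20u));
	return 0;
}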

@@ -367,6 +367,19 @@ static int host_stage2_unmap_dev_all(void)
return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}
/*
* Ensure the PFN range is contained within PA-range.
*
* This check is also robust to overflows and is therefore a requirement before
* using a pfn/nr_pages pair from an untrusted source.
*/
static bool pfn_range_is_valid(u64 pfn, u64 nr_pages)
{
u64 limit = BIT(kvm_phys_shift(&host_mmu.arch.mmu) - PAGE_SHIFT);
return pfn < limit && ((limit - pfn) >= nr_pages);
}
struct kvm_mem_range {
u64 start;
u64 end;
@@ -776,6 +789,9 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
void *virt = __hyp_va(phys);
int ret;
if (!pfn_range_is_valid(pfn, nr_pages))
return -EINVAL;
host_lock_component();
hyp_lock_component();
@@ -804,6 +820,9 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
u64 virt = (u64)__hyp_va(phys);
int ret;
if (!pfn_range_is_valid(pfn, nr_pages))
return -EINVAL;
host_lock_component();
hyp_lock_component();
@@ -887,6 +906,9 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
u64 size = PAGE_SIZE * nr_pages;
int ret;
if (!pfn_range_is_valid(pfn, nr_pages))
return -EINVAL;
host_lock_component();
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (!ret)
@@ -902,6 +924,9 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
u64 size = PAGE_SIZE * nr_pages;
int ret;
if (!pfn_range_is_valid(pfn, nr_pages))
return -EINVAL;
host_lock_component();
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (!ret)
@@ -945,6 +970,9 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
if (!pfn_range_is_valid(pfn, nr_pages))
return -EINVAL;
ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
if (ret)
return ret;
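
The subtraction form in pfn_range_is_valid() is what makes the check overflow-proof: once pfn < limit holds, limit - pfn cannot underflow, so no intermediate result wraps no matter how large nr_pages is. A userspace sketch with a made-up 40-bit PA size (PAGE_SHIFT and the helper name mirror the hunk above; the rest is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool pfn_range_is_valid(uint64_t pfn, uint64_t nr_pages, unsigned int pa_bits)
{
	uint64_t limit = 1ULL << (pa_bits - PAGE_SHIFT);

	/* limit - pfn is safe because pfn < limit is checked first */
	return pfn < limit && (limit - pfn) >= nr_pages;
}

int main(void)
{
	uint64_t pfn = 1, nr_pages = UINT64_MAX, limit = 1ULL << (40 - PAGE_SHIFT);

	/* The naive form wraps to 0 and wrongly accepts the range */
	printf("naive: %d\n", pfn + nr_pages <= limit);
	/* The subtraction form correctly rejects it */
	printf("safe:  %d\n", pfn_range_is_valid(pfn, nr_pages, 40));
	return 0;
}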

@@ -2595,19 +2595,23 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
.val = 0, \
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) { \
ID_DESC(name), \
.visibility = aa32_id_visibility, \
.val = 0, \
}
/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) { \
ID_DESC(name), \
.val = mask, \
}
/*
* 32bit ID regs are fully writable when the guest is 32bit
* capable. Nothing in the KVM code should rely on 32bit features
* anyway, only 64bit, so let the VMM do its worst.
*/
#define AA32_ID_WRITABLE(name) { \
ID_DESC(name), \
.visibility = aa32_id_visibility, \
.val = GENMASK(31, 0), \
}
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
#define ID_FILTERED(sysreg, name, mask) { \
ID_DESC(sysreg), \
@@ -3128,40 +3132,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 mappings of the AArch32 ID registers */
/* CRm=1 */
AA32_ID_SANITISED(ID_PFR0_EL1),
AA32_ID_SANITISED(ID_PFR1_EL1),
AA32_ID_WRITABLE(ID_PFR0_EL1),
AA32_ID_WRITABLE(ID_PFR1_EL1),
{ SYS_DESC(SYS_ID_DFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_dfr0_el1,
.visibility = aa32_id_visibility,
.reset = read_sanitised_id_dfr0_el1,
.val = ID_DFR0_EL1_PerfMon_MASK |
ID_DFR0_EL1_CopDbg_MASK, },
.val = GENMASK(31, 0) },
ID_HIDDEN(ID_AFR0_EL1),
AA32_ID_SANITISED(ID_MMFR0_EL1),
AA32_ID_SANITISED(ID_MMFR1_EL1),
AA32_ID_SANITISED(ID_MMFR2_EL1),
AA32_ID_SANITISED(ID_MMFR3_EL1),
AA32_ID_WRITABLE(ID_MMFR0_EL1),
AA32_ID_WRITABLE(ID_MMFR1_EL1),
AA32_ID_WRITABLE(ID_MMFR2_EL1),
AA32_ID_WRITABLE(ID_MMFR3_EL1),
/* CRm=2 */
AA32_ID_SANITISED(ID_ISAR0_EL1),
AA32_ID_SANITISED(ID_ISAR1_EL1),
AA32_ID_SANITISED(ID_ISAR2_EL1),
AA32_ID_SANITISED(ID_ISAR3_EL1),
AA32_ID_SANITISED(ID_ISAR4_EL1),
AA32_ID_SANITISED(ID_ISAR5_EL1),
AA32_ID_SANITISED(ID_MMFR4_EL1),
AA32_ID_SANITISED(ID_ISAR6_EL1),
AA32_ID_WRITABLE(ID_ISAR0_EL1),
AA32_ID_WRITABLE(ID_ISAR1_EL1),
AA32_ID_WRITABLE(ID_ISAR2_EL1),
AA32_ID_WRITABLE(ID_ISAR3_EL1),
AA32_ID_WRITABLE(ID_ISAR4_EL1),
AA32_ID_WRITABLE(ID_ISAR5_EL1),
AA32_ID_WRITABLE(ID_MMFR4_EL1),
AA32_ID_WRITABLE(ID_ISAR6_EL1),
/* CRm=3 */
AA32_ID_SANITISED(MVFR0_EL1),
AA32_ID_SANITISED(MVFR1_EL1),
AA32_ID_SANITISED(MVFR2_EL1),
AA32_ID_WRITABLE(MVFR0_EL1),
AA32_ID_WRITABLE(MVFR1_EL1),
AA32_ID_WRITABLE(MVFR2_EL1),
ID_UNALLOCATED(3,3),
AA32_ID_SANITISED(ID_PFR2_EL1),
AA32_ID_WRITABLE(ID_PFR2_EL1),
ID_HIDDEN(ID_DFR1_EL1),
AA32_ID_SANITISED(ID_MMFR5_EL1),
AA32_ID_WRITABLE(ID_MMFR5_EL1),
ID_UNALLOCATED(3,7),
/* AArch64 ID registers */
@@ -5606,11 +5609,13 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
guard(mutex)(&kvm->arch.config_lock);
if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
irqchip_in_kernel(kvm) &&
kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
if (!irqchip_in_kernel(kvm)) {
u64 val;
val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
}
if (vcpu_has_nv(vcpu)) {
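
In these descriptors, .val holds the mask of ID-register bits userspace may rewrite, so AA32_ID_WRITABLE's GENMASK(31, 0) opens up the entire 32-bit register. A simplified, illustrative gate for how such a mask polices writes (the kernel's actual arbitration in the set_id_reg() path is per-field and more involved; the struct and names below are stand-ins):

#include <errno.h>
#include <stdint.h>

struct id_reg_desc {
	uint64_t cur;		/* current value of the ID register */
	uint64_t writable;	/* mask of writable bits, i.e. .val */
};

static int set_id_reg_sketch(struct id_reg_desc *desc, uint64_t new_val)
{
	/* Flipping any bit outside the writable mask is rejected */
	if ((new_val ^ desc->cur) & ~desc->writable)
		return -EINVAL;

	desc->cur = new_val;
	return 0;
}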

@@ -64,29 +64,37 @@ static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
static int iter_mark_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long intid, flags;
struct vgic_irq *irq;
unsigned long intid;
int nr_lpis = 0;
xa_lock_irqsave(&dist->lpi_xa, flags);
xa_for_each(&dist->lpi_xa, intid, irq) {
if (!vgic_try_get_irq_ref(irq))
continue;
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
__xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
nr_lpis++;
}
xa_unlock_irqrestore(&dist->lpi_xa, flags);
return nr_lpis;
}
static void iter_unmark_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long intid, flags;
struct vgic_irq *irq;
unsigned long intid;
xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
xa_lock_irqsave(&dist->lpi_xa, flags);
__xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
/* vgic_put_irq() expects to be called outside of the xa_lock */
vgic_put_irq(kvm, irq);
}
}
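
The debug iterator now takes the xarray lock once, with interrupts disabled, and switches to the pre-locked __xa_set_mark()/__xa_clear_mark() helpers, since the self-locking xa_* variants would deadlock on a lock that is already held. A kernel-context sketch of that pattern (mark_all_entries() is a made-up helper):

#include <linux/xarray.h>

static int mark_all_entries(struct xarray *xa)
{
	unsigned long index, flags;
	void *entry;
	int nr = 0;

	xa_lock_irqsave(xa, flags);
	xa_for_each(xa, index, entry) {
		/* __xa_set_mark() expects xa_lock to be held by the caller */
		__xa_set_mark(xa, index, XA_MARK_0);
		nr++;
	}
	xa_unlock_irqrestore(xa, flags);

	return nr;
}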

@@ -53,7 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
xa_init(&dist->lpi_xa);
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}
/* CREATION */
@@ -71,6 +71,7 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
struct kvm_vcpu *vcpu;
u64 aa64pfr0, pfr1;
unsigned long i;
int ret;
@@ -161,10 +162,19 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
else
} else {
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
}
kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
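
kvm_vgic_create() now advertises (or hides) the GIC through the VM's ID registers with a clear-then-insert sequence on the GIC field. A kernel-context sketch of that read-modify-write, using the generic FIELD_PREP() in place of the arm64 SYS_FIELD_PREP_ENUM() wrapper (the field position matches ID_AA64PFR0_EL1.GIC, where the enum value 1 means IMP; the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define PFR0_GIC	GENMASK_ULL(27, 24)	/* ID_AA64PFR0_EL1.GIC */

static u64 set_gic_field(u64 pfr0, bool gicv3_cpuif)
{
	pfr0 &= ~PFR0_GIC;			/* always clear the stale value */
	if (gicv3_cpuif)
		pfr0 |= FIELD_PREP(PFR0_GIC, 1);	/* GIC == IMP */
	return pfr0;
}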

@@ -78,6 +78,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
unsigned long flags;
int ret;
/* In this case there is no put, since we keep the reference. */
@@ -88,7 +89,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (!irq)
return ERR_PTR(-ENOMEM);
ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
if (ret) {
kfree(irq);
return ERR_PTR(ret);
@@ -103,7 +104,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->target_vcpu = vcpu;
irq->group = 1;
xa_lock(&dist->lpi_xa);
xa_lock_irqsave(&dist->lpi_xa, flags);
/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -114,21 +115,18 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
/* Someone was faster with adding this LPI, lets use that. */
kfree(irq);
irq = oldirq;
goto out_unlock;
} else {
ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
}
ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
xa_unlock_irqrestore(&dist->lpi_xa, flags);
if (ret) {
xa_release(&dist->lpi_xa, intid);
kfree(irq);
}
out_unlock:
xa_unlock(&dist->lpi_xa);
if (ret)
return ERR_PTR(ret);
}
/*
* We "cache" the configuration table entries in our struct vgic_irq's.

@@ -301,7 +301,8 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
return;
/* Hide GICv3 sysreg if necessary */
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2 ||
!irqchip_in_kernel(vcpu->kvm)) {
vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
ICH_HCR_EL2_TC);
return;

@@ -28,7 +28,7 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* kvm->arch.config_lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_dist->lpi_xa.xa_lock
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
*
@@ -141,32 +141,39 @@ static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long flags;
if (irq->intid >= VGIC_MIN_LPI)
might_lock(&dist->lpi_xa.xa_lock);
/*
* Normally the lock is only taken when the refcount drops to 0.
* Acquire/release it early on lockdep kernels to make locking issues
* in rare release paths a bit more obvious.
*/
if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
}
if (!__vgic_put_irq(kvm, irq))
return;
xa_lock(&dist->lpi_xa);
xa_lock_irqsave(&dist->lpi_xa, flags);
vgic_release_lpi_locked(dist, irq);
xa_unlock(&dist->lpi_xa);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
static void vgic_release_deleted_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long intid;
unsigned long flags, intid;
struct vgic_irq *irq;
xa_lock(&dist->lpi_xa);
xa_lock_irqsave(&dist->lpi_xa, flags);
xa_for_each(&dist->lpi_xa, intid, irq) {
if (irq->pending_release)
vgic_release_lpi_locked(dist, irq);
}
xa_unlock(&dist->lpi_xa);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
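
The guard() in vgic_put_irq() is a lockdep-priming idiom: the release lock is normally only needed when the refcount truly drops to zero, so on lockdep kernels it is briefly taken and dropped on every put, which surfaces ordering bugs on the common path as well. A kernel-context sketch (put_object() and its arguments are illustrative):

#include <linux/cleanup.h>
#include <linux/kconfig.h>
#include <linux/spinlock.h>

static void put_object(spinlock_t *release_lock, bool last_ref)
{
	/* On lockdep kernels, model the rare dependency on every call;
	 * guard() drops the lock again at the end of this scope. */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		guard(spinlock_irqsave)(release_lock);
	}

	if (!last_ref)
		return;

	/* slow path: tear the object down under release_lock */
}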

@@ -63,11 +63,13 @@ static struct feature_id_reg feat_id_regs[] = {
REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
REG_FEAT(SCTLR2_EL2, ID_AA64MMFR3_EL1, SCTLRX, IMP),
REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
REG_FEAT(CNTHV_CVAL_EL2,ID_AA64MMFR1_EL1, VH, IMP),
REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
};
bool filter_reg(__u64 reg)
@@ -718,6 +720,7 @@ static __u64 el2_regs[] = {
SYS_REG(VMPIDR_EL2),
SYS_REG(SCTLR_EL2),
SYS_REG(ACTLR_EL2),
SYS_REG(SCTLR2_EL2),
SYS_REG(HCR_EL2),
SYS_REG(MDCR_EL2),
SYS_REG(CPTR_EL2),

@@ -15,6 +15,8 @@
#include "gic_v3.h"
#include "processor.h"
#define GITS_COLLECTION_TARGET_SHIFT 16
static u64 its_read_u64(unsigned long offset)
{
return readq_relaxed(GITS_BASE_GVA + offset);
@@ -163,6 +165,11 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}
static u64 procnum_to_rdbase(u32 vcpu_id)
{
return vcpu_id << GITS_COLLECTION_TARGET_SHIFT;
}
#define GITS_CMDQ_POLL_ITERATIONS 0
static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd)
@@ -217,7 +224,7 @@ void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool val
its_encode_cmd(&cmd, GITS_CMD_MAPC);
its_encode_collection(&cmd, collection_id);
its_encode_target(&cmd, vcpu_id);
its_encode_target(&cmd, procnum_to_rdbase(vcpu_id));
its_encode_valid(&cmd, valid);
its_send_cmd(cmdq_base, &cmd);
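
The selftest bug was passing a raw vCPU number where the MAPC command expects an RDbase. Assuming the encode helper drops the low 16 bits of the target value, as the GICv3 ITS command encoding does with GITS_TYPER.PTA == 0, an unshifted ID silently collapsed to redistributor 0; the processor number must sit above those bits, hence the shift. A standalone sketch of the encoding (values illustrative):

#include <stdint.h>
#include <stdio.h>

#define GITS_COLLECTION_TARGET_SHIFT	16

static uint64_t procnum_to_rdbase(uint32_t vcpu_id)
{
	/* Place the processor number above the ignored low 16 bits */
	return (uint64_t)vcpu_id << GITS_COLLECTION_TARGET_SHIFT;
}

int main(void)
{
	/* vCPU 3: raw 0x3 would be truncated away; the RDbase form is 0x30000 */
	printf("rdbase(3) = 0x%llx\n",
	       (unsigned long long)procnum_to_rdbase(3));
	return 0;
}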