perf: arm_pmu: Request specific affinities for per CPU NMIs/interrupts

Let the PMU driver request both per-CPU NMIs and normal interrupts with an
affinity mask matching the PMU affinity. When no PMU structure is available
yet (the ACPI probing case), fall back to cpu_possible_mask.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-19-maz@kernel.org
Author: Will Deacon, 2025-10-20 13:29:35 +01:00; committed by Thomas Gleixner
commit 54b350fa8e, parent c734af3b2b
4 changed files with 31 additions and 23 deletions
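
For orientation before the per-file hunks: the request path now derives an
affinity mask from the PMU and passes it to the per-CPU NMI/IRQ request
calls. A condensed sketch of the resulting flow, assembled from the hunks
below rather than copied verbatim from the file:

    /* Condensed from armpmu_request_irq() as modified by this patch. */
    struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
    const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
                                               cpu_possible_mask; /* ACPI */

    if (armpmu_count_irq_users(affinity, irq) == 0) {
            /* Prefer a per-CPU NMI limited to the PMU's CPUs... */
            err = request_percpu_nmi(irq, handler, "arm-pmu", affinity,
                                     pcpu_armpmu);
            if (err)
                    /* ...otherwise fall back to a normal per-CPU interrupt. */
                    err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
                                                      affinity, pcpu_armpmu);
    }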

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c

@@ -26,7 +26,8 @@
 #include <asm/irq_regs.h>
 
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+                                  const int irq);
 
 struct pmu_irq_ops {
         void (*enable_pmuirq)(unsigned int irq);
@@ -64,7 +65,9 @@ static void armpmu_enable_percpu_pmuirq(unsigned int irq)
 static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
                                       void __percpu *devid)
 {
-        if (armpmu_count_irq_users(irq) == 1)
+        struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+        if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
                 free_percpu_irq(irq, devid);
 }
@@ -89,7 +92,9 @@ static void armpmu_disable_percpu_pmunmi(unsigned int irq)
 static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
                                       void __percpu *devid)
 {
-        if (armpmu_count_irq_users(irq) == 1)
+        struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+        if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
                 free_percpu_nmi(irq, devid);
 }
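
Note on the two free hooks above: the dev_id registered with the IRQ core is
the address of a per-CPU struct arm_pmu * slot, so the callbacks can recover
the owning PMU for the given CPU and scope the last-user check to its
supported_cpus. A minimal illustration of that double indirection
(hypothetical helper, not part of the patch; assumes the slot is populated
while the interrupt is live):

    static bool armpmu_irq_is_last_user(void __percpu *devid, int cpu, int irq)
    {
            /* devid points at this CPU's "struct arm_pmu *" slot. */
            struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);

            return armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1;
    }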
@@ -580,11 +585,11 @@ static const struct attribute_group armpmu_common_attr_group = {
         .attrs = armpmu_common_attrs,
 };
 
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
 {
         int cpu, count = 0;
 
-        for_each_possible_cpu(cpu) {
+        for_each_cpu(cpu, affinity) {
                 if (per_cpu(cpu_irq, cpu) == irq)
                         count++;
         }
@@ -592,12 +597,13 @@ static int armpmu_count_irq_users(const int irq)
         return count;
 }
 
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
 {
         const struct pmu_irq_ops *ops = NULL;
         int cpu;
 
-        for_each_possible_cpu(cpu) {
+        for_each_cpu(cpu, affinity) {
                 if (per_cpu(cpu_irq, cpu) != irq)
                         continue;
@@ -609,22 +615,25 @@ static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
         return ops;
 }
 
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
 {
         if (per_cpu(cpu_irq, cpu) == 0)
                 return;
         if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
                 return;
 
-        per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+        per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
 
         per_cpu(cpu_irq, cpu) = 0;
         per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
 {
         int err = 0;
+        struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+        const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+                                                   cpu_possible_mask; /* ACPI */
         const irq_handler_t handler = armpmu_dispatch_irq;
         const struct pmu_irq_ops *irq_ops;
@@ -646,25 +655,24 @@ int armpmu_request_irq(int irq, int cpu)
                             IRQF_NOBALANCING | IRQF_NO_AUTOEN |
                             IRQF_NO_THREAD;
 
-                err = request_nmi(irq, handler, irq_flags, "arm-pmu",
-                                  per_cpu_ptr(&cpu_armpmu, cpu));
+                err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
 
                 /* If cannot get an NMI, get a normal interrupt */
                 if (err) {
                         err = request_irq(irq, handler, irq_flags, "arm-pmu",
-                                          per_cpu_ptr(&cpu_armpmu, cpu));
+                                          armpmu);
                         irq_ops = &pmuirq_ops;
                 } else {
                         has_nmi = true;
                         irq_ops = &pmunmi_ops;
                 }
-        } else if (armpmu_count_irq_users(irq) == 0) {
-                err = request_percpu_nmi(irq, handler, "arm-pmu", NULL, &cpu_armpmu);
+        } else if (armpmu_count_irq_users(affinity, irq) == 0) {
+                err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
 
                 /* If cannot get an NMI, get a normal interrupt */
                 if (err) {
-                        err = request_percpu_irq(irq, handler, "arm-pmu",
-                                                 &cpu_armpmu);
+                        err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+                                                          affinity, pcpu_armpmu);
                         irq_ops = &percpu_pmuirq_ops;
                 } else {
                         has_nmi = true;
@@ -672,7 +680,7 @@ int armpmu_request_irq(int irq, int cpu)
                 }
         } else {
                 /* Per cpudevid irq was already requested by another CPU */
-                irq_ops = armpmu_find_irq_ops(irq);
+                irq_ops = armpmu_find_irq_ops(affinity, irq);
 
                 if (WARN_ON(!irq_ops))
                         err = -EINVAL;

diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c

@@ -218,7 +218,7 @@ static int arm_pmu_acpi_parse_irqs(void)
                  * them with their PMUs.
                  */
                 per_cpu(pmu_irqs, cpu) = irq;
-                err = armpmu_request_irq(irq, cpu);
+                err = armpmu_request_irq(&probed_pmus, irq, cpu);
                 if (err)
                         goto out_err;
         }
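
Why &probed_pmus works here even though the PMUs have not been probed yet: at
parse time the per-CPU slot may still be NULL, in which case
armpmu_request_irq() falls back to cpu_possible_mask (the /* ACPI */ comment
in the first file). A condensed view of that selection, mirroring the hunk in
drivers/perf/arm_pmu.c:

    struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
    /* NULL before the PMU has been probed: use all possible CPUs. */
    const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
                                               cpu_possible_mask;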

diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c

@@ -165,7 +165,7 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
                 if (!irq)
                         continue;
 
-                err = armpmu_request_irq(irq, cpu);
+                err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
                 if (err)
                         break;
         }
@@ -181,7 +181,7 @@ static void armpmu_free_irqs(struct arm_pmu *armpmu)
         for_each_cpu(cpu, &armpmu->supported_cpus) {
                 int irq = per_cpu(hw_events->irq, cpu);
 
-                armpmu_free_irq(irq, cpu);
+                armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
         }
 }
}

diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h

@@ -190,8 +190,8 @@ bool arm_pmu_irq_is_nmi(void);
 struct arm_pmu *armpmu_alloc(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
 
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"