perf: arm_pmu: Request specific affinities for per CPU NMIs/interrupts
Let the PMU driver request both NMIs and normal interrupts with an
affinity mask matching the PMU affinity.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-19-maz@kernel.org
parent c734af3b2b
commit 54b350fa8e
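
In practice, the affinity-aware request path added here looks like the sketch
below. This is a minimal illustration, not code from the patch:
my_request_pmu_irq() and its parameters are hypothetical, and the
request_percpu_nmi()/request_percpu_irq_affinity() call shapes are taken from
the hunks that follow.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/perf/arm_pmu.h>

/*
 * Hypothetical caller of the affinity-aware per-CPU request API.
 * "handler" stands in for the driver's real PMU interrupt handler.
 */
static int my_request_pmu_irq(struct arm_pmu *pmu, int irq,
			      irq_handler_t handler,
			      struct arm_pmu * __percpu *pcpu_pmu)
{
	/* Only the CPUs this PMU actually covers may take the interrupt */
	const struct cpumask *affinity = &pmu->supported_cpus;
	int err;

	/* Try an NMI first, restricted to the PMU's CPUs */
	err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_pmu);
	if (err)
		/* No NMI available: fall back to a normal per-CPU interrupt */
		err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
						  affinity, pcpu_pmu);
	return err;
}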
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,7 +26,8 @@
 #include <asm/irq_regs.h>
 
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+				  const int irq);
 
 struct pmu_irq_ops {
 	void (*enable_pmuirq)(unsigned int irq);
@@ -64,7 +65,9 @@ static void armpmu_enable_percpu_pmuirq(unsigned int irq)
 static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
 				      void __percpu *devid)
 {
-	if (armpmu_count_irq_users(irq) == 1)
+	struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+	if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
 		free_percpu_irq(irq, devid);
 }
 
@@ -89,7 +92,9 @@ static void armpmu_disable_percpu_pmunmi(unsigned int irq)
 static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
 				      void __percpu *devid)
 {
-	if (armpmu_count_irq_users(irq) == 1)
+	struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+	if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
 		free_percpu_nmi(irq, devid);
 }
 
@@ -580,11 +585,11 @@ static const struct attribute_group armpmu_common_attr_group = {
 	.attrs = armpmu_common_attrs,
 };
 
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
 {
 	int cpu, count = 0;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, affinity) {
 		if (per_cpu(cpu_irq, cpu) == irq)
 			count++;
 	}
@@ -592,12 +597,13 @@ static int armpmu_count_irq_users(const int irq)
 	return count;
 }
 
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
 {
 	const struct pmu_irq_ops *ops = NULL;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, affinity) {
 		if (per_cpu(cpu_irq, cpu) != irq)
 			continue;
 
@@ -609,22 +615,25 @@ static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
 	return ops;
 }
 
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
 		return;
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;
 
-	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
 
 	per_cpu(cpu_irq, cpu) = 0;
 	per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
 {
 	int err = 0;
+	struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+	const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+						   cpu_possible_mask; /* ACPI */
 	const irq_handler_t handler = armpmu_dispatch_irq;
 	const struct pmu_irq_ops *irq_ops;
 
@@ -646,25 +655,24 @@ int armpmu_request_irq(int irq, int cpu)
 			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
 			    IRQF_NO_THREAD;
 
-		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
-				  per_cpu_ptr(&cpu_armpmu, cpu));
+		err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
 
 		/* If cannot get an NMI, get a normal interrupt */
 		if (err) {
 			err = request_irq(irq, handler, irq_flags, "arm-pmu",
-					  per_cpu_ptr(&cpu_armpmu, cpu));
+					  armpmu);
 			irq_ops = &pmuirq_ops;
 		} else {
 			has_nmi = true;
 			irq_ops = &pmunmi_ops;
 		}
-	} else if (armpmu_count_irq_users(irq) == 0) {
-		err = request_percpu_nmi(irq, handler, "arm-pmu", NULL, &cpu_armpmu);
+	} else if (armpmu_count_irq_users(affinity, irq) == 0) {
+		err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
 
 		/* If cannot get an NMI, get a normal interrupt */
 		if (err) {
-			err = request_percpu_irq(irq, handler, "arm-pmu",
-						 &cpu_armpmu);
+			err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+							  affinity, pcpu_armpmu);
 			irq_ops = &percpu_pmuirq_ops;
 		} else {
 			has_nmi = true;
@@ -672,7 +680,7 @@ int armpmu_request_irq(int irq, int cpu)
 		}
 	} else {
 		/* Per cpudevid irq was already requested by another CPU */
-		irq_ops = armpmu_find_irq_ops(irq);
+		irq_ops = armpmu_find_irq_ops(affinity, irq);
 
 		if (WARN_ON(!irq_ops))
 			err = -EINVAL;
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -218,7 +218,7 @@ static int arm_pmu_acpi_parse_irqs(void)
 		 * them with their PMUs.
 		 */
 		per_cpu(pmu_irqs, cpu) = irq;
-		err = armpmu_request_irq(irq, cpu);
+		err = armpmu_request_irq(&probed_pmus, irq, cpu);
 		if (err)
 			goto out_err;
 	}
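
A note on the /* ACPI */ fallback above: arm_pmu_acpi_parse_irqs() requests
the IRQs before any PMU has been probed, so the per-CPU slot in probed_pmus
is still NULL at that point and armpmu_request_irq() falls back to
cpu_possible_mask. Restated as a stand-alone helper for illustration only
(pmu_irq_affinity() does not exist in the patch):

/*
 * Sketch of the affinity selection performed inside armpmu_request_irq().
 */
static const struct cpumask *
pmu_irq_affinity(struct arm_pmu * __percpu *pcpu_armpmu, int cpu)
{
	struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);

	/*
	 * PMU already probed: restrict to its CPU mask; otherwise (the
	 * ACPI probe order) fall back to every possible CPU.
	 */
	return *armpmu ? &(*armpmu)->supported_cpus : cpu_possible_mask;
}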
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -165,7 +165,7 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
 		if (!irq)
 			continue;
 
-		err = armpmu_request_irq(irq, cpu);
+		err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
 		if (err)
 			break;
 	}
@@ -181,7 +181,7 @@ static void armpmu_free_irqs(struct arm_pmu *armpmu)
 	for_each_cpu(cpu, &armpmu->supported_cpus) {
 		int irq = per_cpu(hw_events->irq, cpu);
 
-		armpmu_free_irq(irq, cpu);
+		armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
 	}
 }
 
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -190,8 +190,8 @@ bool arm_pmu_irq_is_nmi(void);
 struct arm_pmu *armpmu_alloc(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
 
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"