genirq/manage: Rework irq_update_affinity_desc()

Use the new guards to get and lock the interrupt descriptor and tidy up the
code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20250429065421.830357569@linutronix.de
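
The "new guards" are the scoped irqdesc locking helpers built on the kernel's cleanup infrastructure (include/linux/cleanup.h), which wraps the compiler's __attribute__((cleanup)). Below is a minimal, self-contained sketch of the underlying pattern, using a plain pthread mutex rather than the kernel's actual irqdesc guard classes — all names in it are illustrative, not kernel API. The point is that the unlock runs automatically on every exit path, which is why the reworked function can return directly instead of jumping to an out_unlock label.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup callback: runs when the guard variable leaves scope */
static void guard_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/*
 * Run the following block exactly once with the mutex held; the
 * cleanup attribute unlocks on any exit, including early returns.
 */
#define scoped_mutex_guard(m)						\
	for (pthread_mutex_t *__g __attribute__((cleanup(guard_unlock)))\
	     = (pthread_mutex_lock(m), (m)), *__once = (m);		\
	     __once; __once = NULL)

static int update_state(int new_state)
{
	scoped_mutex_guard(&demo_lock) {
		if (new_state < 0)
			return -1;	/* unlock runs automatically here */
		printf("state = %d\n", new_state);
		return 0;		/* ... and here */
	}
	return -1;			/* unreachable in this sketch */
}

int main(void)
{
	return update_state(42) ? 1 : 0;
}

In the diff below, scoped_irqdesc_get_and_buslock() plays the role of scoped_mutex_guard(): the braced block runs only if the descriptor lookup succeeds, a return from inside it drops the bus lock and the descriptor reference, and the trailing return -EINVAL covers the lookup-failure path.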
Thomas Gleixner 2025-04-29 08:55:32 +02:00
parent 17c1953567
commit b0561582ea
1 changed file with 31 additions and 43 deletions


@@ -395,14 +395,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
  * an interrupt which is already started or which has already been configured
  * as managed will also fail, as these mean invalid init state or double init.
  */
-int irq_update_affinity_desc(unsigned int irq,
-			     struct irq_affinity_desc *affinity)
+int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity)
 {
-	struct irq_desc *desc;
-	unsigned long flags;
-	bool activated;
-	int ret = 0;
-
 	/*
 	 * Supporting this with the reservation scheme used by x86 needs
 	 * some more thought. Fail it for now.
@@ -410,44 +404,38 @@ int irq_update_affinity_desc(unsigned int irq,
 	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
 		return -EOPNOTSUPP;
 
-	desc = irq_get_desc_buslock(irq, &flags, 0);
-	if (!desc)
-		return -EINVAL;
+	scoped_irqdesc_get_and_buslock(irq, 0) {
+		struct irq_desc *desc = scoped_irqdesc;
+		bool activated;
 
-	/* Requires the interrupt to be shut down */
-	if (irqd_is_started(&desc->irq_data)) {
-		ret = -EBUSY;
-		goto out_unlock;
-	}
+		/* Requires the interrupt to be shut down */
+		if (irqd_is_started(&desc->irq_data))
+			return -EBUSY;
 
-	/* Interrupts which are already managed cannot be modified */
-	if (irqd_affinity_is_managed(&desc->irq_data)) {
-		ret = -EBUSY;
-		goto out_unlock;
-	}
-
-	/*
-	 * Deactivate the interrupt. That's required to undo
-	 * anything an earlier activation has established.
-	 */
-	activated = irqd_is_activated(&desc->irq_data);
-	if (activated)
-		irq_domain_deactivate_irq(&desc->irq_data);
-
-	if (affinity->is_managed) {
-		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
-		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
-	}
-
-	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
-
-	/* Restore the activation state */
-	if (activated)
-		irq_domain_activate_irq(&desc->irq_data, false);
-
-out_unlock:
-	irq_put_desc_busunlock(desc, flags);
-	return ret;
+		/* Interrupts which are already managed cannot be modified */
+		if (irqd_affinity_is_managed(&desc->irq_data))
+			return -EBUSY;
+
+		/*
+		 * Deactivate the interrupt. That's required to undo
+		 * anything an earlier activation has established.
+		 */
+		activated = irqd_is_activated(&desc->irq_data);
+		if (activated)
+			irq_domain_deactivate_irq(&desc->irq_data);
+
+		if (affinity->is_managed) {
+			irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
+			irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
+		}
+		cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
+
+		/* Restore the activation state */
+		if (activated)
+			irq_domain_activate_irq(&desc->irq_data, false);
+		return 0;
+	}
+	return -EINVAL;
 }
 
 static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,