genirq: Move irq_wait_for_poll() to call site

Move it to the call site so that the wait for the INPROGRESS flag can be
reused by an upcoming mitigation for a potential livelock in the edge type
handler.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Liangyan <liangyan.peng@bytedance.com>
Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
Link: https://lore.kernel.org/all/20250718185311.948555026@linutronix.de

commit 4e879dedd5 (parent 46958a7bac)
Author: Thomas Gleixner, 2025-07-18 20:54:08 +02:00
3 changed files with 25 additions and 45 deletions

--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c

@@ -457,11 +457,21 @@ void unmask_threaded_irq(struct irq_desc *desc)
 	unmask_irq(desc);
 }
 
-static bool irq_check_poll(struct irq_desc *desc)
+/* Busy wait until INPROGRESS is cleared */
+static bool irq_wait_on_inprogress(struct irq_desc *desc)
 {
-	if (!(desc->istate & IRQS_POLL_INPROGRESS))
-		return false;
-	return irq_wait_for_poll(desc);
+	if (IS_ENABLED(CONFIG_SMP)) {
+		do {
+			raw_spin_unlock(&desc->lock);
+			while (irqd_irq_inprogress(&desc->irq_data))
+				cpu_relax();
+			raw_spin_lock(&desc->lock);
+		} while (irqd_irq_inprogress(&desc->irq_data));
+
+		/* Might have been disabled in meantime */
+		return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+	}
+	return false;
 }
 
 static bool irq_can_handle_pm(struct irq_desc *desc)
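
The pattern above only works because the waiter drops desc->lock before
spinning: the CPU that is polling the interrupt needs that lock to finish
and clear INPROGRESS, so spinning with the lock held could deadlock. The
state must then be rechecked after the lock is reacquired, since the
interrupt may have been disabled in the meantime. A minimal userspace
sketch of the same unlock/spin/relock idea, using pthreads and C11
atomics (names like wait_on_inprogress and remote_handler are
illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool inprogress = true;	/* the "remote handler" is running */
static bool disabled;			/* protected by desc_lock */

/* Remote side: needs desc_lock to finish, then clears INPROGRESS */
static void *remote_handler(void *unused)
{
	usleep(1000);
	pthread_mutex_lock(&desc_lock);
	atomic_store(&inprogress, false);
	pthread_mutex_unlock(&desc_lock);
	return NULL;
}

/*
 * Mirrors the shape of irq_wait_on_inprogress(): called with desc_lock
 * held, drops it while busy waiting, rechecks under the lock.
 */
static bool wait_on_inprogress(void)
{
	do {
		pthread_mutex_unlock(&desc_lock);
		while (atomic_load(&inprogress))
			;	/* stand-in for cpu_relax() */
		pthread_mutex_lock(&desc_lock);
	} while (atomic_load(&inprogress));

	/* Might have been disabled in the meantime */
	return !disabled;
}

int main(void)
{
	pthread_t remote;

	pthread_create(&remote, NULL, remote_handler, NULL);
	pthread_mutex_lock(&desc_lock);
	printf("can handle: %s\n", wait_on_inprogress() ? "yes" : "no");
	pthread_mutex_unlock(&desc_lock);
	return pthread_join(remote, NULL);
}

Holding desc_lock across the spin would make this deadlock: the remote
thread blocks on the mutex and inprogress never clears.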
@@ -481,10 +491,15 @@ static bool irq_can_handle_pm(struct irq_desc *desc)
 	if (irq_pm_check_wakeup(desc))
 		return false;
 
-	/*
-	 * Handle a potential concurrent poll on a different core.
-	 */
-	return irq_check_poll(desc);
+	/* Check whether the interrupt is polled on another CPU */
+	if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) {
+		if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+			      "irq poll in progress on cpu %d for irq %d\n",
+			      smp_processor_id(), desc->irq_data.irq))
+			return false;
+		return irq_wait_on_inprogress(desc);
+	}
+	return false;
 }
 
 static inline bool irq_can_handle_actions(struct irq_desc *desc)
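
The WARN_ONCE() guard keeps a CPU from waiting on a poll that it is
running itself, which could never complete; returning false leaves the
interrupt unhandled rather than hanging the CPU. WARN_ONCE() evaluates
to its condition and fires at most once. A rough userspace approximation
of that behavior (WARN_ONCE_SKETCH is a made-up name; it uses a GNU C
statement expression, and the real kernel macro additionally produces a
backtrace and taints the kernel):

#include <stdbool.h>
#include <stdio.h>

/* One-shot warning that, like the kernel's WARN_ONCE(), returns the
 * condition so it can sit directly in an if (). Illustrative only. */
#define WARN_ONCE_SKETCH(cond, fmt, ...)				\
({									\
	static bool __warned;						\
	bool __cond = (cond);						\
	if (__cond && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);	\
	}								\
	__cond;								\
})

int main(void)
{
	/* Pretend CPU 1 keeps hitting its own polled interrupt */
	for (int i = 0; i < 3; i++) {
		if (WARN_ONCE_SKETCH(true, "irq poll in progress on cpu %d\n", 1))
			continue;	/* bail out, as the handler does */
	}
	return 0;
}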

--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h

@@ -20,6 +20,7 @@
 #define istate core_internal_state__do_not_mess_with_it
 
 extern bool noirqdebug;
+extern int irq_poll_cpu;
 
 extern struct irqaction chained_action;
 
@@ -112,7 +113,6 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
 int check_irq_resend(struct irq_desc *desc, bool inject);
 void clear_irq_resend(struct irq_desc *desc);
 void irq_resend_init(struct irq_desc *desc);
-bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 void wake_threads_waitq(struct irq_desc *desc);
 

--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c

@@ -19,44 +19,9 @@ static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(struct timer_list *unused);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
-static int irq_poll_cpu;
+int irq_poll_cpu;
 static atomic_t irq_poll_active;
 
-/*
- * We wait here for a poller to finish.
- *
- * If the poll runs on this CPU, then we yell loudly and return
- * false. That will leave the interrupt line disabled in the worst
- * case, but it should never happen.
- *
- * We wait until the poller is done and then recheck disabled and
- * action (about to be disabled). Only if it's still active, we return
- * true and let the handler run.
- */
-bool irq_wait_for_poll(struct irq_desc *desc)
-{
-	lockdep_assert_held(&desc->lock);
-
-	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
-		      "irq poll in progress on cpu %d for irq %d\n",
-		      smp_processor_id(), desc->irq_data.irq))
-		return false;
-
-#ifdef CONFIG_SMP
-	do {
-		raw_spin_unlock(&desc->lock);
-		while (irqd_irq_inprogress(&desc->irq_data))
-			cpu_relax();
-		raw_spin_lock(&desc->lock);
-	} while (irqd_irq_inprogress(&desc->irq_data));
-	/* Might have been disabled in meantime */
-	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
-#else
-	return false;
-#endif
-}
-
 /*
  * Recovery handler for misrouted interrupts.
  */