Release 4.18 kernel/irq/migration.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "internals.h"
/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Thomas Gleixner | 70 | 100.00% | 1 | 100.00% |
| Total | 70 | 100.00% | 1 | 100.00% |
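For context on how this helper is consumed: it is called from the CPU hotplug path when a CPU goes offline. The sketch below is a simplified, from-memory approximation of the caller logic in kernel/irq/cpuhotplug.c (migrate_one_irq()); it is illustrative only, not the verbatim 4.18 code.

/*
 * Hedged sketch of a hotplug-time caller. The real logic additionally
 * deals with chip quirks, masking and affinity notifiers.
 */
static bool migrate_one_irq_sketch(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity;

	/*
	 * If a move was already pending, let irq_fixup_move_pending()
	 * decide whether another online CPU can still take it. When it
	 * returns true, the pending mask is the affinity to use.
	 */
	if (irq_fixup_move_pending(desc, false))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Nothing to do if the current affinity still has an online CPU. */
	if (cpumask_any_and(affinity, cpu_online_mask) < nr_cpu_ids)
		return false;

	/* Otherwise fall back to any online CPU (simplification). */
	return irq_do_set_affinity(d, cpu_online_mask, true) == 0;
}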
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be setting
	 * the RTE when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Thomas Gleixner | 65 | 41.67% | 7 | 35.00% |
| Andrew Morton | 35 | 22.44% | 1 | 5.00% |
| Bryan Holty | 19 | 12.18% | 1 | 5.00% |
| Yinghai Lu | 12 | 7.69% | 2 | 10.00% |
| Rusty Russell | 11 | 7.05% | 1 | 5.00% |
| Eric W. Biederman | 5 | 3.21% | 1 | 5.00% |
| Ingo Molnar | 4 | 2.56% | 3 | 15.00% |
| Jiang Liu | 3 | 1.92% | 2 | 10.00% |
| Lucas De Marchi | 1 | 0.64% | 1 | 5.00% |
| Daniel Walker | 1 | 0.64% | 1 | 5.00% |
| Total | 156 | 100.00% | 20 | 100.00% |
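The pending state that irq_move_masked_irq() consumes is produced when an affinity change cannot be applied directly in interrupt context. Below is a hedged sketch of that producer side, loosely based on irq_set_affinity_locked() in kernel/irq/manage.c; the helper name queue_deferred_move() is invented here purely for illustration.

/*
 * Sketch only: field and flag names match the real core under
 * CONFIG_GENERIC_PENDING_IRQ, but this is a simplification, not the
 * verbatim kernel code.
 */
static int queue_deferred_move(struct irq_data *data,
			       const struct cpumask *mask)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/* Remember the requested target... */
	cpumask_copy(desc->pending_mask, mask);
	/* ...and flag it, so the next irq_move_masked_irq() applies it. */
	irqd_set_move_pending(data);
	return 0;
}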
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Thomas Gleixner | 45 | 59.21% | 6 | 50.00% |
| Jiang Liu | 11 | 14.47% | 1 | 8.33% |
| Andrew Morton | 11 | 14.47% | 1 | 8.33% |
| Eric W. Biederman | 4 | 5.26% | 2 | 16.67% |
| Bryan Holty | 3 | 3.95% | 1 | 8.33% |
| Ingo Molnar | 2 | 2.63% | 1 | 8.33% |
| Total | 76 | 100.00% | 12 | 100.00% |
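For reference, flow handlers normally reach __irq_move_irq() through an inline wrapper that tests the pending bit first, keeping the common no-move case cheap. The sketch below reflects the irq_move_irq() wrapper in include/linux/irq.h as best recalled for this era; treat it as an approximation rather than a verbatim copy.

/* Sketch of the inline wrapper (under CONFIG_GENERIC_PENDING_IRQ). */
static inline void irq_move_irq(struct irq_data *data)
{
	/* Only take the slow path when a move is actually pending. */
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}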
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
|---|---|---|---|---|
| Thomas Gleixner | 181 | 57.83% | 13 | 46.43% |
| Andrew Morton | 49 | 15.65% | 1 | 3.57% |
| Bryan Holty | 22 | 7.03% | 1 | 3.57% |
| Yinghai Lu | 18 | 5.75% | 2 | 7.14% |
| Jiang Liu | 14 | 4.47% | 2 | 7.14% |
| Rusty Russell | 11 | 3.51% | 1 | 3.57% |
| Eric W. Biederman | 9 | 2.88% | 2 | 7.14% |
| Ingo Molnar | 6 | 1.92% | 3 | 10.71% |
| Lucas De Marchi | 1 | 0.32% | 1 | 3.57% |
| Greg Kroah-Hartman | 1 | 0.32% | 1 | 3.57% |
| Daniel Walker | 1 | 0.32% | 1 | 3.57% |
| Total | 313 | 100.00% | 28 | 100.00% |