Release 4.15 kernel/irq/migration.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "internals.h"
/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Thomas Gleixner | 70 | 100.00% | 1 | 100.00% |
Total | 70 | 100.00% | 1 | 100.00% |
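For context, irq_fixup_move_pending() is called from the CPU hotplug path when interrupts are migrated off a dying CPU. The sketch below is illustrative only: it paraphrases the shape of such a caller (the logic lives in kernel/irq/cpuhotplug.c in this release) rather than reproducing it verbatim, and the function name example_migrate_one_irq is made up for illustration.

/*
 * Illustrative sketch only (not the verbatim 4.15 source): a hotplug-time
 * migration helper can prefer a user-requested pending affinity if it still
 * contains an online CPU, and otherwise fall back to the current affinity.
 */
static bool example_migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity;

	/*
	 * If a move was pending and the pending mask still contains an
	 * online CPU, honour that mask; irq_fixup_move_pending() clears
	 * the pending state when asked to force-clear it.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	return irq_do_set_affinity(d, affinity, false) == 0;
}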
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Thomas Gleixner | 41 | 29.93% | 6 | 31.58% |
Andrew Morton | 37 | 27.01% | 1 | 5.26% |
Bryan Holty | 21 | 15.33% | 1 | 5.26% |
Yinghai Lu | 12 | 8.76% | 2 | 10.53% |
Rusty Russell | 11 | 8.03% | 1 | 5.26% |
Eric W. Biedermann | 5 | 3.65% | 1 | 5.26% |
Jiang Liu | 4 | 2.92% | 2 | 10.53% |
Ingo Molnar | 4 | 2.92% | 3 | 15.79% |
Lucas De Marchi | 1 | 0.73% | 1 | 5.26% |
Daniel Walker | 1 | 0.73% | 1 | 5.26% |
Total | 137 | 100.00% | 19 | 100.00% |
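The pending move that irq_move_masked_irq() consumes is queued by the generic affinity-setting path when the interrupt cannot be retargeted from process context. The following is a rough, hedged sketch of that producer side (paraphrasing the deferral logic in kernel/irq/manage.c, not quoting it); the function name example_set_affinity_deferred is invented for illustration.

/*
 * Sketch of the producer side: when irq_can_move_pcntxt() is false, the
 * requested mask is stashed in desc->pending_mask and the move-pending bit
 * is set, so the next interrupt (with the line masked) performs the move.
 */
static int example_set_affinity_deferred(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	if (irq_can_move_pcntxt(data))
		return irq_do_set_affinity(data, mask, force);

	/* Defer: remember the target and let irq_move_masked_irq() apply it */
	irqd_set_move_pending(data);
	irq_copy_pending(desc, mask);
	return 0;
}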
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);

	irq_move_masked_irq(idata);

	if (!masked)
		idata->chip->irq_unmask(idata);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Thomas Gleixner | 49 | 55.68% | 6 | 50.00% |
Jiang Liu | 11 | 12.50% | 1 | 8.33% |
Andrew Morton | 11 | 12.50% | 1 | 8.33% |
Eric W. Biedermann | 10 | 11.36% | 2 | 16.67% |
Bryan Holty | 5 | 5.68% | 1 | 8.33% |
Ingo Molnar | 2 | 2.27% | 1 | 8.33% |
Total | 88 | 100.00% | 12 | 100.00% |
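irq_move_irq() is meant to be called from interrupt context, typically from an irq_chip callback on architectures that can only safely retarget an interrupt while it is being serviced. A hypothetical chip hook illustrating such a call site is sketched below; the chip callback name and example_hw_ack() helper are made up for illustration and are not part of the kernel API.

/*
 * Hypothetical example: a chip ack callback that applies any pending
 * affinity move while the interrupt is being serviced. irq_move_irq()
 * masks the line around the move if it is not already masked.
 */
static void example_chip_irq_ack(struct irq_data *data)
{
	/* Apply a deferred affinity change, if one was queued */
	irq_move_irq(data);

	/* Acknowledge the interrupt at the hardware (chip-specific, hypothetical) */
	example_hw_ack(data->hwirq);
}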
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Thomas Gleixner | 161 | 52.61% | 11 | 42.31% |
Andrew Morton | 51 | 16.67% | 1 | 3.85% |
Bryan Holty | 26 | 8.50% | 1 | 3.85% |
Yinghai Lu | 18 | 5.88% | 2 | 7.69% |
Eric W. Biedermann | 15 | 4.90% | 2 | 7.69% |
Jiang Liu | 15 | 4.90% | 2 | 7.69% |
Rusty Russell | 11 | 3.59% | 1 | 3.85% |
Ingo Molnar | 6 | 1.96% | 3 | 11.54% |
Daniel Walker | 1 | 0.33% | 1 | 3.85% |
Greg Kroah-Hartman | 1 | 0.33% | 1 | 3.85% |
Lucas De Marchi | 1 | 0.33% | 1 | 3.85% |
Total | 306 | 100.00% | 26 | 100.00% |