Release 4.15 kernel/irq/manage.c
/*
* linux/kernel/irq/manage.c
*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006 Thomas Gleixner
*
* This file contains driver APIs to the irq subsystem.
*/
#define pr_fmt(fmt) "genirq: " fmt
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
{
force_irqthreads = true;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
early_param("threadirqs", setup_forced_irqthreads);
#endif
static void __synchronize_hardirq(struct irq_desc *desc)
{
bool inprogress;
do {
unsigned long flags;
/*
* Wait until we're out of the critical section. This might
* give the wrong answer due to the lack of memory barriers.
*/
while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
raw_spin_lock_irqsave(&desc->lock, flags);
inprogress = irqd_irq_inprogress(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (inprogress);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 37 | 51.39% | 1 | 20.00% |
Thomas Gleixner | 21 | 29.17% | 3 | 60.00% |
Ingo Molnar | 14 | 19.44% | 1 | 20.00% |
Total | 72 | 100.00% | 5 | 100.00% |
/**
* synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
*
* This function waits for any pending hard IRQ handlers for this
* interrupt to complete before returning. If you use this
* function while holding a resource the IRQ handler may need,
* you will deadlock. It does not take associated threaded handlers
* into account.
*
* Do not use this for shutdown scenarios where you must be sure
* that all parts (hardirq and threaded handler) have completed.
*
* Returns: false if a threaded handler is active.
*
* This function may be called - with care - from IRQ context.
*/
bool synchronize_hardirq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
__synchronize_hardirq(desc);
return !atomic_read(&desc->threads_active);
}
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 27 | 62.79% | 1 | 50.00% |
Peter Zijlstra | 16 | 37.21% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(synchronize_hardirq);
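A minimal, hypothetical sketch of how a caller might use synchronize_hardirq() from a path that must not sleep: quiesce only the hard IRQ part and let the caller decide what to do if a threaded handler is still running. The helper and names (my_quiesce_hardirq, my_irq) are illustrative, not part of this file.

#include <linux/interrupt.h>

/* Illustrative sketch, not from kernel/irq/manage.c. */
static bool my_quiesce_hardirq(unsigned int my_irq)
{
    /* Stop new invocations without waiting (usable from atomic context). */
    disable_irq_nosync(my_irq);

    /*
     * Wait for hard IRQ handlers already running on other CPUs.
     * Returns false if a threaded handler is still active; the
     * caller must then fall back to a path that can sleep.
     */
    return synchronize_hardirq(my_irq);
}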
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
*
* This function waits for any pending IRQ handlers for this interrupt
* to complete before returning. If you use this function while
* holding a resource the IRQ handler may need, you will deadlock.
*
* This function may be called - with care - from IRQ context.
*/
void synchronize_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
__synchronize_hardirq(desc);
/*
* We made sure that no hardirq handler is
* running. Now verify that no threaded handlers are
* active.
*/
wait_event(desc->wait_for_threads,
!atomic_read(&desc->threads_active));
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 46 | 97.87% | 2 | 66.67% |
Ingo Molnar | 1 | 2.13% | 1 | 33.33% |
Total | 47 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(synchronize_irq);
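The usual consumer of synchronize_irq() is a teardown path: stop the device from raising the interrupt, wait for in-flight handlers (hard and threaded), then free whatever the handler touches. A hedged sketch with a made-up device structure and helper:

#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_dev {
    int   irq;
    void *rx_buf;
};

static void my_stop_hw(struct my_dev *dev)
{
    /* illustrative stub: mask the device's own interrupt sources */
}

static void my_dev_teardown(struct my_dev *dev)
{
    my_stop_hw(dev);              /* device no longer raises the IRQ */
    synchronize_irq(dev->irq);    /* wait for handlers already running */
    kfree(dev->rx_buf);           /* now safe: no handler can touch it */
    dev->rx_buf = NULL;
}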
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
!desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
return false;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 43 | 91.49% | 6 | 85.71% |
Jiang Liu | 4 | 8.51% | 1 | 14.29% |
Total | 47 | 100.00% | 7 | 100.00% |
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
*
*/
int irq_can_set_affinity(unsigned int irq)
{
return __irq_can_set_affinity(irq_to_desc(irq));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
/**
* irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
* @irq: Interrupt to check
*
* Like irq_can_set_affinity() above, but additionally checks for the
* AFFINITY_MANAGED flag.
*/
bool irq_can_set_affinity_usr(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return __irq_can_set_affinity(desc) &&
!irqd_affinity_is_managed(&desc->irq_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
/**
* irq_set_thread_affinity - Notify irq threads to adjust affinity
* @desc: irq descriptor whose affinity changed
*
* We just set IRQTF_AFFINITY and delegate the affinity setting
* to the interrupt thread itself. We cannot call
* set_cpus_allowed_ptr() here as we hold desc->lock and this
* code can be called from hard interrupt context.
*/
void irq_set_thread_affinity(struct irq_desc *desc)
{
struct irqaction *action;
for_each_action_of_desc(desc, action)
if (action->thread)
set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 31 | 83.78% | 2 | 66.67% |
Daniel Lezcano | 6 | 16.22% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
if (!cpumask_empty(m))
return;
pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
chip->name, data->irq);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 59 | 100.00% | 1 | 100.00% |
Total | 59 | 100.00% | 1 | 100.00% |
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
ret = chip->irq_set_affinity(data, mask, force);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
case IRQ_SET_MASK_OK_NOCOPY:
irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
ret = 0;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 56 | 50.00% | 8 | 50.00% |
Jiang Liu | 20 | 17.86% | 3 | 18.75% |
Yinghai Lu | 16 | 14.29% | 2 | 12.50% |
David Daney | 10 | 8.93% | 1 | 6.25% |
Suresh B. Siddha | 6 | 5.36% | 1 | 6.25% |
Rusty Russell | 4 | 3.57% | 1 | 6.25% |
Total | 112 | 100.00% | 16 | 100.00% |
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct irq_desc *desc = irq_data_to_desc(data);
int ret = 0;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
if (irq_can_move_pcntxt(data)) {
ret = irq_do_set_affinity(data, mask, force);
} else {
irqd_set_move_pending(data);
irq_copy_pending(desc, mask);
}
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
schedule_work(&desc->affinity_notify->work);
}
irqd_set(data, IRQD_AFFINITY_SET);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 71 | 54.20% | 1 | 8.33% |
Ben Hutchings | 28 | 21.37% | 1 | 8.33% |
Thomas Gleixner | 17 | 12.98% | 6 | 50.00% |
David Daney | 10 | 7.63% | 1 | 8.33% |
Yinghai Lu | 2 | 1.53% | 1 | 8.33% |
Rusty Russell | 2 | 1.53% | 1 | 8.33% |
Suresh B. Siddha | 1 | 0.76% | 1 | 8.33% |
Total | 131 | 100.00% | 12 | 100.00% |
int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret;
if (!desc)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Daney | 54 | 66.67% | 1 | 14.29% |
Thomas Gleixner | 27 | 33.33% | 6 | 85.71% |
Total | 81 | 100.00% | 7 | 100.00% |
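Drivers normally reach __irq_set_affinity() through the irq_set_affinity() and irq_force_affinity() wrappers in <linux/interrupt.h>. A hedged sketch of pinning an interrupt to one CPU; my_pin_irq_to_cpu is a made-up helper:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int my_pin_irq_to_cpu(unsigned int irq, int cpu)
{
    if (!irq_can_set_affinity(irq))
        return -EINVAL;

    /* force == false is the normal case for drivers */
    return irq_set_affinity(irq, cpumask_of(cpu));
}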
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
desc->affinity_hint = m;
irq_put_desc_unlock(desc, flags);
/* set the initial affinity to prevent every interrupt being on CPU0 */
if (m)
__irq_set_affinity(irq, m, false);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter P. Waskiewicz Jr | 48 | 65.75% | 1 | 20.00% |
Jesse Brandeburg | 14 | 19.18% | 2 | 40.00% |
Thomas Gleixner | 9 | 12.33% | 1 | 20.00% |
Marc Zyngier | 2 | 2.74% | 1 | 20.00% |
Total | 73 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
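A common pattern, sketched here with invented structure and field names: a multiqueue driver publishes one hint per vector so the initial affinity and tools such as irqbalance follow the queue-to-CPU mapping, and clears the hint again before free_irq():

#include <linux/interrupt.h>
#include <linux/cpumask.h>

struct my_dev {
    int          num_queues;
    unsigned int queue_irq[8];
};

static void my_dev_set_queue_hints(struct my_dev *dev)
{
    int i;

    for (i = 0; i < dev->num_queues; i++)
        irq_set_affinity_hint(dev->queue_irq[i],
                              cpumask_of(i % num_online_cpus()));
}

static void my_dev_clear_queue_hints(struct my_dev *dev)
{
    int i;

    /* drivers typically clear the hint before calling free_irq() */
    for (i = 0; i < dev->num_queues; i++)
        irq_set_affinity_hint(dev->queue_irq[i], NULL);
}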
static void irq_affinity_notify(struct work_struct *work)
{
struct irq_affinity_notify *notify =
container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
goto out;
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_move_pending(&desc->irq_data))
irq_get_pending(cpumask, desc);
else
cpumask_copy(cpumask, desc->irq_common_data.affinity);
raw_spin_unlock_irqrestore(&desc->lock, flags);
notify->notify(notify, cpumask);
free_cpumask_var(cpumask);
out:
kref_put(¬ify->kref, notify->release);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Hutchings | 129 | 92.81% | 1 | 20.00% |
Thomas Gleixner | 9 | 6.47% | 3 | 60.00% |
Jiang Liu | 1 | 0.72% | 1 | 20.00% |
Total | 139 | 100.00% | 5 | 100.00% |
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
* @notify: Context for notification, or %NULL to disable
* notification. Function pointers must be initialised;
* the other fields will be initialised by this function.
*
* Must be called in process context. Notification may only be enabled
* after the IRQ is allocated and must be disabled before the IRQ is
* freed using free_irq().
*/
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_affinity_notify *old_notify;
unsigned long flags;
/* The release function is promised process context */
might_sleep();
if (!desc)
return -EINVAL;
/* Complete initialisation of *notify */
if (notify) {
notify->irq = irq;
kref_init(¬ify->kref);
INIT_WORK(¬ify->work, irq_affinity_notify);
}
raw_spin_lock_irqsave(&desc->lock, flags);
old_notify = desc->affinity_notify;
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (old_notify)
kref_put(&old_notify->kref, old_notify->release);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Hutchings | 128 | 100.00% | 1 | 100.00% |
Total | 128 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
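A hedged sketch of a notifier client. Only the irq_affinity_notify callbacks and irq_set_affinity_notifier() come from the API above; the embedding context structure and registration helper are invented:

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_notify_ctx {
    struct irq_affinity_notify notify;
    /* driver-private state refreshed when the affinity changes */
};

static void my_affinity_changed(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
    struct my_notify_ctx *ctx =
        container_of(notify, struct my_notify_ctx, notify);

    /* runs from a workqueue, so it may sleep */
    (void)ctx;
}

static void my_affinity_release(struct kref *ref)
{
    struct my_notify_ctx *ctx =
        container_of(ref, struct my_notify_ctx, notify.kref);

    kfree(ctx);
}

static int my_register_notifier(unsigned int irq)
{
    struct my_notify_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    int ret;

    if (!ctx)
        return -ENOMEM;

    ctx->notify.notify = my_affinity_changed;
    ctx->notify.release = my_affinity_release;
    ret = irq_set_affinity_notifier(irq, &ctx->notify);
    if (ret)
        kfree(ctx);
    return ret;
}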
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
*/
int irq_setup_affinity(struct irq_desc *desc)
{
struct cpumask *set = irq_default_affinity;
int ret, node = irq_desc_get_node(desc);
static DEFINE_RAW_SPINLOCK(mask_lock);
static struct cpumask mask;
/* Excludes PER_CPU and NO_BALANCE interrupts */
if (!__irq_can_set_affinity(desc))
return 0;
raw_spin_lock(&mask_lock);
/*
* Preserve the managed affinity setting and a userspace affinity
* setup, but make sure that one of the targets is online.
*/
if (irqd_affinity_is_managed(&desc->irq_data) ||
irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc->irq_common_data.affinity,
cpu_online_mask))
set = desc->irq_common_data.affinity;
else
irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
}
cpumask_and(&mask, cpu_online_mask, set);
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
/* make sure at least one of the cpus in nodemask is online */
if (cpumask_intersects(&mask, nodemask))
cpumask_and(&mask, &mask, nodemask);
}
ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
raw_spin_unlock(&mask_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 105 | 56.45% | 9 | 50.00% |
Prarit Bhargava | 41 | 22.04% | 1 | 5.56% |
Maksim Krasnyanskiy | 17 | 9.14% | 1 | 5.56% |
Jiang Liu | 8 | 4.30% | 4 | 22.22% |
Yinghai Lu | 7 | 3.76% | 1 | 5.56% |
Rusty Russell | 7 | 3.76% | 1 | 5.56% |
Masahiro Yamada | 1 | 0.54% | 1 | 5.56% |
Total | 186 | 100.00% | 18 | 100.00% |
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
return irq_select_affinity(irq_desc_get_irq(desc));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 16 | 84.21% | 2 | 66.67% |
Jiang Liu | 3 | 15.79% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
#endif
/*
* Called when a bogus affinity is set via /proc/irq
*/
int irq_select_affinity_usr(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret;
raw_spin_lock_irqsave(&desc->lock, flags);
ret = irq_setup_affinity(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 56 | 100.00% | 3 | 100.00% |
Total | 56 | 100.00% | 3 | 100.00% |
#endif
/**
* irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
* @irq: interrupt number to set affinity
* @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
* specific data for percpu_devid interrupts
*
* This function uses the vCPU specific data to set the vCPU
* affinity for an irq. The vCPU specific data is passed from
* outside, such as KVM. One example code path is as below:
* KVM -> IOMMU -> irq_set_vcpu_affinity().
*/
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
struct irq_data *data;
struct irq_chip *chip;
int ret = -ENOSYS;
if (!desc)
return -EINVAL;
data = irq_desc_get_irq_data(desc);
do {
chip = irq_data_get_irq_chip(data);
if (chip && chip->irq_set_vcpu_affinity)
break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
data = data->parent_data;
#else
data = NULL;
#endif
} while (data);
if (data)
ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
irq_put_desc_unlock(desc, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Feng Wu | 100 | 76.92% | 1 | 50.00% |
Marc Zyngier | 30 | 23.08% | 1 | 50.00% |
Total | 130 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
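As a minimal sketch, not taken from any real driver: an interrupt chip that supports vCPU delivery exposes the irq_set_vcpu_affinity callback in its struct irq_chip, and irq_set_vcpu_affinity() above walks the domain hierarchy until it finds one:

#include <linux/irq.h>

static int my_chip_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
    /* program the hardware to deliver this interrupt to the vCPU */
    return 0;
}

static struct irq_chip my_vcpu_chip = {
    .name                   = "my-vcpu-chip",
    .irq_set_vcpu_affinity  = my_chip_set_vcpu_affinity,
};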
void __disable_irq(struct irq_desc *desc)
{
if (!desc->depth++)
irq_disable(desc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 21 | 91.30% | 1 | 50.00% |
Thomas Gleixner | 2 | 8.70% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
static int __disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
__disable_irq(desc);
irq_put_desc_busunlock(desc, flags);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 23 | 43.40% | 2 | 25.00% |
Ingo Molnar | 21 | 39.62% | 2 | 25.00% |
Matthew Wilcox | 3 | 5.66% | 1 | 12.50% |
Rafael J. Wysocki | 2 | 3.77% | 1 | 12.50% |
Yinghai Lu | 2 | 3.77% | 1 | 12.50% |
Marc Zyngier | 2 | 3.77% | 1 | 12.50% |
Total | 53 | 100.00% | 8 | 100.00% |
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables and Enables are
* nested.
* Unlike disable_irq(), this function does not ensure existing
* instances of the IRQ handler have completed before returning.
*
* This function may be called from IRQ context.
*/
void disable_irq_nosync(unsigned int irq)
{
__disable_irq_nosync(irq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 13 | 92.86% | 2 | 66.67% |
Ingo Molnar | 1 | 7.14% | 1 | 33.33% |
Total | 14 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(disable_irq_nosync);
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Enables and Disables are
* nested.
* This function waits for any pending IRQ handlers for this interrupt
* to complete before returning. If you use this function while
* holding a resource the IRQ handler may need, you will deadlock.
*
* This function may be called - with care - from IRQ context.
*/
void disable_irq(unsigned int irq)
{
if (!__disable_irq_nosync(irq))
synchronize_irq(irq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 18 | 81.82% | 1 | 25.00% |
Matthew Wilcox | 2 | 9.09% | 1 | 25.00% |
Thomas Gleixner | 1 | 4.55% | 1 | 25.00% |
Yinghai Lu | 1 | 4.55% | 1 | 25.00% |
Total | 22 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(disable_irq);
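Because disables and enables nest, a helper that needs the line quiet can bracket its own critical section without knowing whether callers already disabled the interrupt. A hedged sketch with invented names:

#include <linux/interrupt.h>

struct my_dev {
    int irq;
};

static void my_reprogram_hw(struct my_dev *dev)
{
    /* illustrative stub: reconfigure the device */
}

static void my_dev_reconfigure(struct my_dev *dev)
{
    disable_irq(dev->irq);    /* waits for running handlers, may sleep */
    my_reprogram_hw(dev);     /* the handler cannot run here */
    enable_irq(dev->irq);     /* re-enabled only when depth drops to 0 */
}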
/**
* disable_hardirq - disables an irq and waits for hardirq completion
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Enables and Disables are
* nested.
* This function waits for any pending hard IRQ handlers for this
* interrupt to complete before returning. If you use this function while
* holding a resource the hard IRQ handler may need, you will deadlock.
*
* When used to optimistically disable an interrupt from atomic context
* the return value must be checked.
*
* Returns: false if a threaded handler is active.
*
* This function may be called - with care - from IRQ context.
*/
bool disable_hardirq(unsigned int irq)
{
if (!__disable_irq_nosync(irq))
return synchronize_hardirq(irq);
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(disable_hardirq);
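The "optimistic disable" the comment refers to is the pattern some netpoll-style paths use: only call the handler by hand if the hard IRQ part is quiesced and no threaded handler is active. A hedged sketch; the device structure and handler are invented:

#include <linux/interrupt.h>

struct my_dev {
    int irq;
};

static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
{
    /* illustrative stub for the driver's normal handler */
    return IRQ_HANDLED;
}

static void my_poll_controller(struct my_dev *dev)
{
    /* atomic context: we cannot wait for a threaded handler */
    if (disable_hardirq(dev->irq))
        my_dev_interrupt(dev->irq, dev);
    enable_irq(dev->irq);
}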
void __enable_irq(struct irq_desc *desc)
{
switch (desc->depth) {
case 0:
err_out:
WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
irq_desc_get_irq(desc));
break;
case 1: {
if (desc->istate & IRQS_SUSPENDED)
goto err_out;
/* Prevent probing on this irq: */
irq_settings_set_noprobe(desc);
/*
* Call irq_startup() not irq_enable() here because the
* interrupt might be marked NOAUTOEN. So irq_startup()
* needs to be invoked when it gets enabled the first
* time. If it was already started up, then irq_startup()
* will invoke irq_enable() under the hood.
*/
irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
break;
}
default:
desc->depth--;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 33 | 43.42% | 3 | 20.00% |
Thomas Gleixner | 24 | 31.58% | 8 | 53.33% |
Rafael J. Wysocki | 11 | 14.47% | 1 | 6.67% |
Jiang Liu | 4 | 5.26% | 1 | 6.67% |
Arjan van de Ven | 3 | 3.95% | 1 | 6.67% |
Björn Helgaas | 1 | 1.32% | 1 | 6.67% |
Total | 76 | 100.00% | 15 | 100.00% |
/**
* enable_irq - enable handling of an irq
* @irq: Interrupt to enable
*
* Undoes the effect of one call to disable_irq(). If this
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*
* This function may be called from IRQ context only when
* desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
*/
void enable_irq(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return;
if (WARN(!desc->irq_data.chip,
KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
goto out;
__enable_irq(desc);
out:
irq_put_desc_busunlock(desc, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 57 | 83.82% | 4 | 57.14% |
Ingo Molnar | 7 | 10.29% | 1 | 14.29% |
Marc Zyngier | 2 | 2.94% | 1 | 14.29% |
Yinghai Lu | 2 | 2.94% | 1 | 14.29% |
Total | 68 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(enable_irq);
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
return 0;
if (desc->irq_data.chip->irq_set_wake)
ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Uwe Kleine-König | 47 | 62.67% | 1 | 16.67% |
Santosh Shilimkar | 14 | 18.67% | 1 | 16.67% |
Thomas Gleixner | 10 | 13.33% | 2 | 33.33% |
Yinghai Lu | 3 | 4.00% | 1 | 16.67% |
David Brownell | 1 | 1.33% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
/**
* irq_set_irq_wake - control irq power management wakeup
* @irq: interrupt to control
* @on: enable/disable power management wakeup
*
* Enable/disable power management wakeup mode, which is
* disabled by default. Enables and disables must match,
* just as they match for non-wakeup mode support.
*
* Wakeup mode lets this IRQ wake the system from sleep
* states like "suspend to RAM".
*/
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
return -EINVAL;
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
if (ret)
desc->wake_depth = 0;
else
irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
}
} else {
if (desc->wake_depth == 0) {
WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
} else if (--desc->wake_depth == 0) {
ret = set_irq_wake_real(irq, on);
if (ret)
desc->wake_depth = 1;
else
irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
}
}
irq_put_desc_busunlock(desc, flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 67 | 39.88% | 4 | 40.00% |
David Brownell | 45 | 26.79% | 1 | 10.00% |
Uwe Kleine-König | 40 | 23.81% | 1 | 10.00% |
Jesper Juhl | 9 | 5.36% | 1 | 10.00% |
Arjan van de Ven | 4 | 2.38% | 1 | 10.00% |
Marc Zyngier | 2 | 1.19% | 1 | 10.00% |
Yinghai Lu | 1 | 0.60% | 1 | 10.00% |
Total | 168 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(irq_set_irq_wake);
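Drivers usually reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h>, typically in their suspend/resume callbacks. A hedged sketch with invented device names:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct my_dev {
    int irq;
};

static int my_dev_suspend(struct device *dev)
{
    struct my_dev *md = dev_get_drvdata(dev);

    if (device_may_wakeup(dev))
        enable_irq_wake(md->irq);    /* irq_set_irq_wake(irq, 1) */
    return 0;
}

static int my_dev_resume(struct device *dev)
{
    struct my_dev *md = dev_get_drvdata(dev);

    if (device_may_wakeup(dev))
        disable_irq_wake(md->irq);   /* irq_set_irq_wake(irq, 0) */
    return 0;
}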
/*
* Internal function that tells the architecture code whether a
* particular irq has been exclusively allocated or is available
* for driver use.
*/
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
int canrequest = 0;
if (!desc)
return 0;
if (irq_settings_can_request(desc)) {
if (!desc->action ||
irqflags & desc->action->flags & IRQF_SHARED)
canrequest = 1;
}
irq_put_desc_unlock(desc, flags);
return canrequest;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 37 | 43.53% | 5 | 50.00% |
Ingo Molnar | 31 | 36.47% | 1 | 10.00% |
Yinghai Lu | 13 | 15.29% | 2 | 20.00% |
Ben Hutchings | 2 | 2.35% | 1 | 10.00% |
Marc Zyngier | 2 | 2.35% | 1 | 10.00% |
Total | 85 | 100.00% | 10 | 100.00% |
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
struct irq_chip *chip = desc->irq_data.chip;
int ret, unmask = 0;
if (!chip || !chip->irq_set_type) {
/*
* IRQF_TRIGGER_* but the PIC does not support multiple
* flow-types?
*/
pr_debug("No set_type function for IRQ %d (%s)\n",
irq_desc_get_irq(desc),
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}
if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
if (!irqd_irq_masked(&desc->irq_data))
mask_irq(desc);
if (!irqd_irq_disabled(&desc->irq_data))
unmask = 1;
}
/* Mask all flags except trigger mode */
flags &= IRQ_TYPE_SENSE_MASK;
ret = chip->irq_set_type(&desc->irq_data, flags);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
irqd_set(&desc->irq_data, flags);
case IRQ_SET_MASK_OK_NOCOPY:
flags = irqd_get_trigger_type(&desc->irq_data);
irq_settings_set_trigger_mask(desc, flags);
irqd_clear(&desc->irq_data, IRQD_LEVEL);
irq_settings_clr_level(desc);
if (flags & IRQ_TYPE_LEVEL_MASK) {
irq_settings_set_level(desc);
irqd_set(&desc->irq_data, IRQD_LEVEL);
}
ret = 0;
break;
default:
pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
flags, irq_desc_get_irq(desc), chip->irq_set_type);
}
if (unmask)
unmask_irq(desc);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 148 | 58.04% | 6 | 40.00% |
Uwe Kleine-König | 65 | 25.49% | 1 | 6.67% |
David Brownell | 23 | 9.02% | 3 | 20.00% |
Jiang Liu | 11 | 4.31% | 2 | 13.33% |
Alexander Kuleshov | 5 | 1.96% | 1 | 6.67% |
Andrew Morton | 2 | 0.78% | 1 | 6.67% |
Mark Nelson | 1 | 0.39% | 1 | 6.67% |
Total | 255 | 100.00% | 15 | 100.00% |
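Drivers do not call __irq_set_trigger() directly; the trigger type usually arrives here from the IRQF_TRIGGER_* bits passed to request_irq() (or via irq_set_irq_type()). A hedged sketch with invented names:

#include <linux/interrupt.h>

struct my_dev {
    int irq;
};

static irqreturn_t my_edge_handler(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

static int my_request_falling_edge(struct my_dev *dev)
{
    return request_irq(dev->irq, my_edge_handler,
                       IRQF_TRIGGER_FALLING, "my-dev", dev);
}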
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
desc->parent_irq = parent_irq;
irq_put_desc_unlock(desc, flags);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 55 | 100.00% | 1 | 100.00% |
Total | 55 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
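A hedged sketch of the intended user of irq_set_parent(): a driver for a secondary ("nested") interrupt controller records which parent line a child interrupt hangs off, so the software resend logic behind CONFIG_HARDIRQS_SW_RESEND knows what to retrigger. The helper below is invented:

#include <linux/irq.h>

static void my_secondary_chip_map(unsigned int child_irq,
                                  unsigned int parent_irq)
{
    irq_set_parent(child_irq, parent_irq);
}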
/*
* Default primary interrupt handler for threaded interrupts. Is
* assigned as primary handler when request_threaded_irq is called
* with handler == NULL. Useful for oneshot interrupts.
*/
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
return IRQ_WAKE_THREAD;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
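As the comment before irq_default_primary_handler() notes, passing handler == NULL to request_threaded_irq() installs this default primary handler, which merely wakes the thread; IRQF_ONESHOT keeps the line masked until the threaded handler completes. A hedged sketch with invented names:

#include <linux/interrupt.h>

struct my_dev {
    int irq;
};

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
    /* may sleep: e.g. talk to the device over I2C or SPI */
    return IRQ_HANDLED;
}

static int my_request_threaded(struct my_dev *dev)
{
    return request_threaded_irq(dev->irq, NULL, my_thread_fn,
                                IRQF_ONESHOT, "my-dev", dev);
}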
/*
* Primary handler for nested threaded interrupts. Should never be
* called.
*/
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
WARN(1, "Primary handler called for nested irq %d\n", irq);
return IRQ_NONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
WARN(1, "Secondary action handler called for irq %d\n", irq);
return IRQ_NONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static int irq_wait_for_interrupt(struct irqaction *action)
{
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 33 | 52.38% | 2 | 33.33% |
Ido Yariv | 15 | 23.81% | 1 | 16.67% |
Ingo Molnar | 14 | 22.22% | 2 | 33.33% |
Yinghai Lu | 1 | 1.59% | 1 | 16.67% |
Total | 63 | 100.00% | 6 | 100.00% |
/*
* Oneshot interrupts keep the irq line masked until the threaded
* handler has finished. Unmask if the interrupt has not been disabled and
* is marked MASKED.
*/
static void irq_finalize_oneshot(struct irq_desc *desc,
struct irqaction *action)
{
if (!(desc->istate & IRQS_ONESHOT) ||
action->handler == irq_forced_secondary_handler)
return;
again:
chip_bus_lock(desc);
raw_spin_lock_irq(&desc->lock);
/*
* Implausible though it may be, we need to protect against
* the following scenario:
*
* The thread finishes faster than the hard interrupt handler
* on the other CPU. If we unmask the irq line now, the
* interrupt can come in again, mask the line and then bail out
* due to IRQS_INPROGRESS, leaving the irq line masked forever.
*
* This also serializes the state of shared oneshot handlers
* versus "desc->threads_onehsot |= action->thread_mask;" in
* irq_wake_thread(). See the comment there which explains the
* serialization.
*/
if (unlikely(irqd_irq_inprogress(&desc->irq_data)