cregit-Linux how code gets into the kernel

Release 4.15 kernel/irq/manage.c

Directory: kernel/irq
/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */


#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING

__read_mostly bool force_irqthreads;


static int __init setup_forced_irqthreads(char *arg) { force_irqthreads = true; return 0; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         18      100.00%  1        100.00%
Total                   18      100.00%  1        100.00%

early_param("threadirqs", setup_forced_irqthreads); #endif
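The "threadirqs" boot parameter above only flips force_irqthreads; setup code later in this file consults the flag when deciding whether a primary handler should be pushed into a thread. A minimal sketch of such a check, purely illustrative (want_forced_thread() is a made-up helper, not the actual irq_setup_forced_threading() logic):

static bool want_forced_thread(struct irqaction *new)
{
	if (!force_irqthreads)
		return false;
	/* handlers that insist on hard interrupt context opt out */
	return !(new->flags & IRQF_NO_THREAD);
}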
static void __synchronize_hardirq(struct irq_desc *desc) { bool inprogress; do { unsigned long flags; /* * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. */ while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ raw_spin_lock_irqsave(&desc->lock, flags); inprogress = irqd_irq_inprogress(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); /* Oops, that failed? */ } while (inprogress); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Herbert Xu              37      51.39%   1        20.00%
Thomas Gleixner         21      29.17%   3        60.00%
Ingo Molnar             14      19.44%   1        20.00%
Total                   72      100.00%  5        100.00%

/** * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending hard IRQ handlers for this * interrupt to complete before returning. If you use this * function while holding a resource the IRQ handler may need you * will deadlock. It does not take associated threaded handlers * into account. * * Do not use this for shutdown scenarios where you must be sure * that all parts (hardirq and threaded handler) have completed. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. */
bool synchronize_hardirq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { __synchronize_hardirq(desc); return !atomic_read(&desc->threads_active); } return true; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner2762.79%150.00%
Peter Zijlstra1637.21%150.00%
Total43100.00%2100.00%

EXPORT_SYMBOL(synchronize_hardirq); /** * synchronize_irq - wait for pending IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending IRQ handlers for this interrupt * to complete before returning. If you use this function while * holding a resource the IRQ handler may need you will deadlock. * * This function may be called - with care - from IRQ context. */
void synchronize_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { __synchronize_hardirq(desc); /* * We made sure that no hardirq handler is * running. Now verify that no threaded handlers are * active. */ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); } }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         46      97.87%   2        66.67%
Ingo Molnar             1       2.13%    1        33.33%
Total                   47      100.00%  3        100.00%

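A usage sketch, assuming a hypothetical driver (struct mydev and mydev_hw_mask_irqs() are invented): stop the device from raising the line, then let synchronize_irq() guarantee that neither the hard handler nor the threaded handler is still running before driver state is torn down.

#include <linux/interrupt.h>

struct mydev {				/* illustrative driver state */
	unsigned int irq;
	void *rx_buf;
};

static void mydev_stop(struct mydev *dev)
{
	mydev_hw_mask_irqs(dev);	/* hypothetical: device no longer raises the line */
	synchronize_irq(dev->irq);	/* any handler already running has now returned */
	/* dev->rx_buf and friends may be torn down from here on */
}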
EXPORT_SYMBOL(synchronize_irq); #ifdef CONFIG_SMP cpumask_var_t irq_default_affinity;
static bool __irq_can_set_affinity(struct irq_desc *desc) { if (!desc || !irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) return false; return true; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         43      91.49%   6        85.71%
Jiang Liu               4       8.51%    1        14.29%
Total                   47      100.00%  7        100.00%

/** * irq_can_set_affinity - Check if the affinity of a given irq can be set * @irq: Interrupt to check * */
int irq_can_set_affinity(unsigned int irq) { return __irq_can_set_affinity(irq_to_desc(irq)); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jiang Liu               18      100.00%  1        100.00%
Total                   18      100.00%  1        100.00%

/** * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space * @irq: Interrupt to check * * Like irq_can_set_affinity() above, but additionally checks for the * AFFINITY_MANAGED flag. */
bool irq_can_set_affinity_usr(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); return __irq_can_set_affinity(desc) && !irqd_affinity_is_managed(&desc->irq_data); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         34      100.00%  1        100.00%
Total                   34      100.00%  1        100.00%

/** * irq_set_thread_affinity - Notify irq threads to adjust affinity * @desc: irq descriptor which has affinity changed * * We just set IRQTF_AFFINITY and delegate the affinity setting * to the interrupt thread itself. We cannot call * set_cpus_allowed_ptr() here as we hold desc->lock and this * code can be called from hard interrupt context. */
void irq_set_thread_affinity(struct irq_desc *desc) { struct irqaction *action; for_each_action_of_desc(desc, action) if (action->thread) set_bit(IRQTF_AFFINITY, &action->thread_flags); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         31      83.78%   2        66.67%
Daniel Lezcano          6       16.22%   1        33.33%
Total                   37      100.00%  3        100.00%


static void irq_validate_effective_affinity(struct irq_data *data) { #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK const struct cpumask *m = irq_data_get_effective_affinity_mask(data); struct irq_chip *chip = irq_data_get_irq_chip(data); if (!cpumask_empty(m)) return; pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", chip->name, data->irq); #endif }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         59      100.00%  1        100.00%
Total                   59      100.00%  1        100.00%


int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); int ret; if (!chip || !chip->irq_set_affinity) return -EINVAL; ret = chip->irq_set_affinity(data, mask, force); switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: cpumask_copy(desc->irq_common_data.affinity, mask); case IRQ_SET_MASK_OK_NOCOPY: irq_validate_effective_affinity(data); irq_set_thread_affinity(desc); ret = 0; } return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         56      50.00%   8        50.00%
Jiang Liu               20      17.86%   3        18.75%
Yinghai Lu              16      14.29%   2        12.50%
David Daney             10      8.93%    1        6.25%
Suresh B. Siddha        6       5.36%    1        6.25%
Rusty Russell           4       3.57%    1        6.25%
Total                   112     100.00%  16       100.00%

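irq_do_set_affinity() interprets the chip callback's return value in the switch above. A hedged sketch of what a chip-side .irq_set_affinity implementation might look like (my_chip_set_affinity() and my_hw_route() are invented, not a real driver):

static int my_chip_set_affinity(struct irq_data *d,
				const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	my_hw_route(d->hwirq, cpu);	/* hypothetical routing register write */

	/*
	 * IRQ_SET_MASK_OK        - the core copies 'mask' into the descriptor
	 * IRQ_SET_MASK_OK_DONE   - handled like OK by the code above
	 * IRQ_SET_MASK_OK_NOCOPY - the callback updated the mask itself, so
	 *                          the core skips the cpumask_copy()
	 */
	return IRQ_SET_MASK_OK;
}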

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_chip *chip = irq_data_get_irq_chip(data); struct irq_desc *desc = irq_data_to_desc(data); int ret = 0; if (!chip || !chip->irq_set_affinity) return -EINVAL; if (irq_can_move_pcntxt(data)) { ret = irq_do_set_affinity(data, mask, force); } else { irqd_set_move_pending(data); irq_copy_pending(desc, mask); } if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); schedule_work(&desc->affinity_notify->work); } irqd_set(data, IRQD_AFFINITY_SET); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jiang Liu               71      54.20%   1        8.33%
Ben Hutchings           28      21.37%   1        8.33%
Thomas Gleixner         17      12.98%   6        50.00%
David Daney             10      7.63%    1        8.33%
Yinghai Lu              2       1.53%    1        8.33%
Rusty Russell           2       1.53%    1        8.33%
Suresh B. Siddha        1       0.76%    1        8.33%
Total                   131     100.00%  12       100.00%


int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; int ret; if (!desc) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
David Daney             54      66.67%   1        14.29%
Thomas Gleixner         27      33.33%   6        85.71%
Total                   81      100.00%  7        100.00%

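The exported entry points in <linux/interrupt.h> are thin wrappers around __irq_set_affinity(): irq_set_affinity() passes force == false and irq_force_affinity() passes force == true. An illustrative helper (mydev_pin_irq() is invented) that pins an interrupt to one online CPU:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

static int mydev_pin_irq(unsigned int irq, unsigned int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;
	return irq_set_affinity(irq, cpumask_of(cpu));
}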

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; desc->affinity_hint = m; irq_put_desc_unlock(desc, flags); /* set the initial affinity to prevent every interrupt being on CPU0 */ if (m) __irq_set_affinity(irq, m, false); return 0; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter P. Waskiewicz Jr  48      65.75%   1        20.00%
Jesse Brandeburg        14      19.18%   2        40.00%
Thomas Gleixner         9       12.33%   1        20.00%
Marc Zyngier            2       2.74%    1        20.00%
Total                   73      100.00%  5        100.00%

EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
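A sketch of the common multi-queue pattern for irq_set_affinity_hint(), with all mydev names invented: publish a per-vector hint (visible in /proc/irq/<n>/affinity_hint and, as the comment in the function notes, also used as the initial affinity), and clear it again before the interrupt is freed.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void mydev_set_irq_hints(struct mydev *dev)
{
	int i;

	for (i = 0; i < dev->num_queues; i++)
		irq_set_affinity_hint(dev->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
}

static void mydev_clear_irq_hints(struct mydev *dev)
{
	int i;

	for (i = 0; i < dev->num_queues; i++)
		irq_set_affinity_hint(dev->queue_irq[i], NULL);	/* before free_irq() */
}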
static void irq_affinity_notify(struct work_struct *work) { struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) goto out; raw_spin_lock_irqsave(&desc->lock, flags); if (irq_move_pending(&desc->irq_data)) irq_get_pending(cpumask, desc); else cpumask_copy(cpumask, desc->irq_common_data.affinity); raw_spin_unlock_irqrestore(&desc->lock, flags); notify->notify(notify, cpumask); free_cpumask_var(cpumask); out: kref_put(&notify->kref, notify->release); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ben Hutchings           129     92.81%   1        20.00%
Thomas Gleixner         9       6.47%    3        60.00%
Jiang Liu               1       0.72%    1        20.00%
Total                   139     100.00%  5        100.00%

/** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification * @notify: Context for notification, or %NULL to disable * notification. Function pointers must be initialised; * the other fields will be initialised by this function. * * Must be called in process context. Notification may only be enabled * after the IRQ is allocated and must be disabled before the IRQ is * freed using free_irq(). */
int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { struct irq_desc *desc = irq_to_desc(irq); struct irq_affinity_notify *old_notify; unsigned long flags; /* The release function is promised process context */ might_sleep(); if (!desc) return -EINVAL; /* Complete initialisation of *notify */ if (notify) { notify->irq = irq; kref_init(&notify->kref); INIT_WORK(&notify->work, irq_affinity_notify); } raw_spin_lock_irqsave(&desc->lock, flags); old_notify = desc->affinity_notify; desc->affinity_notify = notify; raw_spin_unlock_irqrestore(&desc->lock, flags); if (old_notify) kref_put(&old_notify->kref, old_notify->release); return 0; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ben Hutchings           128     100.00%  1        100.00%
Total                   128     100.00%  1        100.00%

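A hedged sketch of a consumer of the irq_set_affinity_notifier() API above (all my_* names invented): only .notify and .release are filled in by the caller, the remaining fields are initialised by the function itself.

#include <linux/interrupt.h>

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* re-steer per-CPU resources to the new target CPUs here */
}

static void my_affinity_release(struct kref *ref)
{
	/* nothing dynamically allocated in this sketch */
}

static struct irq_affinity_notify my_notify = {
	.notify		= my_affinity_notify,
	.release	= my_affinity_release,
};

/*
 * enable:  irq_set_affinity_notifier(irq, &my_notify);
 * disable: irq_set_affinity_notifier(irq, NULL);  - required before free_irq()
 */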
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); #ifndef CONFIG_AUTO_IRQ_AFFINITY /* * Generic version of the affinity autoselector. */
int irq_setup_affinity(struct irq_desc *desc) { struct cpumask *set = irq_default_affinity; int ret, node = irq_desc_get_node(desc); static DEFINE_RAW_SPINLOCK(mask_lock); static struct cpumask mask; /* Excludes PER_CPU and NO_BALANCE interrupts */ if (!__irq_can_set_affinity(desc)) return 0; raw_spin_lock(&mask_lock); /* * Preserve the managed affinity setting and a userspace affinity * setup, but make sure that one of the targets is online. */ if (irqd_affinity_is_managed(&desc->irq_data) || irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { if (cpumask_intersects(desc->irq_common_data.affinity, cpu_online_mask)) set = desc->irq_common_data.affinity; else irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); } cpumask_and(&mask, cpu_online_mask, set); if (node != NUMA_NO_NODE) { const struct cpumask *nodemask = cpumask_of_node(node); /* make sure at least one of the cpus in nodemask is online */ if (cpumask_intersects(&mask, nodemask)) cpumask_and(&mask, &mask, nodemask); } ret = irq_do_set_affinity(&desc->irq_data, &mask, false); raw_spin_unlock(&mask_lock); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         105     56.45%   9        50.00%
Prarit Bhargava         41      22.04%   1        5.56%
Maksim Krasnyanskiy     17      9.14%    1        5.56%
Jiang Liu               8       4.30%    4        22.22%
Yinghai Lu              7       3.76%    1        5.56%
Rusty Russell           7       3.76%    1        5.56%
Masahiro Yamada         1       0.54%    1        5.56%
Total                   186     100.00%  18       100.00%

#else /* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc) { return irq_select_affinity(irq_desc_get_irq(desc)); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         16      84.21%   2        66.67%
Jiang Liu               3       15.79%   1        33.33%
Total                   19      100.00%  3        100.00%

#endif /* * Called when a bogus affinity is set via /proc/irq */
int irq_select_affinity_usr(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; int ret; raw_spin_lock_irqsave(&desc->lock, flags); ret = irq_setup_affinity(desc); raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         56      100.00%  3        100.00%
Total                   56      100.00%  3        100.00%

#endif /** * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt * @irq: interrupt number to set affinity * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU * specific data for percpu_devid interrupts * * This function uses the vCPU specific data to set the vCPU * affinity for an irq. The vCPU specific data is passed from * outside, such as KVM. One example code path is as below: * KVM -> IOMMU -> irq_set_vcpu_affinity(). */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); struct irq_data *data; struct irq_chip *chip; int ret = -ENOSYS; if (!desc) return -EINVAL; data = irq_desc_get_irq_data(desc); do { chip = irq_data_get_irq_chip(data); if (chip && chip->irq_set_vcpu_affinity) break; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY data = data->parent_data; #else data = NULL; #endif } while (data); if (data) ret = chip->irq_set_vcpu_affinity(data, vcpu_info); irq_put_desc_unlock(desc, flags); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Feng Wu                 100     76.92%   1        50.00%
Marc Zyngier            30      23.08%   1        50.00%
Total                   130     100.00%  2        100.00%

EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
void __disable_irq(struct irq_desc *desc) { if (!desc->depth++) irq_disable(desc); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki       21      91.30%   1        50.00%
Thomas Gleixner         2       8.70%    1        50.00%
Total                   23      100.00%  2        100.00%


static int __disable_irq_nosync(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; __disable_irq(desc); irq_put_desc_busunlock(desc, flags); return 0; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         23      43.40%   2        25.00%
Ingo Molnar             21      39.62%   2        25.00%
Matthew Wilcox          3       5.66%    1        12.50%
Rafael J. Wysocki       2       3.77%    1        12.50%
Yinghai Lu              2       3.77%    1        12.50%
Marc Zyngier            2       3.77%    1        12.50%
Total                   53      100.00%  8        100.00%

/** * disable_irq_nosync - disable an irq without waiting * @irq: Interrupt to disable * * Disable the selected interrupt line. Disables and Enables are * nested. * Unlike disable_irq(), this function does not ensure existing * instances of the IRQ handler have completed before returning. * * This function may be called from IRQ context. */
void disable_irq_nosync(unsigned int irq) { __disable_irq_nosync(irq); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         13      92.86%   2        66.67%
Ingo Molnar             1       7.14%    1        33.33%
Total                   14      100.00%  3        100.00%

EXPORT_SYMBOL(disable_irq_nosync); /** * disable_irq - disable an irq and wait for completion * @irq: Interrupt to disable * * Disable the selected interrupt line. Enables and Disables are * nested. * This function waits for any pending IRQ handlers for this interrupt * to complete before returning. If you use this function while * holding a resource the IRQ handler may need you will deadlock. * * This function may be called - with care - from IRQ context. */
void disable_irq(unsigned int irq) { if (!__disable_irq_nosync(irq)) synchronize_irq(irq); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             18      81.82%   1        25.00%
Matthew Wilcox          2       9.09%    1        25.00%
Thomas Gleixner         1       4.55%    1        25.00%
Yinghai Lu              1       4.55%    1        25.00%
Total                   22      100.00%  4        100.00%

EXPORT_SYMBOL(disable_irq); /** * disable_hardirq - disables an irq and waits for hardirq completion * @irq: Interrupt to disable * * Disable the selected interrupt line. Enables and Disables are * nested. * This function waits for any pending hard IRQ handlers for this * interrupt to complete before returning. If you use this function while * holding a resource the hard IRQ handler may need you will deadlock. * * When used to optimistically disable an interrupt from atomic context * the return value must be checked. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. */
bool disable_hardirq(unsigned int irq) { if (!__disable_irq_nosync(irq)) return synchronize_hardirq(irq); return false; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Peter Zijlstra          26      100.00%  1        100.00%
Total                   26      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(disable_hardirq);
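A sketch of the optimistic-disable pattern the comment above describes, assuming an invented mydev driver: try to quiesce from atomic context and back off if a threaded handler is still running.

#include <linux/interrupt.h>

static bool mydev_try_quiesce(struct mydev *dev)	/* may run in atomic context */
{
	if (disable_hardirq(dev->irq))
		return true;		/* hard handler done, no threaded handler active */

	enable_irq(dev->irq);		/* thread still running: undo and let the caller retry */
	return false;
}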
void __enable_irq(struct irq_desc *desc) { switch (desc->depth) { case 0: err_out: WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq_desc_get_irq(desc)); break; case 1: { if (desc->istate & IRQS_SUSPENDED) goto err_out; /* Prevent probing on this irq: */ irq_settings_set_noprobe(desc); /* * Call irq_startup() not irq_enable() here because the * interrupt might be marked NOAUTOEN. So irq_startup() * needs to be invoked when it gets enabled the first * time. If it was already started up, then irq_startup() * will invoke irq_enable() under the hood. */ irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); break; } default: desc->depth--; } }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Ingo Molnar             33      43.42%   3        20.00%
Thomas Gleixner         24      31.58%   8        53.33%
Rafael J. Wysocki       11      14.47%   1        6.67%
Jiang Liu               4       5.26%    1        6.67%
Arjan van de Ven        3       3.95%    1        6.67%
Björn Helgaas           1       1.32%    1        6.67%
Total                   76      100.00%  15       100.00%

/** * enable_irq - enable handling of an irq * @irq: Interrupt to enable * * Undoes the effect of one call to disable_irq(). If this * matches the last disable, processing of interrupts on this * IRQ line is re-enabled. * * This function may be called from IRQ context only when * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! */
void enable_irq(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return; if (WARN(!desc->irq_data.chip, KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) goto out; __enable_irq(desc); out: irq_put_desc_busunlock(desc, flags); }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         57      83.82%   4        57.14%
Ingo Molnar             7       10.29%   1        14.29%
Marc Zyngier            2       2.94%    1        14.29%
Yinghai Lu              2       2.94%    1        14.29%
Total                   68      100.00%  7        100.00%

EXPORT_SYMBOL(enable_irq);
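Disables and enables nest, as the depth handling in __disable_irq()/__enable_irq() shows; only the enable that balances the first disable actually re-enables the line. A purely illustrative sequence:

#include <linux/interrupt.h>

static void mydev_nesting_example(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1: line disabled, running handlers drained */
	disable_irq(irq);	/* depth 1 -> 2: already disabled, only the count changes */

	enable_irq(irq);	/* depth 2 -> 1: still disabled */
	enable_irq(irq);	/* depth 1 -> 0: interrupt delivery resumes */
}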
static int set_irq_wake_real(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (desc->irq_data.chip->irq_set_wake) ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Uwe Kleine-König        47      62.67%   1        16.67%
Santosh Shilimkar       14      18.67%   1        16.67%
Thomas Gleixner         10      13.33%   2        33.33%
Yinghai Lu              3       4.00%    1        16.67%
David Brownell          1       1.33%    1        16.67%
Total                   75      100.00%  6        100.00%

/** * irq_set_irq_wake - control irq power management wakeup * @irq: interrupt to control * @on: enable/disable power management wakeup * * Enable/disable power management wakeup mode, which is * disabled by default. Enables and disables must match, * just as they match for non-wakeup mode support. * * Wakeup mode lets this IRQ wake the system from sleep * states like "suspend to RAM". */
int irq_set_irq_wake(unsigned int irq, unsigned int on) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); int ret = 0; if (!desc) return -EINVAL; /* wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. */ if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 0; else irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); } } else { if (desc->wake_depth == 0) { WARN(1, "Unbalanced IRQ %d wake disable\n", irq); } else if (--desc->wake_depth == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 1; else irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); } } irq_put_desc_busunlock(desc, flags); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         67      39.88%   4        40.00%
David Brownell          45      26.79%   1        10.00%
Uwe Kleine-König        40      23.81%   1        10.00%
Jesper Juhl             9       5.36%    1        10.00%
Arjan van de Ven        4       2.38%    1        10.00%
Marc Zyngier            2       1.19%    1        10.00%
Yinghai Lu              1       0.60%    1        10.00%
Total                   168     100.00%  10       100.00%

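Drivers normally reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers in <linux/interrupt.h>. A hedged suspend/resume sketch, with struct mydev and the mydev_* callbacks invented:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static int mydev_suspend(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(md->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int mydev_resume(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(md->irq);	/* balanced against the enable above */
	return 0;
}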
EXPORT_SYMBOL(irq_set_irq_wake); /* * Internal function that tells the architecture code whether a * particular irq has been exclusively allocated or is available * for driver use. */
int can_request_irq(unsigned int irq, unsigned long irqflags) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); int canrequest = 0; if (!desc) return 0; if (irq_settings_can_request(desc)) { if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) canrequest = 1; } irq_put_desc_unlock(desc, flags); return canrequest; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         37      43.53%   5        50.00%
Ingo Molnar             31      36.47%   1        10.00%
Yinghai Lu              13      15.29%   2        20.00%
Ben Hutchings           2       2.35%    1        10.00%
Marc Zyngier            2       2.35%    1        10.00%
Total                   85      100.00%  10       100.00%


int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) { struct irq_chip *chip = desc->irq_data.chip; int ret, unmask = 0; if (!chip || !chip->irq_set_type) { /* * IRQF_TRIGGER_* but the PIC does not support multiple * flow-types? */ pr_debug("No set_type function for IRQ %d (%s)\n", irq_desc_get_irq(desc), chip ? (chip->name ? : "unknown") : "unknown"); return 0; } if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { if (!irqd_irq_masked(&desc->irq_data)) mask_irq(desc); if (!irqd_irq_disabled(&desc->irq_data)) unmask = 1; } /* Mask all flags except trigger mode */ flags &= IRQ_TYPE_SENSE_MASK; ret = chip->irq_set_type(&desc->irq_data, flags); switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); irq_settings_set_trigger_mask(desc, flags); irqd_clear(&desc->irq_data, IRQD_LEVEL); irq_settings_clr_level(desc); if (flags & IRQ_TYPE_LEVEL_MASK) { irq_settings_set_level(desc); irqd_set(&desc->irq_data, IRQD_LEVEL); } ret = 0; break; default: pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", flags, irq_desc_get_irq(desc), chip->irq_set_type); } if (unmask) unmask_irq(desc); return ret; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         148     58.04%   6        40.00%
Uwe Kleine-König        65      25.49%   1        6.67%
David Brownell          23      9.02%    3        20.00%
Jiang Liu               11      4.31%    2        13.33%
Alexander Kuleshov      5       1.96%    1        6.67%
Andrew Morton           2       0.78%    1        6.67%
Mark Nelson             1       0.39%    1        6.67%
Total                   255     100.00%  15       100.00%

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->parent_irq = parent_irq; irq_put_desc_unlock(desc, flags); return 0; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         55      100.00%  1        100.00%
Total                   55      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(irq_set_parent); #endif /* * Default primary interrupt handler for threaded interrupts. Is * assigned as primary handler when request_threaded_irq is called * with handler == NULL. Useful for oneshot interrupts. */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) { return IRQ_WAKE_THREAD; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         16      100.00%  1        100.00%
Total                   16      100.00%  1        100.00%

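The default primary handler exists for the request_threaded_irq(handler == NULL) case mentioned in the comment above it. A minimal, hypothetical oneshot request for a slow (e.g. I2C) level-triggered device, with all mydev names invented:

#include <linux/interrupt.h>

static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
{
	/* sleepable work, e.g. bus transfers, goes here */
	return IRQ_HANDLED;
}

static int mydev_request_irq(unsigned int irq, void *data)
{
	/*
	 * No primary handler: the core substitutes irq_default_primary_handler(),
	 * which just returns IRQ_WAKE_THREAD; IRQF_ONESHOT keeps the line masked
	 * until mydev_thread_fn() has run.
	 */
	return request_threaded_irq(irq, NULL, mydev_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "mydev", data);
}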
/* * Primary handler for nested threaded interrupts. Should never be * called. */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) { WARN(1, "Primary handler called for nested irq %d\n", irq); return IRQ_NONE; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         25      100.00%  1        100.00%
Total                   25      100.00%  1        100.00%


static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) { WARN(1, "Secondary action handler called for irq %d\n", irq); return IRQ_NONE; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         25      100.00%  1        100.00%
Total                   25      100.00%  1        100.00%


static int irq_wait_for_interrupt(struct irqaction *action) { set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; } schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return -1; }

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Thomas Gleixner         33      52.38%   2        33.33%
Ido Yariv               15      23.81%   1        16.67%
Ingo Molnar             14      22.22%   2        33.33%
Yinghai Lu              1       1.59%    1        16.67%
Total                   63      100.00%  6        100.00%

/* * Oneshot interrupts keep the irq line masked until the threaded * handler has finished. Unmask if the interrupt has not been disabled and * is marked MASKED. */
static void irq_finalize_oneshot(struct irq_desc *desc, struct irqaction *action) { if (!(desc->istate & IRQS_ONESHOT) || action->handler == irq_forced_secondary_handler) return; again: chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); /* * Implausible though it may be we need to protect us against * the following scenario: * * The thread finishes faster than the hard interrupt handler * on the other CPU. If we unmask the irq line then the * interrupt can come in again and masks the line, leaves due * to IRQS_INPROGRESS and the irq line is masked forever. * * This also serializes the state of shared oneshot handlers * versus "desc->threads_oneshot |= action->thread_mask;" in * irq_wake_thread(). See the comment there which explains the * serialization. */ if (unlikely(irqd_irq_inprogress(&desc->irq_data)