cregit-Linux: how code gets into the kernel

Release 4.16: kernel/irq_work.c

Directory: kernel
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
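As the file-header comment says, this framework lets a client hand off a callback from any context, including NMI, and have it run later in hardirq context. A minimal usage sketch follows; the callback and caller names are hypothetical and are not part of this file:

/* Hypothetical client code, shown only to illustrate the API. */
#include <linux/irq_work.h>
#include <linux/printk.h>

/* Runs later, in hardirq context, once the work item is raised. */
static void my_irq_work_func(struct irq_work *work)
{
        pr_info("deferred irq_work callback ran\n");
}

static struct irq_work my_work;

static void my_setup(void)
{
        init_irq_work(&my_work, my_irq_work_func);
}

/*
 * Hot path: enqueueing is NMI-safe; a second queue attempt while the
 * item is still pending simply returns false.
 */
static void my_event_handler(void)
{
        irq_work_queue(&my_work);
}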

/*
 * Claim the entry so that no one else will poke at it.
 */

static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Peter Zijlstra        32      39.02%   1        25.00%
Frédéric Weisbecker   30      36.59%   1        25.00%
Huang Ying            19      23.17%   1        25.00%
Bartosz Golaszewski   1       1.22%    1        25.00%
Total                 82      100.00%  4        100.00%
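For reference, irq_work_claim() above manipulates flag bits declared in <linux/irq_work.h>. Paraphrased from that header purely as an aid to reading the cmpxchg() loop (not part of this file):

#define IRQ_WORK_PENDING        BIT(0)  /* queued, callback not yet run */
#define IRQ_WORK_BUSY           BIT(1)  /* callback may still be executing */
#define IRQ_WORK_LAZY           BIT(2)  /* no IPI, wait for the next tick */

#define IRQ_WORK_CLAIMED        (IRQ_WORK_PENDING | IRQ_WORK_BUSY)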


void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Peter Zijlstra        9       100.00%  1        100.00%
Total                 9       100.00%  1        100.00%

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

        /* Arch remote IPI send/receive backend aren't NMI safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
        irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

        return true;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   68      83.95%   1        50.00%
Paul E. McKenney      13      16.05%   1        50.00%
Total                 81      100.00%  2        100.00%
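irq_work_queue_on() above is the cross-CPU variant: it claims the work, pushes it onto the raised_list of the chosen CPU, and kicks that CPU with an IPI if the list was previously empty. A hypothetical caller, reusing the my_work item from the earlier sketch:

/* Hypothetical: ask a specific CPU to run the callback. */
static void my_remote_trigger(int target_cpu)
{
        if (!irq_work_queue_on(&my_work, target_cpu))
                pr_debug("irq_work was already pending\n");
}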

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   37      42.53%   2        25.00%
Peter Zijlstra        24      27.59%   2        25.00%
Anish Kumar           11      12.64%   1        12.50%
Christoph Lameter     9       10.34%   2        25.00%
Huang Ying            6       6.90%    1        12.50%
Total                 87      100.00%  8        100.00%

EXPORT_SYMBOL_GPL(irq_work_queue);
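The IRQ_WORK_LAZY branch in irq_work_queue() defers the self-IPI: a lazy item is only raised immediately when the tick is stopped, and otherwise waits for irq_work_tick(). A sketch of how such an item might be declared statically (hypothetical names, using the same initializer style printk uses for its lazy work):

/* Hypothetical lazy item: no self-IPI; normally runs from irq_work_tick(). */
static struct irq_work my_lazy_work = {
        .flags = IRQ_WORK_LAZY,
        .func  = my_irq_work_func,
};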
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   50      78.12%   3        60.00%
Steven Rostedt        10      15.62%   1        20.00%
Christoph Lameter     4       6.25%    1        20.00%
Total                 64      100.00%  5        100.00%


static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Peter Zijlstra        34      31.48%   2        18.18%
Frédéric Weisbecker   33      30.56%   3        27.27%
Huang Ying            22      20.37%   1        9.09%
Thomas Gleixner       9       8.33%    1        9.09%
Byungchul Park        4       3.70%    1        9.09%
Sergio Aguirre        3       2.78%    1        9.09%
Christoph Lameter     2       1.85%    1        9.09%
Steven Rostedt        1       0.93%    1        9.09%
Total                 108     100.00%  11       100.00%

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   20      80.00%   1        33.33%
Christoph Lameter     4       16.00%   1        33.33%
Peter Zijlstra        1       4.00%    1        33.33%
Total                 25      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(irq_work_run);
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   40      90.91%   1        50.00%
Christoph Lameter     4       9.09%    1        50.00%
Total                 44      100.00%  2        100.00%

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Peter Zijlstra        18      75.00%   1        33.33%
Huang Ying            5       20.83%   1        33.33%
Frédéric Weisbecker   1       4.17%    1        33.33%
Total                 24      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(irq_work_sync);
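As the comment before irq_work_sync() notes, callers use it to make sure a work item is idle, typically before freeing or reusing it. A minimal teardown sketch for the hypothetical item from the earlier examples:

static void my_teardown(void)
{
        /* Wait until any in-flight callback has finished. */
        irq_work_sync(&my_work);
        /* my_work may now be freed or reinitialized safely. */
}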

Overall Contributors

Person                Tokens  Prop     Commits  CommitProp
Frédéric Weisbecker   300     49.83%   8        27.59%
Peter Zijlstra        155     25.75%   5        17.24%
Huang Ying            53      8.80%    1        3.45%
Christoph Lameter     23      3.82%    3        10.34%
Steven Rostedt        17      2.82%    2        6.90%
Paul E. McKenney      13      2.16%    1        3.45%
Anish Kumar           11      1.83%    1        3.45%
Paul Gortmaker        10      1.66%    3        10.34%
Thomas Gleixner       9       1.50%    1        3.45%
Byungchul Park        4       0.66%    1        3.45%
Chris Metcalf         3       0.50%    1        3.45%
Sergio Aguirre        3       0.50%    1        3.45%
Bartosz Golaszewski   1       0.17%    1        3.45%
Total                 602     100.00%  29       100.00%