cregit-Linux how code gets into the kernel

Release 4.15 kernel/rcu/tree.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *          Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the userspace
 * tracing tools to map the string address back to the matching
 * string.
 */
#ifdef CONFIG_TRACING

# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;

# define RCU_STATE_NAME(sname) sname##_varname
#else

# define DEFINE_RCU_TPS(sname)

# define RCU_STATE_NAME(sname) __stringify(sname)
#endif
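
/*
 * Editor's illustration (not part of the original file): with
 * CONFIG_TRACING=y and sname = rcu_sched, DEFINE_RCU_TPS(rcu_sched)
 * expands mechanically to the two declarations below, and
 * RCU_STATE_NAME(rcu_sched) then evaluates to rcu_sched_varname.
 * Without CONFIG_TRACING, RCU_STATE_NAME(rcu_sched) is simply
 * __stringify(rcu_sched), i.e. the literal "rcu_sched".
 */
static char rcu_sched_varname[] = "rcu_sched";
static const char *tp_rcu_sched_varname __used __tracepoint_string = rcu_sched_varname;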


#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
        .rda = &sname##_data, \
        .call = cr, \
        .gp_state = RCU_GP_IDLE, \
        .gpnum = 0UL - 300UL, \
        .completed = 0UL - 300UL, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);


static struct rcu_state *const rcu_state_p;

LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */

static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */

static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */

static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);

int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */

int num_rcu_lvl[] = NUM_RCU_LVL_INIT;

int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */

int rcu_scheduler_active __read_mostly;

EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */

static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */

static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delays in jiffies for grace-period pre-init, init, and cleanup; debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);

static int gp_init_delay;
module_param(gp_init_delay, int, 0444);

static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */

#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
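
/*
 * Editor's sketch (not from the original file) of the normalization
 * described above: if a delay of @delay jiffies is applied only once
 * every (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay) grace periods,
 * the average added latency per grace period works out to
 * 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffies, independent of
 * the delay duration itself.  The helper name and the use of ->gpnum
 * here are illustrative assumptions, not code from this file.
 */
static void example_gp_slow(struct rcu_state *rsp, int delay)
{
	if (delay > 0 &&
	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_uninterruptible(delay);
}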

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */

unsigned long rcutorture_testseq;

unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */

unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) { return READ_ONCE(rnp->qsmaskinitnext); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          19      100.00%   2        100.00%
Total                     19      100.00%   2        100.00%

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp) { return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          26      100.00%   2        100.00%
Total                     26      100.00%   2        100.00%

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!"); if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) return; trace_rcu_grace_period(TPS("rcu_sched"), __this_cpu_read(rcu_sched_data.gpnum), TPS("cpuqs")); __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false); if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) return; __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data), true); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          93      90.29%    10       83.33%
Steven Rostedt            6       5.83%     1        8.33%
Ingo Molnar               4       3.88%     1        8.33%
Total                     103     100.00%   12       100.00%


void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { trace_rcu_grace_period(TPS("rcu_bh"), __this_cpu_read(rcu_bh_data.gpnum), TPS("cpuqs")); __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          50      81.97%    7        77.78%
Steven Rostedt            6       9.84%     1        11.11%
Ingo Molnar               5       8.20%     1        11.11%
Total                     61      100.00%   9        100.00%

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};

/*
 * There are a few places, currently just in the tracing infrastructure,
 * that use rcu_irq_enter() to make sure RCU is watching.  But there is
 * a small region of code where that will not work.  In those cases,
 * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
 * can be called.
 */
static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
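
/*
 * Editor's worked example (not from the original file), using the
 * definitions just above: ->dynticks starts at RCU_DYNTICK_CTRL_CTR
 * (0x2, not in EQS).  Each EQS entry or exit adds RCU_DYNTICK_CTRL_CTR,
 * so the value sequence is 0x2 -> 0x4 (EQS) -> 0x6 (non-EQS) -> ...;
 * bit 0x2 being clear is exactly what rcu_dynticks_in_eqs() tests below.
 * The stolen bit RCU_DYNTICK_CTRL_MASK (0x1) is orthogonal to the
 * counter: rcu_eqs_special_set() sets it only while the CPU is in EQS,
 * to request a deferred action (such as a TLB flush) on the next EQS
 * exit via rcu_eqs_special_exit().
 */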
bool rcu_irq_enter_disabled(void) { return this_cpu_read(disable_rcu_irq_enter); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Steven Rostedt            13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int seq; /* * CPUs seeing atomic_add_return() must see prior RCU read-side * critical sections, and we also must force ordering with the * next idle sojourn. */ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); /* Better be in an extended quiescent state! */ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICK_CTRL_CTR)); /* Better not have special action (TLB flush) pending! */ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICK_CTRL_MASK)); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          65      100.00%   2        100.00%
Total                     65      100.00%   2        100.00%

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int seq; /* * CPUs seeing atomic_add_return() must see prior idle sojourns, * and we also must force ordering with the next RCU read-side * critical section. */ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICK_CTRL_CTR)); if (seq & RCU_DYNTICK_CTRL_MASK) { atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks); smp_mb__after_atomic(); /* _exit after clearing mask. */ /* Prefer duplicate flushes to losing a flush. */ rcu_eqs_special_exit(); } }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          76      100.00%   2        100.00%
Total                     76      100.00%   2        100.00%

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR) return; atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          42      100.00%   2        100.00%
Total                     42      100.00%   2        100.00%

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          32      100.00%   2        100.00%
Total                     32      100.00%   2        100.00%

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp) { int snap = atomic_add_return(0, &rdtp->dynticks); return snap & ~RCU_DYNTICK_CTRL_MASK; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          29      100.00%   2        100.00%
Total                     29      100.00%   2        100.00%

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap) { return !(snap & RCU_DYNTICK_CTRL_CTR); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          17      100.00%   2        100.00%
Total                     17      100.00%   2        100.00%

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap) { return snap != rcu_dynticks_snap(rdtp); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          22      100.00%   1        100.00%
Total                     22      100.00%   1        100.00%
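
/*
 * Editor's illustration (not from the original file) of how the three
 * snapshot helpers above are meant to be paired; the function name is
 * an assumption and @snap is presumed to have been recorded earlier
 * via rcu_dynticks_snap(rdtp).
 */
static bool example_cpu_was_or_is_quiescent(struct rcu_dynticks *rdtp, int snap)
{
	return rcu_dynticks_in_eqs(snap) ||		/* in EQS when snapped... */
	       rcu_dynticks_in_eqs_since(rdtp, snap);	/* ...or has been since. */
}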

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle-CPU quiescent state.
 */
static void rcu_dynticks_momentary_idle(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); /* It is illegal to call this from idle state. */ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          45      100.00%   2        100.00%
Total                     45      100.00%   2        100.00%

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu) { int old; int new; struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); do { old = atomic_read(&rdtp->dynticks); if (old & RCU_DYNTICK_CTRL_CTR) return false; new = old | RCU_DYNTICK_CTRL_MASK; } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old); return true; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          75      100.00%   2        100.00%
Total                     75      100.00%   2        100.00%
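
/*
 * Hypothetical editor's sketch (not from this file): one way a caller
 * could use rcu_eqs_special_set() to defer a per-CPU action to the
 * target CPU's next exit from an extended quiescent state, falling
 * back to an immediate cross-call when the CPU is busy.  The helper
 * name and the fallback policy are assumptions.
 */
static void example_defer_or_ipi(int cpu, smp_call_func_t func)
{
	if (!rcu_eqs_special_set(cpu))
		smp_call_function_single(cpu, func, NULL, 1);
	/* Else rcu_eqs_special_exit() runs on that CPU when it leaves EQS. */
}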

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void) { raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false); rcu_dynticks_momentary_idle(); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          20      100.00%   4        100.00%
Total                     20      100.00%   4        100.00%

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt) { barrier(); /* Avoid RCU read-side critical sections leaking down. */ trace_rcu_utilization(TPS("Start context switch")); rcu_sched_qs(); rcu_preempt_note_context_switch(preempt); /* Load rcu_urgent_qs before other flags. */ if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) goto out; this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) rcu_momentary_dyntick_idle(); this_cpu_inc(rcu_dynticks.rcu_qs_ctr); if (!preempt) rcu_note_voluntary_context_switch_lite(current); out: trace_rcu_utilization(TPS("End context switch")); barrier(); /* Avoid RCU read-side critical sections leaking up. */ }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          93      92.08%    7        87.50%
Boqun Feng                8       7.92%     1        12.50%
Total                     101     100.00%   8        100.00%

EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void) { unsigned long flags; if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs)) return; preempt_disable(); /* Load rcu_urgent_qs before other flags. */ if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { preempt_enable(); return; } this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); barrier(); /* Avoid RCU read-side critical sections leaking down. */ if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) { local_irq_save(flags); rcu_momentary_dyntick_idle(); local_irq_restore(flags); } if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) rcu_sched_qs(); this_cpu_inc(rcu_dynticks.rcu_qs_ctr); barrier(); /* Avoid RCU read-side critical sections leaking up. */ preempt_enable(); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          111     93.28%    7        87.50%
Boqun Feng                8       6.72%     1        12.50%
Total                     119     100.00%   8        100.00%

EXPORT_SYMBOL_GPL(rcu_all_qs);

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 10;
module_param(jiffies_till_sched_qs, ulong, 0444);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void) { return rcu_state_p->gpnum; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void) { return rcu_sched_state.gpnum; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void) { return rcu_bh_state.gpnum; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void) { return rcu_state_p->completed; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void) { return rcu_sched_state.completed; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   3        100.00%
Total                     13      100.00%   3        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void) { return rcu_bh_state.completed; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   2        100.00%
Total                     13      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void) { return rcu_state_p->expedited_sequence; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats.  Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void) { return rcu_sched_state.expedited_sequence; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void) { force_quiescent_state(rcu_state_p); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Andreea-Cristina Bernat   11      91.67%    1        50.00%
Uma Sharma                1       8.33%     1        50.00%
Total                     12      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void) { force_quiescent_state(&rcu_bh_state); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void) { force_quiescent_state(&rcu_sched_state); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          13      100.00%   1        100.00%
Total                     13      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void) { struct rcu_state *rsp; for_each_rcu_flavor(rsp) { pr_info("%s: wait state: %d ->state: %#lx\n", rsp->name, rsp->gp_state, rsp->gp_kthread->state); /* sched_show_task(rsp->gp_kthread); */ } }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          38      100.00%   1        100.00%
Total                     38      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void) { rcutorture_testseq++; rcutorture_vernum = 0; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          14      100.00%   1        100.00%
Total                     14      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, unsigned long *gpnum, unsigned long *completed) { struct rcu_state *rsp = NULL; switch (test_type) { case RCU_FLAVOR: rsp = rcu_state_p; break; case RCU_BH_FLAVOR: rsp = &rcu_bh_state; break; case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; default: break; } if (rsp == NULL) return; *flags = READ_ONCE(rsp->gp_flags); *gpnum = READ_ONCE(rsp->gpnum); *completed = READ_ONCE(rsp->completed); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          100     99.01%    3        75.00%
Uma Sharma                1       0.99%     1        25.00%
Total                     101     100.00%   4        100.00%

EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum) { rcutorture_vernum++; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          12      100.00%   1        100.00%
Total                     12      100.00%   1        100.00%

EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp) { return &rsp->node[0]; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          22      100.00%   1        100.00%
Total                     22      100.00%   1        100.00%

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp) { struct rcu_node *rnp = rcu_get_root(rsp); int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; int *fp = &rnp->need_future_gp[idx]; lockdep_assert_irqs_disabled(); return READ_ONCE(*fp); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          58      98.31%    3        75.00%
Frédéric Weisbecker       1       1.69%     1        25.00%
Total                     59      100.00%   4        100.00%

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) { lockdep_assert_irqs_disabled(); if (rcu_gp_in_progress(rsp)) return false; /* No, a grace period is already in progress. */ if (rcu_future_needs_gp(rsp)) return true; /* Yes, a no-CBs CPU needs one. */ if (!rcu_segcblist_is_enabled(&rdp->cblist)) return false; /* No, this is a no-CBs (or offline) CPU. */ if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) return true; /* Yes, CPU has newly registered callbacks. */ if (rcu_segcblist_future_gp_needed(&rdp->cblist, READ_ONCE(rsp->completed))) return true; /* Yes, CBs for future grace period. */ return false; /* No grace period needed. */ }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          97      98.98%    11       91.67%
Frédéric Weisbecker       1       1.02%     1        8.33%
Total                     98      100.00%   12       100.00%

/*
 * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
 *
 * Enter idle, doing appropriate accounting.  The caller must have
 * disabled interrupts.
 */
static void rcu_eqs_enter_common(bool user) { struct rcu_state *rsp; struct rcu_data *rdp; struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); lockdep_assert_irqs_disabled(); trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)) { struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0); rcu_ftrace_dump(DUMP_ORIG); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ } for_each_rcu_flavor(rsp) { rdp = this_cpu_ptr(rsp->rda); do_nocb_deferred_wakeup(rdp); } rcu_prepare_for_idle(); __this_cpu_inc(disable_rcu_irq_enter); rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */ rcu_dynticks_eqs_enter(); /* After this, tracing works again. */ __this_cpu_dec(disable_rcu_irq_enter); rcu_dynticks_task_enter(); /* * It is illegal to enter an extended quiescent state while * in an RCU read-side critical section. */ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal idle entry in RCU read-side critical section."); RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), "Illegal idle entry in RCU-bh read-side critical section."); RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), "Illegal idle entry in RCU-sched read-side critical section."); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          176     87.56%    18       72.00%
Steven Rostedt            14      6.97%     2        8.00%
Frédéric Weisbecker       6       2.99%     3        12.00%
Li Zhong                  3       1.49%     1        4.00%
Christoph Lameter         2       1.00%     1        4.00%
Total                     201     100.00%   25       100.00%

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user) { struct rcu_dynticks *rdtp; rdtp = this_cpu_ptr(&rcu_dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) rcu_eqs_enter_common(user); else rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          56      87.50%    6        75.00%
Frédéric Weisbecker       6       9.38%     1        12.50%
Christoph Lameter         2       3.12%     1        12.50%
Total                     64      100.00%   8        100.00%

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void) { lockdep_assert_irqs_disabled(); rcu_eqs_enter(false); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Frédéric Weisbecker       13      86.67%    3        60.00%
Paul E. McKenney          1       6.67%     1        20.00%
Peter Zijlstra            1       6.67%     1        20.00%
Total                     15      100.00%   5        100.00%

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_enter(void) { lockdep_assert_irqs_disabled(); rcu_eqs_enter(true); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Frédéric Weisbecker       12      80.00%    3        75.00%
Paul E. McKenney          3       20.00%    1        25.00%
Total                     15      100.00%   4        100.00%

#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit(void) { struct rcu_dynticks *rdtp; lockdep_assert_irqs_disabled(); rdtp = this_cpu_ptr(&rcu_dynticks); /* Page faults can happen in NMI handlers, so check... */ if (rdtp->dynticks_nmi_nesting) return; WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && rdtp->dynticks_nesting < 1); if (rdtp->dynticks_nesting <= 1) { rcu_eqs_enter_common(true); } else { trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1); rdtp->dynticks_nesting--; } }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          64      74.42%    6        60.00%
Frédéric Weisbecker       17      19.77%    2        20.00%
Steven Rostedt            3       3.49%     1        10.00%
Christoph Lameter         2       2.33%     1        10.00%
Total                     86      100.00%   10       100.00%

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void) { unsigned long flags; local_irq_save(flags); rcu_irq_exit(); local_irq_restore(flags); }

Contributors

Person                    Tokens  Prop      Commits  CommitProp
Paul E. McKenney          24      100.00%   3        100.00%
Total                     24      100.00%   3        100.00%

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(