cregit-Linux: how code gets into the kernel

Release 4.10: kernel/sched/stats.h

Directory: kernel/sched
#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */

static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          38      100.00%  2        100.00%
Total                38      100.00%  2        100.00%

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          26      96.30%   1        50.00%
ken chen             1       3.70%    1        50.00%
Total                27      100.00%  2        100.00%


static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ankita garg          29      100.00%  1        100.00%
Total                29      100.00%  1        100.00%

#define schedstat_enabled()             static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)              do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)         do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)         do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)              (var)
#define schedstat_val_or_zero(var)      ((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
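
The pattern behind these helpers is worth spelling out: every statistics update is wrapped in a runtime check, so a disabled schedstats costs only a static-branch test, and a kernel built without CONFIG_SCHEDSTATS (the stubs below) pays nothing at all. The following is a minimal, self-contained userspace sketch of that pattern, not code from this file: the static_branch_unlikely() key is replaced by a plain bool, and the struct and field names are invented for illustration.

/*
 * Userspace model of the schedstat_* pattern: guard every update behind
 * a cheap "enabled" check. In the kernel the check is a static branch;
 * here it is a plain bool (an assumption made to keep the sketch runnable
 * outside the kernel).
 */
#include <stdbool.h>
#include <stdio.h>

static bool sched_schedstats = true;

#define schedstat_enabled()     (sched_schedstats)
#define schedstat_inc(var)      do { if (schedstat_enabled()) { (var)++; } } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { (var) += (amt); } } while (0)

struct toy_rq_stats {
        unsigned long pcount;           /* how many events were counted */
        unsigned long long run_delay;   /* accumulated wait time */
};

int main(void)
{
        struct toy_rq_stats st = { 0, 0 };

        schedstat_inc(st.pcount);        /* counted: stats are enabled  */
        schedstat_add(st.run_delay, 40); /* accumulated                 */

        sched_schedstats = false;
        schedstat_inc(st.pcount);        /* skipped: stats now disabled */

        printf("pcount=%lu run_delay=%llu\n", st.pcount, st.run_delay);
        /* prints: pcount=1 run_delay=40 */
        return 0;
}
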
static inline void rq_sched_info_arrive(struct rq *rq, unsigned long long delta) {}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          16      100.00%  1        100.00%
Total                16      100.00%  1        100.00%


static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) {}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ankita garg          13      81.25%   1        50.00%
ingo molnar          3       18.75%   1        50.00%
Total                16      100.00%  2        100.00%


static inline void rq_sched_info_depart(struct rq *rq, unsigned long long delta) {}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          13      81.25%   1        50.00%
ankita garg          3       18.75%   1        50.00%
Total                16      100.00%  2        100.00%

#define schedstat_enabled()             0
#define schedstat_inc(var)              do { } while (0)
#define schedstat_add(var, amt)         do { } while (0)
#define schedstat_set(var, val)         do { } while (0)
#define schedstat_val(var)              0
#define schedstat_val_or_zero(var)      0

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ankita garg          20      100.00%  1        100.00%
Total                20      100.00%  1        100.00%

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ankita garg          48      62.34%   1        25.00%
ingo molnar          19      24.68%   1        25.00%
michael s. tsirkin   7       9.09%    1        25.00%
frederic weisbecker  3       3.90%    1        25.00%
Total                77      100.00%  4        100.00%
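
As a concrete illustration of the skew argument in the comment above (all numbers invented): a task is queued on CPU0 at CPU0-clock 100 and migrated away at CPU0-clock 130, so sched_info_dequeued() adds 30 to run_delay using only CPU0's clock; it is then re-queued on CPU1 at CPU1-clock 500 and finally runs at CPU1-clock 520, so sched_info_arrive() adds another 20 using only CPU1's clock. The total wait of 50 never mixes the two clocks, so any skew between them cancels out.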

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          72      86.75%   2        40.00%
michael s. tsirkin   7       8.43%    1        20.00%
frederic weisbecker  3       3.61%    1        20.00%
ankita garg          1       1.20%    1        20.00%
Total                83      100.00%  5        100.00%
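
The arrive-side bookkeeping is simple enough to model outside the kernel. The toy program below is an illustrative sketch only: the struct, the timestamps and toy_arrive() are invented, and they mirror just the arithmetic of sched_info_arrive() (consume last_queued into run_delay, stamp last_arrival, bump pcount).

#include <stdio.h>

struct toy_sched_info {
        unsigned long long last_queued;  /* when the task was (first) queued */
        unsigned long long last_arrival; /* when it last got a CPU */
        unsigned long long run_delay;    /* total time spent waiting to run */
        unsigned long pcount;            /* number of times it ran on a CPU */
};

static void toy_arrive(struct toy_sched_info *si, unsigned long long now)
{
        unsigned long long delta = 0;

        if (si->last_queued)
                delta = now - si->last_queued;
        si->last_queued = 0;             /* mirrors sched_info_reset_dequeued() */
        si->run_delay += delta;
        si->last_arrival = now;
        si->pcount++;
}

int main(void)
{
        struct toy_sched_info si = { 0, 0, 0, 0 };

        si.last_queued = 100;            /* queued at t=100 */
        toy_arrive(&si, 140);            /* gets a CPU at t=140 */
        printf("run_delay=%llu pcount=%lu\n", si.run_delay, si.pcount);
        /* prints: run_delay=40 pcount=1 */
        return 0;
}
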

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          36      80.00%   1        33.33%
michael s. tsirkin   6       13.33%   1        33.33%
frederic weisbecker  3       6.67%    1        33.33%
Total                45      100.00%  3        100.00%

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          30      54.55%   1        25.00%
bharath ravi         13      23.64%   1        25.00%
michael s. tsirkin   9       16.36%   1        25.00%
frederic weisbecker  3       5.45%    1        25.00%
Total                55      100.00%  4        100.00%

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          44      83.02%   1        50.00%
michael s. tsirkin   9       16.98%   1        50.00%
Total                53      100.00%  2        100.00%


static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          32      82.05%   1        50.00%
michael s. tsirkin   7       17.95%   1        50.00%
Total                39      100.00%  2        100.00%
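
For context, sched_info_switch() is not called from within this header; the scheduler core invokes it once per context switch, before the architecture-specific switch. The sketch below paraphrases the shape of that call site (based on prepare_task_switch() in kernel/sched/core.c around this release; the other hooks are summarised in a comment rather than reproduced verbatim):

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
{
        sched_info_switch(rq, prev, next);      /* departure/arrival bookkeeping */
        /* ... perf and preempt-notifier hooks, prepare_lock_switch(), ... */
}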

#else
#define sched_info_queued(rq, t)                do { } while (0)
#define sched_info_reset_dequeued(t)            do { } while (0)
#define sched_info_dequeued(rq, t)              do { } while (0)
#define sched_info_depart(rq, t)                do { } while (0)
#define sched_info_arrive(rq, next)             do { } while (0)
#define sched_info_switch(rq, t, next)          do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:        Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running))
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account to the signal struct further
         * cputime consumed by that task, even though the task can still be
         * ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, lets also ignore the cputime
         * elapsing after __exit_signal() in any thread group timer running.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
kosaki motohiro      51      92.73%   1        50.00%
jason low            4       7.27%    1        50.00%
Total                55      100.00%  2        100.00%

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the utime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
frank mayhar         19      39.58%   2        22.22%
peter zijlstra       15      31.25%   2        22.22%
jason low            5       10.42%   2        22.22%
oleg nesterov        5       10.42%   2        22.22%
kosaki motohiro      4       8.33%    1        11.11%
Total                48      100.00%  9        100.00%

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the stime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
frank mayhar         18      37.50%   2        22.22%
peter zijlstra       16      33.33%   2        22.22%
jason low            5       10.42%   2        22.22%
oleg nesterov        5       10.42%   2        22.22%
kosaki motohiro      4       8.33%    1        11.11%
Total                48      100.00%  9        100.00%

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @ns:         Time value by which to increment the sum_exec_runtime field
 *              of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
frank mayhar         24      48.00%   2        25.00%
peter zijlstra       14      28.00%   2        25.00%
jason low            5       10.00%   2        25.00%
kosaki motohiro      4       8.00%    1        12.50%
oleg nesterov        3       6.00%    1        12.50%
Total                50      100.00%  8        100.00%
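
These three accounting helpers are driven by the scheduling classes whenever runtime is charged to the current task. As one example of the calling pattern (paraphrased and abbreviated from update_curr() in kernel/sched/fair.c for this era, not a verbatim copy), the fair class feeds each slice of exec time into the thread-group total:

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }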


Overall Contributors

Person               Tokens  Prop     Commits  CommitProp
ingo molnar          404     45.60%   3        12.50%
ankita garg          128     14.45%   1        4.17%
michael s. tsirkin   70      7.90%    2        8.33%
frank mayhar         65      7.34%    2        8.33%
kosaki motohiro      64      7.22%    1        4.17%
peter zijlstra       45      5.08%    2        8.33%
josh poimboeuf       35      3.95%    3        12.50%
jason low            19      2.14%    2        8.33%
bharath ravi         13      1.47%    1        4.17%
oleg nesterov        13      1.47%    2        8.33%
frederic weisbecker  12      1.35%    1        4.17%
mel gorman           11      1.24%    1        4.17%
naveen n. rao        4       0.45%    1        4.17%
rakib mullick        2       0.23%    1        4.17%
ken chen             1       0.11%    1        4.17%
Total                886     100.00%  24       100.00%