cregit-Linux: how code gets into the kernel

Release 4.15: kernel/sched/stats.h

Directory: kernel/sched
/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */

static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             38       100.00%   2         100.00%
Total                   38       100.00%   2         100.00%
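The locking note above is a correctness requirement: rq_sched_info.run_delay and pcount are plain counters, so concurrent updates from two CPUs would lose increments. A minimal, purely illustrative sketch of the expected calling pattern (in this file the real callers are the sched_info_*() helpers further down, which already run with the runqueue lock taken by the scheduler core):

	/* Illustrative only: the runqueue lock serializes the update. */
	raw_spin_lock(&rq->lock);
	rq_sched_info_arrive(rq, delta);
	raw_spin_unlock(&rq->lock);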

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             26       96.30%    1         50.00%
Ken Chen                1        3.70%     1         50.00%
Total                   27       100.00%   2         100.00%


static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ankita Garg             29       100.00%   1         100.00%
Total                   29       100.00%   1         100.00%

#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void rq_sched_info_arrive(struct rq *rq, unsigned long long delta) {}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             16       100.00%   1         100.00%
Total                   16       100.00%   1         100.00%


static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) {}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ankita Garg             13       81.25%    1         50.00%
Ingo Molnar             3        18.75%    1         50.00%
Total                   16       100.00%   2         100.00%


static inline void rq_sched_info_depart(struct rq *rq, unsigned long long delta) {}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             13       81.25%    1         50.00%
Ankita Garg             3        18.75%    1         50.00%
Total                   16       100.00%   2         100.00%

#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ankita Garg             20       100.00%   1         100.00%
Total                   20       100.00%   1         100.00%

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ankita Garg             48       62.34%    1         25.00%
Ingo Molnar             19       24.68%    1         25.00%
Michael S. Tsirkin      7        9.09%     1         25.00%
Frédéric Weisbecker     3        3.90%     1         25.00%
Total                   77       100.00%   4         100.00%
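A worked example of the skew argument, with made-up numbers: suppose CPU0's rq clock runs 50 µs ahead of CPU1's. A task is queued on CPU0's runqueue at clock 1000 and dequeued (say, for migration) at clock 1300, contributing delta = 300 to run_delay; it is then queued on CPU1's runqueue at clock 2000 and finally runs at clock 2100, contributing delta = 100. Each delta is the difference of two timestamps taken from a single CPU's clock, so the accumulated run_delay of 400 is unaffected by the 50 µs skew between the two clocks.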

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             72       86.75%    2         40.00%
Michael S. Tsirkin      7        8.43%     1         20.00%
Frédéric Weisbecker     3        3.61%     1         20.00%
Ankita Garg             1        1.20%     1         20.00%
Total                   83       100.00%   5         100.00%

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             36       80.00%    1         33.33%
Michael S. Tsirkin      6        13.33%    1         33.33%
Frédéric Weisbecker     3        6.67%     1         33.33%
Total                   45       100.00%   3         100.00%
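A hypothetical call sequence (p is some runnable task, CONFIG_SCHED_INFO=y) illustrating why the timestamp is only written when it is not already set, so that run_delay is measured from the first enqueue:

	sched_info_queued(rq, p);	/* first enqueue: last_queued = rq_clock(rq) */
	sched_info_queued(rq, p);	/* requeue: last_queued is already set and is left untouched */
	sched_info_dequeued(rq, p);	/* delta counts from the first enqueue; last_queued is then cleared */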

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             30       54.55%    1         25.00%
Bharath Ravi            13       23.64%    1         25.00%
Michael S. Tsirkin      9        16.36%    1         25.00%
Frédéric Weisbecker     3        5.45%     1         25.00%
Total                   55       100.00%   4         100.00%
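For example (hypothetical values): a task with last_arrival = 4000 is preempted at rq clock 5000 while still TASK_RUNNING. rq_sched_info_depart() adds the 1000 it ran to rq->rq_cpu_time, and sched_info_queued() immediately restamps last_queued = 5000, so the time the task now spends waiting for the cpu again is later accounted as run_delay.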

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             44       83.02%    1         50.00%
Michael S. Tsirkin      9        16.98%    1         50.00%
Total                   53       100.00%   2         100.00%


static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}

Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             32       82.05%    1         50.00%
Michael S. Tsirkin      7        17.95%    1         50.00%
Total                   39       100.00%   2         100.00%
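For context (not part of this header): sched_info_switch() is meant to be driven from the context-switch path in the scheduler core. A simplified sketch of such a call site, loosely modelled on prepare_task_switch() in kernel/sched/core.c, with the unrelated bookkeeping elided:

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	sched_info_switch(rq, prev, next);	/* departure stats for prev, arrival stats for next */
	/* ... perf, preempt-notifier and architecture bookkeeping elided ... */
}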

#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
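To show how the schedstat_*() wrappers defined earlier are intended to be used, here is a hypothetical snippet (the fields rq->yld_count and rq->rq_cpu_time exist only when CONFIG_SCHEDSTATS=y; with CONFIG_SCHEDSTATS=n the macros expand to empty statements, and even when built in, the sched_schedstats static key skips the updates unless schedstats are enabled at runtime):

/* Hypothetical helper, for illustration only. */
static inline void schedstat_usage_example(struct rq *rq, unsigned long long delta)
{
	schedstat_inc(rq->yld_count);		/* counter bumped only while the static key is enabled */
	schedstat_add(rq->rq_cpu_time, delta);	/* accumulator updated under the same guard */
}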

Overall Contributors

Person                  Tokens   Prop      Commits   CommitProp
Ingo Molnar             404      59.32%    3         18.75%
Ankita Garg             128      18.80%    1         6.25%
Michael S. Tsirkin      70       10.28%    2         12.50%
Josh Poimboeuf          35       5.14%     3         18.75%
Bharath Ravi            13       1.91%     1         6.25%
Frédéric Weisbecker     12       1.76%     1         6.25%
Mel Gorman              11       1.62%     1         6.25%
Naveen N. Rao           4        0.59%     1         6.25%
Rakib Mullick           2        0.29%     1         6.25%
Greg Kroah-Hartman      1        0.15%     1         6.25%
Ken Chen                1        0.15%     1         6.25%
Total                   681      100.00%   16        100.00%
Created with cregit.