
Release 4.16 kernel/profile.c

/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *      Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *      Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *      to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *      Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>


struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */


int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Ingo Molnar                 146     52.52%   4        33.33%
William Lee Irwin III       42      15.11%   1        8.33%
John Levon                  30      10.79%   1        8.33%
Arjan van de Ven            29      10.43%   1        8.33%
Mel Gorman                  15      5.40%    2        16.67%
Fabian Frederick            8       2.88%    2        16.67%
Andrew Morton               8       2.88%    1        8.33%
Total                       278     100.00%  12       100.00%

__setup("profile=", profile_setup);
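A usage sketch derived from the parsing above (the numeric shift values are illustrative): the slot covered by each counter is 2^shift bytes of kernel text, since profile_init() below sizes the buffer as (_etext - _stext) >> prof_shift.

	profile=2		CPU-time profiling, one counter per 2^2 = 4 bytes of text
	profile=schedule,5	profile schedule() calls, one counter per 32 bytes of text
	profile=sleep		sleep profiling (requires CONFIG_SCHEDSTATS)
	profile=kvm		KVM profiling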
int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Dave Hansen                 61      50.00%   1        11.11%
John Levon                  21      17.21%   1        11.11%
Rusty Russell               17      13.93%   1        11.11%
William Lee Irwin III       9       7.38%    1        11.11%
Hugh Dickins                7       5.74%    1        11.11%
Mel Gorman                  4       3.28%    1        11.11%
Paul Mundt                  1       0.82%    1        11.11%
Jesper Juhl                 1       0.82%    1        11.11%
Andrew Morton               1       0.82%    1        11.11%
Total                       122     100.00%  9        100.00%
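Why three allocation strategies? A rough sizing sketch, with assumed figures:

/* Illustration only (the 16 MiB text size is an assumption, not from the
 * source): with 16 MiB of kernel text and profile=2, prof_len is
 * 16 MiB >> 2 = 4M slots, so buffer_bytes = 4M * sizeof(atomic_t) = 16 MiB.
 * An allocation that large may exceed what the physically-contiguous
 * allocators (kzalloc, alloc_pages_exact) can provide, hence the chain
 * that ends in vzalloc().
 */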

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  19      95.00%   2        66.67%
Alan Stern                  1       5.00%    1        33.33%
Total                       20      100.00%  3        100.00%


int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  35      97.22%   2        66.67%
Alan Stern                  1       2.78%    1        33.33%
Total                       36      100.00%  3        100.00%


void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  22      95.65%   2        66.67%
Alan Stern                  1       4.35%    1        33.33%
Total                       23      100.00%  3        100.00%


int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  17      89.47%   1        50.00%
Alan Stern                  2       10.53%   1        50.00%
Total                       19      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(task_handoff_register);
int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  17      89.47%   1        50.00%
Alan Stern                  2       10.53%   1        50.00%
Total                       19      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(task_handoff_unregister);
int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  55      96.49%   2        66.67%
Alan Stern                  2       3.51%    1        33.33%
Total                       57      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(profile_event_register);
int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
John Levon                  55      96.49%   2        66.67%
Alan Stern                  2       3.51%    1        33.33%
Total                       57      100.00%  3        100.00%
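A minimal sketch of a hypothetical client of this registration API (the handler, notifier block, and call sites below are invented for illustration, not part of profile.c):

static int my_exit_handler(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	struct task_struct *task = data;	/* the chain passes the exiting task */

	pr_debug("task %d is exiting\n", task->pid);
	return NOTIFY_OK;
}

static struct notifier_block my_exit_nb = {
	.notifier_call = my_exit_handler,
};

/* in the client's init path: */
profile_event_register(PROFILE_TASK_EXIT, &my_exit_nb);
/* and on teardown: */
profile_event_unregister(PROFILE_TASK_EXIT, &my_exit_nb);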

EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       31      100.00%  2        100.00%
Total                       31      100.00%  2        100.00%


static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       151     98.69%   2        66.67%
Arjan van de Ven            2       1.31%    1        33.33%
Total                       153     100.00%  3        100.00%


static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       81      97.59%   1        50.00%
Arjan van de Ven            2       2.41%    1        50.00%
Total                       83      100.00%  2        100.00%


static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       308     95.65%   1        33.33%
Ingo Molnar                 12      3.73%    1        33.33%
Rakib Mullick               2       0.62%    1        33.33%
Total                       322     100.00%  3        100.00%
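To make the probe arithmetic concrete, a worked example under an assumed page size:

/* Worked example (assuming PAGE_SIZE == 4096, so NR_PROFILE_HIT == 512,
 * PROFILE_GRPSZ == 8 and NR_PROFILE_GRP == 64): for profile slot
 * pc == 0x1234,
 *   primary   = (0x1234 & 63) << 3 == 416   (start of group 52)
 *   secondary = (~(0x1234 << 1) & 63) << 3 == 184
 * The do/while loop scans the 8 entries starting at index 416; if the
 * group is full it steps the index by 184 (mod 512) and rescans, until
 * it wraps back to 416, at which point the whole table is flushed to
 * prof_buffer and emptied.
 */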


static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (prof_cpu_mask != NULL)
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       50      52.63%   1        33.33%
Sebastian Andrzej Siewior   44      46.32%   1        33.33%
Akinobu Mita                1       1.05%    1        33.33%
Total                       95      100.00%  3        100.00%


static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);

	}
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Sebastian Andrzej Siewior   60      55.05%   1        25.00%
William Lee Irwin III       46      42.20%   1        25.00%
Christoph Lameter           2       1.83%    1        25.00%
Vlastimil Babka             1       0.92%    1        25.00%
Total                       109     100.00%  4        100.00%


static int profile_online_cpu(unsigned int cpu)
{
	if (prof_cpu_mask != NULL)
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       10      38.46%   1        33.33%
Sebastian Andrzej Siewior   10      38.46%   1        33.33%
Rusty Russell               6       23.08%   1        33.33%
Total                       26      100.00%  3        100.00%

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       48      84.21%   1        33.33%
Ingo Molnar                 7       12.28%   1        33.33%
Rakib Mullick               2       3.51%    1        33.33%
Total                       57      100.00%  3        100.00%

#endif /* !CONFIG_SMP */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Rakib Mullick               35      100.00%  1        100.00%
Total                       35      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(profile_hits);
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       39      78.00%   1        33.33%
David Howells               6       12.00%   1        33.33%
Rusty Russell               5       10.00%   1        33.33%
Total                       50      100.00%  3        100.00%
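Note: profile_hit(), used in profile_tick() above, is the single-hit wrapper declared in &lt;linux/profile.h&gt; rather than in this file; it simply calls profile_hits(type, ip, 1), so the one-hit fast path from the timer tick shares the buffering code above.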

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       17      56.67%   1        33.33%
Alexey Dobriyan             8       26.67%   1        33.33%
Tejun Heo                   5       16.67%   1        33.33%
Total                       30      100.00%  3        100.00%


static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexey Dobriyan             24      92.31%   1        50.00%
William Lee Irwin III       2       7.69%    1        50.00%
Total                       26      100.00%  2        100.00%


static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       39      47.56%   1        25.00%
Rusty Russell               31      37.80%   1        25.00%
Alexey Dobriyan             11      13.41%   1        25.00%
Reinette Chatre             1       1.22%    1        25.00%
Total                       82      100.00%  4        100.00%

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};
void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       12      60.00%   1        25.00%
Alexey Dobriyan             4       20.00%   1        25.00%
Al Viro                     3       15.00%   1        25.00%
Paolo Ciarrocchi            1       5.00%    1        25.00%
Total                       20      100.00%  4        100.00%
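The mask written to /proc/irq/prof_cpu_mask is parsed by cpumask_parse_user() as a hexadecimal CPU mask; for example, writing "3" restricts profiling to CPUs 0 and 1, after which profile_tick() above drops samples taken on any other CPU.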

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t read_profile(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       197     96.57%   3        75.00%
Heiko Carstens              7       3.43%    1        25.00%
Total                       204     100.00%  4        100.00%
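The readprofile(1) utility consumes this format. As a minimal illustrative sketch, a hypothetical standalone reader (not part of the kernel tree; it assumes sizeof(atomic_t) == sizeof(unsigned int), as on common architectures): the file begins with one unsigned int holding the sampling step (2^prof_shift), followed by prof_len counters, where counter i covers text offset i * step from _stext.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/profile", "rb");
	unsigned int step, hits;
	unsigned long slot = 0;

	if (!f || fread(&step, sizeof(step), 1, f) != 1)
		return 1;
	printf("sampling step: %u bytes of text per counter\n", step);
	while (fread(&hits, sizeof(hits), 1, f) == 1) {
		if (hits)	/* print only slots that were ever hit */
			printf("text offset %#lx: %u hits\n",
			       slot * step, hits);
		slot++;
	}
	fclose(f);
	return 0;
}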

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Sebastian Andrzej Siewior   75      72.82%   1        50.00%
William Lee Irwin III       28      27.18%   1        50.00%
Total                       103     100.00%  2        100.00%
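In practice any write resets the counters (for example, echo > /proc/profile from a root shell); a write of exactly sizeof(int) bytes is additionally interpreted as a binary multiplier and passed to setup_profiling_timer() on SMP builds before the reset happens.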

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};
int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Sebastian Andrzej Siewior   73      47.10%   1        12.50%
William Lee Irwin III       52      33.55%   2        25.00%
Srivatsa S. Bhat            16      10.32%   1        12.50%
Paolo Ciarrocchi            5       3.23%    1        12.50%
Denis V. Lunev              4       2.58%    1        12.50%
David Howells               4       2.58%    1        12.50%
Al Viro                     1       0.65%    1        12.50%
Total                       155     100.00%  8        100.00%

subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */

Overall Contributors

Person                      Tokens  Prop     Commits  CommitProp
William Lee Irwin III       1278    50.98%   7        12.73%
John Levon                  300     11.97%   3        5.45%
Sebastian Andrzej Siewior   285     11.37%   1        1.82%
Ingo Molnar                 173     6.90%    5        9.09%
Alexey Dobriyan             86      3.43%    2        3.64%
Dave Hansen                 67      2.67%    1        1.82%
Rusty Russell               63      2.51%    2        3.64%
Rakib Mullick               39      1.56%    1        1.82%
Arjan van de Ven            37      1.48%    2        3.64%
Paolo Ciarrocchi            26      1.04%    1        1.82%
Arnd Bergmann               20      0.80%    1        1.82%
Alan Stern                  19      0.76%    1        1.82%
Mel Gorman                  19      0.76%    3        5.45%
Andrew Morton               17      0.68%    2        3.64%
Srivatsa S. Bhat            16      0.64%    1        1.82%
David Howells               11      0.44%    2        3.64%
Fabian Frederick            8       0.32%    2        3.64%
Hugh Dickins                7       0.28%    1        1.82%
Heiko Carstens              7       0.28%    1        1.82%
Tejun Heo                   5       0.20%    1        1.82%
Denis V. Lunev              4       0.16%    1        1.82%
Al Viro                     4       0.16%    2        3.64%
Thomas Gleixner             3       0.12%    1        1.82%
Christoph Lameter           3       0.12%    2        3.64%
Nadia Yvette Chambers       2       0.08%    1        1.82%
Paul Gortmaker              2       0.08%    2        3.64%
Paul Mundt                  1       0.04%    1        1.82%
Linus Torvalds              1       0.04%    1        1.82%
Jesper Juhl                 1       0.04%    1        1.82%
Vlastimil Babka             1       0.04%    1        1.82%
Reinette Chatre             1       0.04%    1        1.82%
Akinobu Mita                1       0.04%    1        1.82%
Total                       2507    100.00%  55       100.00%