cregit-Linux how code gets into the kernel

Release 4.17 init/main.c

Directory: init
/*
 *  linux/init/main.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  GK 2/5/95  -  Changed to support mounting root fs via NFS
 *  Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
 *  Simplified starting of init:  Michael A. Griffith <>
 */

#define DEBUG		
/* Enable initcall_debug */

#include <linux/types.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/binfmts.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/sched/isolation.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
#include <linux/page_ext.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/init.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/pti.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/sched_clock.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/integrity.h>
#include <linux/proc_ns.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>

#include <asm/io.h>
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#include <trace/events/initcall.h>

static int kernel_init(void *);

extern void init_IRQ(void);
extern void fork_init(void);
extern void radix_tree_init(void);

/*
 * Debug helper: via this flag we know that we are in 'early bootup code'
 * where only the boot processor is running with IRQ disabled.  This means
 * two things - IRQ must not be enabled before the flag is cleared and some
 * operations which are not allowed with IRQ disabled are allowed while the
 * flag is set.
 */

bool early_boot_irqs_disabled __read_mostly;

enum system_states system_state __read_mostly;

/*
 * Boot command-line arguments
 */


extern void time_init(void);
/* Default late time init is NULL. archs can override this later. */

void (*__initdata late_time_init)(void);

/* Untouched command line saved by arch-specific code. */

char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (eg. for /proc) */

char *saved_command_line;
/* Command line for parameter parsing */

static char *static_command_line;
/* Command line for per-initcall parameter parsing */

static char *initcall_command_line;

static char *execute_command;

static char *ramdisk_execute_command;

/*
 * Used to generate warnings if static_key manipulation functions are used
 * before jump_label_init is called.
 */
bool static_key_initialized __read_mostly;

/*
 * If set, this is an indication to the drivers that reset the underlying
 * device before going ahead with the initialization otherwise driver might
 * rely on the BIOS and skip the reset operation.
 *
 * This is useful if kernel is booting in an unreliable environment.
 * For ex. kdump situation where previous kernel has crashed, BIOS has been
 * skipped and devices will be in unknown state.
 */

unsigned int reset_devices;

static int __init set_reset_devices(char *str) { reset_devices = 1; return 1; }


Vivek Goyal18100.00%1100.00%

__setup("reset_devices", set_reset_devices); static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; static const char *panic_later, *panic_param; extern const struct obs_kernel_param __setup_start[], __setup_end[];
static bool __init obsolete_checksetup(char *line) { const struct obs_kernel_param *p; bool had_early_param = false; p = __setup_start; do { int n = strlen(p->str); if (parameqn(line, p->str, n)) { if (p->early) { /* Already done in parse_early_param? * (Needs exact match on param part). * Keep iterating, as we can have early * params and __setups of same names 8( */ if (line[n] == '\0' || line[n] == '=') had_early_param = true; } else if (!p->setup_func) { pr_warn("Parameter %s is obsolete, ignored\n", p->str); return true; } else if (p->setup_func(line + n)) return true; } p++; } while (p < __setup_end); return had_early_param; }


Linus Torvalds (pre-git)6951.88%327.27%
Andrew Morton2720.30%218.18%
Dmitry Torokhov1914.29%19.09%
Rusty Russell118.27%327.27%
Yaowei Bai64.51%19.09%
Michal Schmidt10.75%19.09%

/* * This should be approx 2 Bo*oMips to start (note initial shift), and will * still work even if initially too large, it will just take slightly longer */ unsigned long loops_per_jiffy = (1<<12); EXPORT_SYMBOL(loops_per_jiffy);
static int __init debug_kernel(char *str) { console_loglevel = CONSOLE_LOGLEVEL_DEBUG; return 0; }


Linus Torvalds (pre-git)1688.89%133.33%
Borislav Petkov15.56%133.33%
Yinghai Lu15.56%133.33%

static int __init quiet_kernel(char *str) { console_loglevel = CONSOLE_LOGLEVEL_QUIET; return 0; }


Linus Torvalds (pre-git)1688.89%133.33%
Yinghai Lu15.56%133.33%
Borislav Petkov15.56%133.33%

early_param("debug", debug_kernel); early_param("quiet", quiet_kernel);
static int __init loglevel(char *str) { int newlevel; /* * Only update loglevel value when a correct setting was passed, * to prevent blind crashes (when loglevel being set to 0) that * are quite hard to debug */ if (get_option(&str, &newlevel)) { console_loglevel = newlevel; return 0; } return -EINVAL; }


Alexander Sverdlin1743.59%133.33%
Alex Riesen12.56%133.33%

early_param("loglevel", loglevel); /* Change NUL term back to "=", to make "param" the whole string. */
static int __init repair_env_string(char *param, char *val, const char *unused, void *arg) { if (val) { /* param=val or param="val"? */ if (val == param+strlen(param)+1) val[-1] = '='; else if (val == param+strlen(param)+2) { val[-2] = '='; memmove(val-1, val, strlen(val)+1); val--; } else BUG(); } return 0; }


Rusty Russell6260.78%220.00%
Linus Torvalds (pre-git)1514.71%440.00%
Len Brown1110.78%110.00%
Chris Metcalf54.90%110.00%
Jim Cromie54.90%110.00%
Luis R. Rodriguez43.92%110.00%

/* Anything after -- gets handed straight to init. */
static int __init set_init_arg(char *param, char *val, const char *unused, void *arg) { unsigned int i; if (panic_later) return 0; repair_env_string(param, val, unused, NULL); for (i = 0; argv_init[i]; i++) { if (i == MAX_INIT_ARGS) { panic_later = "init"; panic_param = param; return 0; } } argv_init[i] = param; return 0; }


Rusty Russell8593.41%150.00%
Luis R. Rodriguez66.59%150.00%

/* * Unknown boot options get handed to init, unless they look like * unused parameters (modprobe will find them in /proc/cmdline). */
static int __init unknown_bootoption(char *param, char *val, const char *unused, void *arg) { repair_env_string(param, val, unused, NULL); /* Handle obsolete-style parameters */ if (obsolete_checksetup(param)) return 0; /* Unused module parameter. */ if (strchr(param, '.') && (!val || strchr(param, '.') < val)) return 0; if (panic_later) return 0; if (val) { /* Environment option */ unsigned int i; for (i = 0; envp_init[i]; i++) { if (i == MAX_INIT_ENVS) { panic_later = "env"; panic_param = param; } if (!strncmp(param, envp_init[i], val - param)) break; } envp_init[i] = param; } else { /* Command line option */ unsigned int i; for (i = 0; argv_init[i]; i++) { if (i == MAX_INIT_ARGS) { panic_later = "init"; panic_param = param; } } argv_init[i] = param; } return 0; }


Rusty Russell8040.40%212.50%
Linus Torvalds (pre-git)4321.72%850.00%
Chris Metcalf2110.61%16.25%
Andrew Morton2110.61%16.25%
William Lee Irwin III189.09%16.25%
Greg Kroah-Hartman73.54%16.25%
Luis R. Rodriguez63.03%16.25%
Tetsuo Handa21.01%16.25%

static int __init init_setup(char *str) { unsigned int i; execute_command = str; /* * In case LILO is going to boot us with default command line, * it prepends "auto" before the whole cmdline which makes * the shell think it should execute a script with such name. * So we ignore all arguments entered _before_ init=... [MJ] */ for (i = 1; i < MAX_INIT_ARGS; i++) argv_init[i] = NULL; return 1; }


Rusty Russell4297.67%150.00%
Adam Kropelin12.33%150.00%

__setup("init=", init_setup);
static int __init rdinit_setup(char *str) { unsigned int i; ramdisk_execute_command = str; /* See "auto" comment in init_setup */ for (i = 1; i < MAX_INIT_ARGS; i++) argv_init[i] = NULL; return 1; }


Olof Johansson43100.00%1100.00%

__setup("rdinit=", rdinit_setup); #ifndef CONFIG_SMP static const unsigned int setup_max_cpus = NR_CPUS;
static inline void setup_nr_cpu_ids(void) { }


Mike Travis8100.00%1100.00%

static inline void smp_prepare_cpus(unsigned int maxcpus) { }


Rusty Russell10100.00%1100.00%

#endif /* * We need to store the untouched command line for future reference. * We also need to store the touched command line since the parameter * parsing is performed in place, and we should allow a component to * store reference of name/value for future reference. */
static void __init setup_command_line(char *command_line) { saved_command_line = memblock_virt_alloc(strlen(boot_command_line) + 1, 0); initcall_command_line = memblock_virt_alloc(strlen(boot_command_line) + 1, 0); static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0); strcpy(saved_command_line, boot_command_line); strcpy(static_command_line, command_line); }


Alon Bar-Lev4770.15%133.33%
Krzysztof Mazur1116.42%133.33%
Santosh Shilimkar913.43%133.33%

/* * We need to finalize in a non-__init function or else race conditions * between the root thread and the init thread may cause start_kernel to * be reaped by free_initmem before the root thread has proceeded to * cpu_idle. * * gcc-3.4 accidentally inlines this function, so use noinline. */ static __initdata DECLARE_COMPLETION(kthreadd_done);
static noinline void __ref rest_init(void) { struct task_struct *tsk; int pid; rcu_scheduler_starting(); /* * We need to spawn init first so that it obtains pid 1, however * the init task will end up wanting to create kthreads, which, if * we schedule it before we create kthreadd, will OOPS. */ pid = kernel_thread(kernel_init, NULL, CLONE_FS); /* * Pin init on the boot CPU. Task migration is not properly working * until sched_init_smp() has been run. It will set the allowed * CPUs for init to the non isolated CPUs. */ rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id())); rcu_read_unlock(); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); rcu_read_lock(); kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); rcu_read_unlock(); /* * Enable might_sleep() and smp_processor_id() checks. * They cannot be enabled earlier because with CONFIG_PREEMPT=y * kernel_thread() would trigger might_sleep() splats. With * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled * already, but it's stuck on the kthreadd_done completion. */ system_state = SYSTEM_SCHEDULING; complete(&kthreadd_done); /* * The boot idle thread must execute schedule() * at least once to get things moving: */ schedule_preempt_disabled(); /* Call into cpu_idle with preempt disabled */ cpu_startup_entry(CPUHP_ONLINE); }


Thomas Gleixner4438.60%419.05%
Eric W. Biedermann2219.30%14.76%
Linus Torvalds1513.16%14.76%
Paul E. McKenney97.89%29.52%
Peter Zijlstra76.14%29.52%
Pavel Emelyanov43.51%14.76%
Ingo Molnar32.63%29.52%
Andi Kleen32.63%14.76%
Andrew Morton21.75%29.52%
Rakib Mullick10.88%14.76%
Florian La Roche10.88%14.76%
Fabian Frederick10.88%14.76%
Nicholas Piggin10.88%14.76%
Sam Ravnborg10.88%14.76%

/* Check for early params. */
static int __init do_early_param(char *param, char *val, const char *unused, void *arg) { const struct obs_kernel_param *p; for (p = __setup_start; p < __setup_end; p++) { if ((p->early && parameq(param, p->str)) || (strcmp(param, "console") == 0 && strcmp(p->str, "earlycon") == 0) ) { if (p->setup_func(val) != 0) pr_warn("Malformed early option '%s'\n", param); } } /* We accept everything at this stage. */ return 0; }


Andrew Morton7367.59%228.57%
Yinghai Lu2422.22%114.29%
Jim Cromie54.63%114.29%
Luis R. Rodriguez43.70%114.29%
Rusty Russell10.93%114.29%
Michal Schmidt10.93%114.29%

void __init parse_early_options(char *cmdline) { parse_args("early options", cmdline, NULL, 0, 0, 0, NULL, do_early_param); }


Magnus Damm2379.31%133.33%
Pawel Moll413.79%133.33%
Luis R. Rodriguez26.90%133.33%

/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void) { static int done __initdata; static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata; if (done) return; /* All fall through to do_early_param. */ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); parse_early_options(tmp_cmdline); done = 1; }


Andrew Morton4191.11%125.00%
Fabian Frederick24.44%125.00%
Magnus Damm12.22%125.00%
Alon Bar-Lev12.22%125.00%

void __init __weak arch_post_acpi_subsys_init(void) { }


Thomas Gleixner8100.00%1100.00%

void __init __weak smp_setup_processor_id(void) { }


Andrew Morton562.50%150.00%
Benjamin Herrenschmidt337.50%150.00%

void __init __weak thread_stack_cache_init(void) { }


Benjamin Herrenschmidt787.50%150.00%
Linus Torvalds112.50%150.00%

void __init __weak mem_encrypt_init(void) { }


Tom Lendacky8100.00%1100.00%

bool initcall_debug; core_param(initcall_debug, initcall_debug, bool, 0644); #ifdef TRACEPOINTS_ENABLED static void __init initcall_debug_enable(void); #else
static inline void initcall_debug_enable(void) { }


Steven Rostedt8100.00%1100.00%

#endif /* * Set up kernel memory allocators */
static void __init mm_init(void) { /* * page_ext requires contiguous pages, * bigger than MAX_ORDER unless SPARSEMEM. */ page_ext_init_flatmem(); mem_init(); kmem_cache_init(); pgtable_init(); vmalloc_init(); ioremap_huge_init(); /* Should be run before the first non-init thread is created */ init_espfix_bsp(); /* Should be run after espfix64 is set up. */ pti_init(); }


Pekka J Enberg1850.00%114.29%
Thomas Gleixner822.22%228.57%
JoonSoo Kim411.11%114.29%
Toshi Kani38.33%114.29%
Benjamin Herrenschmidt25.56%114.29%
Kirill A. Shutemov12.78%114.29%

/*
 * Architecture-independent kernel entry point, reached from arch setup
 * code with only the boot CPU running and IRQs disabled.  Brings the
 * kernel up in strict dependency order — command line, memory
 * management, scheduler, IRQs, timers, console, and the remaining core
 * subsystems — then hands off to rest_init(), which spawns PID 1 and
 * kthreadd and turns this thread into the boot idle task.  Does not
 * return.  The inline comments mark the ordering constraints; treat any
 * reordering here as suspect.
 */
asmlinkage __visible void __init start_kernel(void) { char *command_line; char *after_dashes; set_task_stack_end_magic(&init_task); smp_setup_processor_id(); debug_objects_early_init(); cgroup_init_early(); local_irq_disable(); early_boot_irqs_disabled = true; /* * Interrupts are still disabled. Do necessary setups, then * enable them. */ boot_cpu_init(); page_address_init(); pr_notice("%s", linux_banner); setup_arch(&command_line); /* * Set up the the initial canary and entropy after arch * and after adding latent and command line entropy. */ add_latent_entropy(); add_device_randomness(command_line, strlen(command_line)); boot_init_stack_canary(); mm_init_cpumask(&init_mm); setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); boot_cpu_state_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ build_all_zonelists(NULL); page_alloc_init(); pr_notice("Kernel command line: %s\n", boot_command_line); parse_early_param(); after_dashes = parse_args("Booting kernel", static_command_line, __start___param, __stop___param - __start___param, -1, -1, NULL, &unknown_bootoption); if (!IS_ERR_OR_NULL(after_dashes)) parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, NULL, set_init_arg); jump_label_init(); /* * These use large bootmem allocations and must precede * kmem_cache_init() */ setup_log_buf(0); vfs_caches_init_early(); sort_main_extable(); trap_init(); mm_init(); ftrace_init(); /* trace_printk can be enabled here */ early_trace_init(); /* * Set up the scheduler prior starting any interrupts (such as the * timer interrupt). Full topology setup happens at smp_init() * time - but meanwhile we still have a functioning scheduler. */ sched_init(); /* * Disable preemption - early bootup scheduling is extremely * fragile until we cpu_idle() for the first time. 
*/ preempt_disable(); if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); radix_tree_init(); /* * Set up housekeeping before setting up workqueues to allow the unbound * workqueue to take non-housekeeping into account. */ housekeeping_init(); /* * Allow workqueue creation and work item queueing/cancelling * early. Work item execution depends on kthreads and starts after * workqueue_init(). */ workqueue_init_early(); rcu_init(); /* Trace events are available after this */ trace_init(); if (initcall_debug) initcall_debug_enable(); context_tracking_init(); /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); tick_init(); rcu_init_nohz(); init_timers(); hrtimers_init(); softirq_init(); timekeeping_init(); time_init(); sched_clock_postinit(); printk_safe_init(); perf_event_init(); profile_init(); call_function_init(); WARN(!irqs_disabled(), "Interrupts were enabled early\n"); early_boot_irqs_disabled = false; local_irq_enable(); kmem_cache_init_late(); /* * HACK ALERT! This is early. We're enabling the console before * we've done PCI setups etc, and console_init() must be aware of * this. But we do want output early, in case something goes wrong. */ console_init(); if (panic_later) panic("Too many boot %s vars at `%s'", panic_later, panic_param); lockdep_info(); /* * Need to run this when irqs are enabled, because it wants * to self-test [hard/soft]-irqs on/off lock inversion bugs * too: */ locking_selftest(); /* * This needs to be called before any devices perform DMA * operations that might use the SWIOTLB bounce buffers. It will * mark the bounce buffers as decrypted so that their usage will * not cause "plain-text" data to be decrypted when accessed. 
*/ mem_encrypt_init(); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start && !initrd_below_start_ok && page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) { pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n", page_to_pfn(virt_to_page((void *)initrd_start)), min_low_pfn); initrd_start = 0; } #endif page_ext_init(); kmemleak_init(); debug_objects_mem_init(); setup_per_cpu_pageset(); numa_policy_init(); acpi_early_init(); if (late_time_init) late_time_init(); calibrate_delay(); pid_idr_init(); anon_vma_init(); #ifdef CONFIG_X86 if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); #endif thread_stack_cache_init(); cred_init(); fork_init(); proc_caches_init(); uts_ns_init(); buffer_init(); key_init(); security_init(); dbg_late_init(); vfs_caches_init(); pagecache_init(); signals_init(); seq_file_init(); proc_root_init(); nsfs_init(); cpuset_init(); cgroup_init(); taskstats_init_early(); delayacct_init(); check_bugs(); acpi_subsystem_init(); arch_post_acpi_subsys_init(); sfi_init_late(); if (efi_enabled(EFI_RUNTIME_SERVICES)) { efi_free_boot_services(); } /* Do the rest non-__init'ed, we're now alive */ rest_init(); }


Linus Torvalds (pre-git)8616.32%2920.57%
Rusty Russell458.54%42.84%
Andrew Morton366.83%117.80%
Linus Torvalds285.31%85.67%
Steven Rostedt275.12%53.55%
Pekka J Enberg244.55%42.84%
Geert Uytterhoeven203.80%21.42%
Thomas Gleixner173.23%53.55%
Ingo Molnar152.85%42.84%
Heiko Carstens152.85%10.71%
Tejun Heo122.28%32.13%
Daniel Micay112.09%10.71%
John Stultz91.71%21.42%
Josh Triplett91.71%21.42%
Ard van Breemen91.71%10.71%
Mike Travis71.33%21.42%
Alon Bar-Lev71.33%10.71%
Aaron Tomlin61.14%10.71%
Shailabh Nagar61.14%21.42%
Laura Abbott61.14%10.71%
David Howells61.14%21.42%
Matt Fleming61.14%10.71%
Alexey Dobriyan61.14%21.42%
Paul Menage50.95%10.71%
Yinghai Lu40.76%10.71%
Tom Lendacky40.76%10.71%
Andi Kleen40.76%21.42%
Daniel R Thompson40.76%10.71%
Stas Sergeev40.76%10.71%
Tal Shorer40.76%10.71%
Luis R. Rodriguez40.76%10.71%
Takao Indoh30.57%10.71%
Dou Liyang30.57%10.71%
Rafael J. Wysocki30.57%10.71%
Christoph Lameter30.57%10.71%
Jeremy Fitzhardinge30.57%10.71%
Frédéric Weisbecker30.57%10.71%
Stéphane Eranian30.57%10.71%
Feng Tang30.57%10.71%
Dipankar Sarma30.57%10.71%
Lei Ming30.57%10.71%
Stephen Boyd30.57%10.71%
Haicheng Li30.57%10.71%
Jason Wessel30.57%21.42%
Yang Shi30.57%10.71%
Al Viro30.57%10.71%
Catalin Marinas20.38%10.71%
Chris Wright20.38%21.42%
Petr Mladek20.38%10.71%
Li Zefan20.38%10.71%
Nicholas Piggin20.38%10.71%
Greg Kroah-Hartman20.38%10.71%
Fabian Frederick20.38%10.71%
John Levon20.38%10.71%
Waiman Long20.38%10.71%
Alex Riesen20.38%10.71%
Tetsuo Handa20.38%10.71%
Paul Jackson20.38%10.71%
Benjamin Herrenschmidt20.38%10.71%
Peter Zijlstra20.38%10.71%
Pawel Moll20.38%10.71%
Mel Gorman10.19%10.71%
Viresh Kumar10.19%10.71%
Sergey Senozhatsky10.19%10.71%
Matthew Wilcox10.19%10.71%
Heinrich Schuchardt10.19%10.71%
Gargi Sharma10.19%10.71%

/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void) { #ifdef CONFIG_CONSTRUCTORS ctor_fn_t *fn = (ctor_fn_t *) __ctors_start; for (; fn < (ctor_fn_t *) __ctors_end; fn++) (*fn)(); #endif }


Peter Oberparleiter4090.91%150.00%
H Hartley Sweeten49.09%150.00%

#ifdef CONFIG_KALLSYMS struct blacklist_entry { struct list_head next; char *buf; }; static __initdata_or_module LIST_HEAD(blacklisted_initcalls);
static int __init initcall_blacklist(char *str) { char *str_entry; struct blacklist_entry *entry; /* str argument is a comma-separated list of functions */ do { str_entry = strsep(&str, ","); if (str_entry) { pr_debug("blacklisting initcall %s\n", str_entry); entry = alloc_bootmem(sizeof(*entry)); entry->buf = alloc_bootmem(strlen(str_entry) + 1); strcpy(entry->buf, str_entry); list_add(&entry->next, &blacklisted_initcalls); } } while (str_entry); return 0; }


Prarit Bhargava100100.00%1100.00%

static bool __init_or_module initcall_blacklisted(initcall_t fn) { struct blacklist_entry *entry; char fn_name[KSYM_SYMBOL_LEN]; unsigned long addr; if (list_empty(&blacklisted_initcalls)) return false; addr = (unsigned long) dereference_function_descriptor(fn); sprint_symbol_no_offset(fn_name, addr); /* * fn will be "function_name [module_name]" where [module_name] is not * displayed for built-in init functions. Strip off the [module_name]. */ strreplace(fn_name, ' ', '\0'); list_for_each_entry(entry, &blacklisted_initcalls, next) { if (!strcmp(fn_name, entry->buf)) { pr_debug("initcall %s blacklisted\n", fn_name); return true; } } return false; }


Prarit Bhargava6766.34%240.00%
Rasmus Villemoes3029.70%240.00%
Geliang Tang43.96%120.00%

static int __init initcall_blacklist(char *str) { pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n"); return 0; }


Prarit Bhargava19100.00%1100.00%

static bool __init_or_module initcall_blacklisted(initcall_t fn) { return false; }


Prarit Bhargava13100.00%1100.00%

#endif __setup("initcall_blacklist=", initcall_blacklist);
static __init_or_module void trace_initcall_start_cb(void *data, initcall_t fn) { ktime_t *calltime = (ktime_t *)data; printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current)); *calltime = ktime_get(); }


Steven Rostedt1534.88%18.33%
Andrew Morton613.95%216.67%
Américo Wang511.63%18.33%
Ingo Molnar511.63%18.33%
Linus Torvalds (pre-git)36.98%18.33%
Linus Torvalds36.98%18.33%
Arjan van de Ven24.65%18.33%
Kevin Winchester24.65%216.67%
Frédéric Weisbecker24.65%216.67%

static __init_or_module void trace_initcall_finish_cb(void *data, initcall_t fn, int ret) { ktime_t *