cregit-Linux: how code gets into the kernel

Release 4.8: kernel/fork.c

/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>


#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */

#define MIN_THREADS 20

/*
 * Maximum number of threads
 */

#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */

unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU

int lockdep_tasklist_lock_is_held(void) { return lockdep_is_held(&tasklist_lock); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
paul e. mckenney | 14 | 100.00% | 1 | 100.00%
Total | 14 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void) { int cpu; int total = 0; for_each_possible_cpu(cpu) total += per_cpu(process_counts, cpu); return total; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
andrew morton | 30 | 96.77% | 2 | 66.67%
ian campbell | 1 | 3.23% | 1 | 33.33%
Total | 31 | 100.00% | 3 | 100.00%


void __weak arch_release_task_struct(struct task_struct *tsk) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
akinobu mita | 10 | 100.00% | 1 | 100.00%
Total | 10 | 100.00% | 1 | 100.00%

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;
static inline struct task_struct *alloc_task_struct_node(int node) { return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 18 | 81.82% | 1 | 50.00%
eric dumazet | 4 | 18.18% | 1 | 50.00%
Total | 22 | 100.00% | 2 | 100.00%


static inline void free_task_struct(struct task_struct *tsk) { kmem_cache_free(task_struct_cachep, tsk); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 15 | 78.95% | 1 | 33.33%
david mosberger | 3 | 15.79% | 1 | 33.33%
christoph lameter | 1 | 5.26% | 1 | 33.33%
Total | 19 | 100.00% | 3 | 100.00%

#endif
void __weak arch_release_thread_stack(unsigned long *stack) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 6 | 60.00% | 1 | 50.00%
linus torvalds | 4 | 40.00% | 1 | 50.00%
Total | 10 | 100.00% | 2 | 100.00%

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) { struct page *page = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER); return page ? page_address(page) : NULL; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
eric dumazet | 20 | 50.00% | 1 | 20.00%
fujita tomonori | 15 | 37.50% | 1 | 20.00%
linus torvalds | 3 | 7.50% | 1 | 20.00%
vladimir davydov | 2 | 5.00% | 2 | 40.00%
Total | 40 | 100.00% | 5 | 100.00%


static inline void free_thread_stack(unsigned long *stack) { __free_pages(virt_to_page(stack), THREAD_SIZE_ORDER); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 12 | 54.55% | 1 | 25.00%
linus torvalds | 5 | 22.73% | 1 | 25.00%
vladimir davydov | 3 | 13.64% | 1 | 25.00%
andy lutomirski | 2 | 9.09% | 1 | 25.00%
Total | 22 | 100.00% | 4 | 100.00%

# else
static struct kmem_cache *thread_stack_cache;
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) { return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 22 | 84.62% | 1 | 33.33%
michael ellerman | 2 | 7.69% | 1 | 33.33%
linus torvalds | 2 | 7.69% | 1 | 33.33%
Total | 26 | 100.00% | 3 | 100.00%


static void free_thread_stack(unsigned long *stack) { kmem_cache_free(thread_stack_cache, stack); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 12 | 66.67% | 1 | 33.33%
linus torvalds | 5 | 27.78% | 1 | 33.33%
michael ellerman | 1 | 5.56% | 1 | 33.33%
Total | 18 | 100.00% | 3 | 100.00%


void thread_stack_cache_init(void) { thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE, THREAD_SIZE, 0, NULL); BUG_ON(thread_stack_cache == NULL); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 25 | 86.21% | 1 | 50.00%
linus torvalds | 4 | 13.79% | 1 | 50.00%
Total | 29 | 100.00% | 2 | 100.00%

# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
static void account_kernel_stack(unsigned long *stack, int account) { /* All stack pages are in the same zone and belong to the same memcg. */ struct page *first_page = virt_to_page(stack); mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, THREAD_SIZE / 1024 * account); memcg_kmem_update_page_stat( first_page, MEMCG_KERNEL_STACK_KB, account * (THREAD_SIZE / 1024)); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
andy lutomirski | 27 | 48.21% | 2 | 50.00%
kosaki motohiro | 25 | 44.64% | 1 | 25.00%
linus torvalds | 4 | 7.14% | 1 | 25.00%
Total | 56 | 100.00% | 4 | 100.00%


void free_task(struct task_struct *tsk) { account_kernel_stack(tsk->stack, -1); arch_release_thread_stack(tsk->stack); free_thread_stack(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); put_seccomp_filter(tsk); arch_release_task_struct(tsk); free_task_struct(tsk); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
ingo molnar | 23 | 38.98% | 2 | 20.00%
akinobu mita | 11 | 18.64% | 1 | 10.00%
kosaki motohiro | 10 | 16.95% | 1 | 10.00%
will drewry | 5 | 8.47% | 1 | 10.00%
frederic weisbecker | 5 | 8.47% | 2 | 20.00%
linus torvalds | 2 | 3.39% | 1 | 10.00%
david mosberger | 2 | 3.39% | 1 | 10.00%
roman zippel | 1 | 1.69% | 1 | 10.00%
Total | 59 | 100.00% | 10 | 100.00%

EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig) { taskstats_tgid_free(sig); sched_autogroup_exit(sig); kmem_cache_free(signal_cachep, sig); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 24 | 82.76% | 2 | 66.67%
mike galbraith | 5 | 17.24% | 1 | 33.33%
Total | 29 | 100.00% | 3 | 100.00%


static inline void put_signal_struct(struct signal_struct *sig) { if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 27 | 100.00% | 1 | 100.00%
Total | 27 | 100.00% | 1 | 100.00%


void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); cgroup_free(tsk); task_numa_free(tsk); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); if (!profile_handoff_task(tsk)) free_task(tsk); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 35 | 43.21% | 1 | 7.69%
john levon | 8 | 9.88% | 1 | 7.69%
oleg nesterov | 7 | 8.64% | 1 | 7.69%
kees cook | 5 | 6.17% | 1 | 7.69%
shailabh nagar | 5 | 6.17% | 1 | 7.69%
mike galbraith | 5 | 6.17% | 1 | 7.69%
andrew morton | 5 | 6.17% | 2 | 15.38%
tejun heo | 5 | 6.17% | 1 | 7.69%
christoph hellwig | 3 | 3.70% | 1 | 7.69%
ingo molnar | 1 | 1.23% | 1 | 7.69%
david howells | 1 | 1.23% | 1 | 7.69%
david mosberger | 1 | 1.23% | 1 | 7.69%
Total | 81 | 100.00% | 13 | 100.00%

EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 7 | 87.50% | 1 | 50.00%
suresh siddha | 1 | 12.50% | 1 | 50.00%
Total | 8 | 100.00% | 2 | 100.00%

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested) { u64 threads; /* * The number of threads shall be limited such that the thread * structures may only consume a small part of the available memory. */ if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64) threads = MAX_THREADS; else threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, (u64) THREAD_SIZE * 8UL); if (threads > max_threads_suggested) threads = max_threads_suggested; max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
heinrich schuchardt | 78 | 100.00% | 3 | 100.00%
Total | 78 | 100.00% | 3 | 100.00%

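A worked instance of the sizing rule above, using hypothetical but typical numbers (4 GiB of RAM, 4 KiB pages, 16 KiB kernel stacks):

/*
 * Sketch of the arithmetic in set_max_threads(), assuming
 * totalram_pages = 1048576, PAGE_SIZE = 4096, THREAD_SIZE = 16384:
 *
 *   threads = (1048576ULL * 4096) / (16384 * 8)
 *           = 4 GiB / 128 KiB
 *           = 32768
 *
 * i.e. task stacks may consume at most one eighth of memory; the
 * result is then clamped to [MIN_THREADS, MAX_THREADS].
 */
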
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif
void __init fork_init(void) { #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES #endif /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", arch_task_struct_size, ARCH_MIN_TASKALIGN, SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL); #endif /* do the arch specific task caches init */ arch_task_cache_init(); set_max_threads(MAX_THREADS); init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 33 | 33.33% | 5 | 25.00%
roland mcgrath | 22 | 22.22% | 2 | 10.00%
david howells | 13 | 13.13% | 1 | 5.00%
andrew morton | 11 | 11.11% | 3 | 15.00%
heinrich schuchardt | 5 | 5.05% | 2 | 10.00%
david mosberger | 4 | 4.04% | 1 | 5.00%
suresh siddha | 4 | 4.04% | 1 | 5.00%
vegard nossum | 2 | 2.02% | 1 | 5.00%
vladimir davydov | 2 | 2.02% | 1 | 5.00%
alan cox | 1 | 1.01% | 1 | 5.00%
ingo molnar | 1 | 1.01% | 1 | 5.00%
thomas gleixner | 1 | 1.01% | 1 | 5.00%
Total | 99 | 100.00% | 20 | 100.00%


int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
suresh siddha | 17 | 68.00% | 1 | 50.00%
gideon israel dsouza | 8 | 32.00% | 1 | 50.00%
Total | 25 | 100.00% | 2 | 100.00%


void set_task_stack_end_magic(struct task_struct *tsk) { unsigned long *stackend; stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ }

Contributors

Person | Tokens | Prop | Commits | CommitProp
aaron tomlin | 28 | 100.00% | 1 | 100.00%
Total | 28 | 100.00% | 1 | 100.00%

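The magic word planted here is what later overflow checks compare against; a minimal sketch of such a check (the in-tree helper for this is task_stack_end_corrupted(), consulted e.g. by the scheduler):

static inline bool stack_end_corrupted(struct task_struct *tsk)
{
	/* the word at the far end of the stack must still hold STACK_END_MAGIC */
	return *end_of_stack(tsk) != STACK_END_MAGIC;
}
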

static struct task_struct *dup_task_struct(struct task_struct *orig, int node) { struct task_struct *tsk; unsigned long *stack; int err; if (node == NUMA_NO_NODE) node = tsk_fork_get_node(orig); tsk = alloc_task_struct_node(node); if (!tsk) return NULL; stack = alloc_thread_stack_node(tsk, node); if (!stack) goto free_tsk; err = arch_dup_task_struct(tsk, orig); if (err) goto free_stack; tsk->stack = stack; #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under * the sighand lock in case orig has changed between now and * then. Until then, filter must be NULL to avoid messing up * the usage counts on the error path calling free_task. */ tsk->seccomp.filter = NULL; #endif setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); clear_tsk_need_resched(tsk); set_task_stack_end_magic(tsk); #ifdef CONFIG_CC_STACKPROTECTOR tsk->stack_canary = get_random_int(); #endif /* * One for us, one for whoever does the "release_task()" (usually * parent) */ atomic_set(&tsk->usage, 2); #ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; account_kernel_stack(stack, 1); kcov_task_init(tsk); return tsk; free_stack: free_thread_stack(stack); free_tsk: free_task_struct(tsk); return NULL; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
david howells | 56 | 25.45% | 1 | 3.33%
eric dumazet | 19 | 8.64% | 4 | 13.33%
suresh siddha | 18 | 8.18% | 1 | 3.33%
kees cook | 14 | 6.36% | 1 | 3.33%
linus torvalds | 13 | 5.91% | 2 | 6.67%
andi kleen | 13 | 5.91% | 1 | 3.33%
jens axboe | 12 | 5.45% | 2 | 6.67%
arjan van de ven | 12 | 5.45% | 1 | 3.33%
sebastian andrzej siewior | 8 | 3.64% | 1 | 3.33%
kosaki motohiro | 6 | 2.73% | 1 | 3.33%
andrew morton | 6 | 2.73% | 1 | 3.33%
al viro | 5 | 2.27% | 1 | 3.33%
akinobu mita | 5 | 2.27% | 1 | 3.33%
mike galbraith | 5 | 2.27% | 1 | 3.33%
avi kivity | 5 | 2.27% | 1 | 3.33%
alexey dobriyan | 5 | 2.27% | 1 | 3.33%
dmitriy vyukov | 5 | 2.27% | 1 | 3.33%
eric sandeen | 4 | 1.82% | 1 | 3.33%
ingo molnar | 2 | 0.91% | 1 | 3.33%
david mosberger | 2 | 0.91% | 1 | 3.33%
roman zippel | 1 | 0.45% | 1 | 3.33%
peter zijlstra | 1 | 0.45% | 1 | 3.33%
aaron tomlin | 1 | 0.45% | 1 | 3.33%
daniel rebelo de oliveira | 1 | 0.45% | 1 | 3.33%
stephen rothwell | 1 | 0.45% | 1 | 3.33%
Total | 220 | 100.00% | 30 | 100.00%

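dup_task_struct() follows the usual kernel unwind idiom: each failure jumps to a label that frees exactly what has been allocated so far, in reverse order. A self-contained sketch of the pattern (a generic example, not part of fork.c):

static int alloc_two(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		goto fail;
	*b = kmalloc(64, GFP_KERNEL);
	if (!*b)
		goto free_a;
	return 0;

free_a:		/* undo in reverse order of allocation */
	kfree(*a);
fail:
	return -ENOMEM;
}
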
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge; uprobe_start_dup_mmap(); if (down_write_killable(&oldmm->mmap_sem)) { retval = -EINTR; goto fail_uprobe_end; } flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* * Not linked in yet - no deadlock potential: */ down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING); /* No ordering required: file already has been exposed. */ RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); mm->total_vm = oldmm->total_vm; mm->data_vm = oldmm->data_vm; mm->exec_vm = oldmm->exec_vm; mm->stack_vm = oldmm->stack_vm; rb_link = &mm->mm_rb.rb_node; rb_parent = NULL; pprev = &mm->mmap; retval = ksm_fork(mm, oldmm); if (retval) goto out; retval = khugepaged_fork(mm, oldmm); if (retval) goto out; prev = NULL; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; if (mpnt->vm_flags & VM_DONTCOPY) { vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); continue; } charge = 0; if (mpnt->vm_flags & VM_ACCOUNT) { unsigned long len = vma_pages(mpnt); if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ goto fail_nomem; charge = len; } tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!tmp) goto fail_nomem; *tmp = *mpnt; INIT_LIST_HEAD(&tmp->anon_vma_chain); retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; tmp->vm_mm = mm; if (anon_vma_fork(tmp, mpnt)) goto fail_nomem_anon_vma_fork; tmp->vm_flags &= ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP); tmp->vm_next = tmp->vm_prev = NULL; tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; file = tmp->vm_file; if (file) { struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; get_file(file); if (tmp->vm_flags & VM_DENYWRITE) atomic_dec(&inode->i_writecount); i_mmap_lock_write(mapping); if (tmp->vm_flags & VM_SHARED) atomic_inc(&mapping->i_mmap_writable); flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); i_mmap_unlock_write(mapping); } /* * Clear hugetlb-related page reserves for children. This only * affects MAP_PRIVATE mappings. Faults generated by the child * are not guaranteed to succeed, even if read-only */ if (is_vm_hugetlb_page(tmp)) reset_vma_resv_huge_pages(tmp); /* * Link in the new vma and copy the page table entries. */ *pprev = tmp; pprev = &tmp->vm_next; tmp->vm_prev = prev; prev = tmp; __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; mm->map_count++; retval = copy_page_range(mm, oldmm, mpnt); if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); if (retval) goto out; } /* a new mm has just been created */ arch_dup_mmap(oldmm, mm); retval = 0; out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); up_write(&oldmm->mmap_sem); fail_uprobe_end: uprobe_end_dup_mmap(); return retval; fail_nomem_anon_vma_fork: mpol_put(vma_policy(tmp)); fail_nomem_policy: kmem_cache_free(vm_area_cachep, tmp); fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); goto out; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 193 | 29.07% | 21 | 31.34%
andrew morton | 128 | 19.28% | 6 | 8.96%
hugh dickins | 64 | 9.64% | 6 | 8.96%
linus torvalds | 38 | 5.72% | 2 | 2.99%
andrea arcangeli | 34 | 5.12% | 4 | 5.97%
vladimir davydov | 30 | 4.52% | 1 | 1.49%
rik van riel | 26 | 3.92% | 1 | 1.49%
oleg nesterov | 23 | 3.46% | 4 | 5.97%
christoph hellwig | 20 | 3.01% | 1 | 1.49%
konstantin khlebnikov | 15 | 2.26% | 2 | 2.99%
michal hocko | 15 | 2.26% | 1 | 1.49%
mel gorman | 13 | 1.96% | 1 | 1.49%
william lee irwin iii | 12 | 1.81% | 1 | 1.49%
huang shijie | 9 | 1.36% | 2 | 2.99%
jeremy fitzhardinge | 8 | 1.20% | 1 | 1.49%
ingo molnar | 7 | 1.05% | 2 | 2.99%
al viro | 7 | 1.05% | 2 | 2.99%
michel lespinasse | 6 | 0.90% | 2 | 2.99%
luca barbieri | 5 | 0.75% | 1 | 1.49%
david herrmann | 4 | 0.60% | 1 | 1.49%
davidlohr bueso | 2 | 0.30% | 1 | 1.49%
eric b munson | 2 | 0.30% | 1 | 1.49%
ralf baechle | 1 | 0.15% | 1 | 1.49%
siddhesh poyarekar | 1 | 0.15% | 1 | 1.49%
christoph lameter | 1 | 0.15% | 1 | 1.49%
Total | 664 | 100.00% | 67 | 100.00%

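The VM_DONTCOPY test at the top of the VMA loop is what implements madvise(MADV_DONTFORK): such mappings are skipped entirely when the child's address space is built. A hedged user-space sketch:

#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	madvise(buf, 4096, MADV_DONTFORK);	/* sets VM_DONTCOPY on this VMA */
	if (fork() == 0)
		_exit(0);	/* child: buf was never copied; touching it would SIGSEGV */
	return 0;
}
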

static inline int mm_alloc_pgd(struct mm_struct *mm) { mm->pgd = pgd_alloc(mm); if (unlikely(!mm->pgd)) return -ENOMEM; return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph hellwig | 38 | 100.00% | 1 | 100.00%
Total | 38 | 100.00% | 1 | 100.00%


static inline void mm_free_pgd(struct mm_struct *mm) { pgd_free(mm, mm->pgd); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph hellwig | 19 | 90.48% | 1 | 50.00%
benjamin herrenschmidt | 2 | 9.52% | 1 | 50.00%
Total | 21 | 100.00% | 2 | 100.00%

#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { down_write(&oldmm->mmap_sem); RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm)); up_write(&oldmm->mmap_sem); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
konstantin khlebnikov | 41 | 87.23% | 1 | 50.00%
christoph hellwig | 6 | 12.77% | 1 | 50.00%
Total | 47 | 100.00% | 2 | 100.00%

#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s) { default_dump_filter = (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & MMF_DUMP_FILTER_MASK; return 1; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
hidehiro kawai | 31 | 100.00% | 1 | 100.00%
Total | 31 | 100.00% | 1 | 100.00%

__setup("coredump_filter=", coredump_filter_setup); #include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm) { #ifdef CONFIG_AIO spin_lock_init(&mm->ioctx_lock); mm->ioctx_table = NULL; #endif }

Contributors

Person | Tokens | Prop | Commits | CommitProp
alexey dobriyan | 27 | 90.00% | 1 | 50.00%
benjamin lahaise | 3 | 10.00% | 1 | 50.00%
Total | 30 | 100.00% | 2 | 100.00%


static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG mm->owner = p; #endif }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 27 | 100.00% | 1 | 100.00%
Total | 27 | 100.00% | 1 | 100.00%


static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) { mm->mmap = NULL; mm->mm_rb = RB_ROOT; mm->vmacache_seqnum = 0; atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); INIT_LIST_HEAD(&mm->mmlist); mm->core_state = NULL; atomic_long_set(&mm->nr_ptes, 0); mm_nr_pmds_init(mm); mm->map_count = 0; mm->locked_vm = 0; mm->pinned_vm = 0; memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); mmu_notifier_mm_init(mm); clear_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS mm->pmd_huge_pte = NULL; #endif if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = default_dump_filter; mm->def_flags = 0; } if (mm_alloc_pgd(mm)) goto fail_nopgd; if (init_new_context(p, mm)) goto fail_nocontext; return mm; fail_nocontext: mm_free_pgd(mm); fail_nopgd: free_mm(mm); return NULL; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 84 | 31.23% | 2 | 6.67%
pre-git | 59 | 21.93% | 9 | 30.00%
alex thorlton | 47 | 17.47% | 1 | 3.33%
hugh dickins | 16 | 5.95% | 2 | 6.67%
pavel emelianov | 11 | 4.09% | 1 | 3.33%
kirill a. shutemov | 10 | 3.72% | 3 | 10.00%
kamezawa hiroyuki | 9 | 3.35% | 1 | 3.33%
rik van riel | 5 | 1.86% | 1 | 3.33%
william lee irwin iii | 5 | 1.86% | 1 | 3.33%
alexey dobriyan | 5 | 1.86% | 1 | 3.33%
ingo molnar | 4 | 1.49% | 2 | 6.67%
thomas gleixner | 4 | 1.49% | 1 | 3.33%
christoph hellwig | 3 | 1.12% | 1 | 3.33%
andrea arcangeli | 3 | 1.12% | 1 | 3.33%
oleg nesterov | 2 | 0.74% | 1 | 3.33%
linus torvalds | 1 | 0.37% | 1 | 3.33%
balbir singh | 1 | 0.37% | 1 | 3.33%
Total | 269 | 100.00% | 30 | 100.00%


static void check_mm(struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { long x = atomic_long_read(&mm->rss_stat.count[i]); if (unlikely(x)) printk(KERN_ALERT "BUG: Bad rss-counter state " "mm:%p idx:%d val:%ld\n", mm, i, x); } if (atomic_long_read(&mm->nr_ptes)) pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n", atomic_long_read(&mm->nr_ptes)); if (mm_nr_pmds(mm)) pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n", mm_nr_pmds(mm)); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS VM_BUG_ON_MM(mm->pmd_huge_pte, mm); #endif }

Contributors

Person | Tokens | Prop | Commits | CommitProp
konstantin khlebnikov | 74 | 59.20% | 1 | 25.00%
kirill a. shutemov | 48 | 38.40% | 2 | 50.00%
sasha levin | 3 | 2.40% | 1 | 25.00%
Total | 125 | 100.00% | 4 | 100.00%

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void) { struct mm_struct *mm; mm = allocate_mm(); if (!mm) return NULL; memset(mm, 0, sizeof(*mm)); return mm_init(mm, current); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 37 | 77.08% | 3 | 60.00%
kosaki motohiro | 9 | 18.75% | 1 | 20.00%
linus torvalds | 2 | 4.17% | 1 | 20.00%
Total | 48 | 100.00% | 5 | 100.00%

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); mm_free_pgd(mm); destroy_context(mm); mmu_notifier_mm_destroy(mm); check_mm(mm); free_mm(mm); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 29 | 67.44% | 3 | 42.86%
andrea arcangeli | 9 | 20.93% | 2 | 28.57%
christoph hellwig | 4 | 9.30% | 1 | 14.29%
konstantin khlebnikov | 1 | 2.33% | 1 | 14.29%
Total | 43 | 100.00% | 7 | 100.00%

EXPORT_SYMBOL_GPL(__mmdrop);
static inline void __mmput(struct mm_struct *mm) { VM_BUG_ON(atomic_read(&mm->mm_users)); uprobe_clear_state(mm); exit_aio(mm); ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); list_del(&mm->mmlist); spin_unlock(&mmlist_lock); } if (mm->binfmt) module_put(mm->binfmt->module); mmdrop(mm); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 34 | 31.19% | 7 | 46.67%
hugh dickins | 25 | 22.94% | 1 | 6.67%
hiroshi shimamoto | 15 | 13.76% | 1 | 6.67%
andrea arcangeli | 11 | 10.09% | 2 | 13.33%
michal hocko | 8 | 7.34% | 1 | 6.67%
matt helsley | 7 | 6.42% | 1 | 6.67%
srikar dronamraju | 5 | 4.59% | 1 | 6.67%
andrew morton | 4 | 3.67% | 1 | 6.67%
Total | 109 | 100.00% | 15 | 100.00%

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm) { might_sleep(); if (atomic_dec_and_test(&mm->mm_users)) __mmput(mm); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
michal hocko | 27 | 96.43% | 1 | 50.00%
pre-git | 1 | 3.57% | 1 | 50.00%
Total | 28 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work) { struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); __mmput(mm); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
michal hocko | 31 | 100.00% | 1 | 100.00%
Total | 31 | 100.00% | 1 | 100.00%


void mmput_async(struct mm_struct *mm) { if (atomic_dec_and_test(&mm->mm_users)) { INIT_WORK(&mm->async_put_work, mmput_async_fn); schedule_work(&mm->async_put_work); } }

Contributors

Person | Tokens | Prop | Commits | CommitProp
michal hocko | 40 | 100.00% | 1 | 100.00%
Total | 40 | 100.00% | 1 | 100.00%

#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { struct file *old_exe_file; /* * It is safe to dereference the exe_file without RCU as * this function is only called if nobody else can access * this mm -- see comment above for justification. */ old_exe_file = rcu_dereference_raw(mm->exe_file); if (new_exe_file) get_file(new_exe_file); rcu_assign_pointer(mm->exe_file, new_exe_file); if (old_exe_file) fput(old_exe_file); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
jiri slaby | 32 | 56.14% | 1 | 33.33%
konstantin khlebnikov | 19 | 33.33% | 1 | 33.33%
davidlohr bueso | 6 | 10.53% | 1 | 33.33%
Total | 57 | 100.00% | 3 | 100.00%

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; rcu_read_lock(); exe_file = rcu_dereference(mm->exe_file); if (exe_file && !get_file_rcu(exe_file)) exe_file = NULL; rcu_read_unlock(); return exe_file; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
jiri slaby | 35 | 71.43% | 1 | 50.00%
konstantin khlebnikov | 14 | 28.57% | 1 | 50.00%
Total | 49 | 100.00% | 2 | 100.00%

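A minimal usage sketch (an assumed caller, not from fork.c): the returned file carries its own reference and must be dropped with fput():

struct file *exe_file = get_mm_exe_file(mm);

if (exe_file) {
	pr_info("exe: %pD\n", exe_file);	/* %pD prints the file's name */
	fput(exe_file);
}
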
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task) { struct file *exe_file = NULL; struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) { if (!(task->flags & PF_KTHREAD)) exe_file = get_mm_exe_file(mm); } task_unlock(task); return exe_file; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
mateusz guzik | 67 | 100.00% | 1 | 100.00%
Total | 67 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(get_task_exe_file);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count. User must release the mm via mmput()
 * after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task) { struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) { if (task->flags & PF_KTHREAD) mm = NULL; else atomic_inc(&mm->mm_users); } task_unlock(task); return mm; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 32 | 50.79% | 2 | 50.00%
andrew morton | 30 | 47.62% | 1 | 25.00%
oleg nesterov | 1 | 1.59% | 1 | 25.00%
Total | 63 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL_GPL(get_task_mm);
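Typical /proc-style use, as the comment says; a hedged sketch of an assumed caller:

struct mm_struct *mm = get_task_mm(task);

if (mm) {
	unsigned long rss = get_mm_rss(mm);	/* safe: we hold a mm_users ref */

	pr_info("rss: %lu pages\n", rss);
	mmput(mm);	/* pairs with the atomic_inc in get_task_mm() */
}
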
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) { struct mm_struct *mm; int err; err = mutex_lock_killable(&task->signal->cred_guard_mutex); if (err) return ERR_PTR(err); mm = get_task_mm(task); if (mm && mm != current->mm && !ptrace_may_access(task, mode)) { mmput(mm); mm = ERR_PTR(-EACCES); } mutex_unlock(&task->signal->cred_guard_mutex); return mm; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christopher yeoh | 99 | 100.00% | 1 | 100.00%
Total | 99 | 100.00% | 1 | 100.00%

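mm_access() is what gates access for interfaces like /proc/<pid>/mem; a sketch of a caller, loosely modeled on the proc code (details assumed):

struct mm_struct *mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS);

if (IS_ERR(mm))
	return PTR_ERR(mm);	/* e.g. -EACCES from the ptrace check */
if (!mm)
	return -ESRCH;		/* task has no mm (kernel thread or exited) */
/* ... read or modify the address space ... */
mmput(mm);
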

static void complete_vfork_done(struct task_struct *tsk) { struct completion *vfork; task_lock(tsk); vfork = tsk->vfork_done; if (likely(vfork)) { tsk->vfork_done = NULL; complete(vfork); } task_unlock(tsk); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 52 | 100.00% | 3 | 100.00%
Total | 52 | 100.00% | 3 | 100.00%


static int wait_for_vfork_done(struct task_struct *child, struct completion *vfork) { int killed; freezer_do_not_count(); killed = wait_for_completion_killable(vfork); freezer_count(); if (killed) { task_lock(child); child->vfork_done = NULL; task_unlock(child); } put_task_struct(child); return killed; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 62 | 100.00% | 2 | 100.00%
Total | 62 | 100.00% | 2 | 100.00%

/*
 * Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm) { /* Get rid of any futexes when releasing the mm */ #ifdef CONFIG_FUTEX if (unlikely(tsk->robust_list)) { exit_robust_list(tsk); tsk->robust_list = NULL; } #ifdef CONFIG_COMPAT if (unlikely(tsk->compat_robust_list)) { compat_exit_robust_list(tsk); tsk->compat_robust_list = NULL; } #endif if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); #endif uprobe_free_utask(tsk); /* Get rid of any cached register state */ deactivate_mm(tsk, mm); /* * Signal userspace if we're not exiting with a core dump * because we want to leave the value intact for debugging * purposes. */ if (tsk->clear_child_tid) { if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. */ put_user(0, tsk->clear_child_tid); sys_futex(tsk->clear_child_tid, FUTEX_WAKE, 1, NULL, NULL, 0); } tsk->clear_child_tid = NULL; } /* * All done, finally we can wake up parent and return this mm to him. * Also kthread_stop() uses this completion for synchronization. */ if (tsk->vfork_done) complete_vfork_done(tsk); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 63 | 34.81% | 2 | 11.76%
ingo molnar | 25 | 13.81% | 4 | 23.53%
thomas gleixner | 19 | 10.50% | 1 | 5.88%
eric dumazet | 18 | 9.94% | 1 | 5.88%
peter zijlstra | 16 | 8.84% | 1 | 5.88%
konstantin khlebnikov | 12 | 6.63% | 1 | 5.88%
roland mcgrath | 8 | 4.42% | 1 | 5.88%
pre-git | 8 | 4.42% | 2 | 11.76%
srikar dronamraju | 5 | 2.76% | 1 | 5.88%
michal hocko | 4 | 2.21% | 1 | 5.88%
andrew morton | 2 | 1.10% | 1 | 5.88%
david s. miller | 1 | 0.55% | 1 | 5.88%
Total | 181 | 100.00% | 17 | 100.00%

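The clear_child_tid block above is the kernel half of thread joining: user space (e.g. glibc's pthread_join) passes CLONE_CHILD_CLEARTID so that, on thread exit, the kernel zeroes the TID word and issues a FUTEX_WAKE on it. A hedged user-space sketch of the waiting side:

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t child_tid;	/* kernel fills this (CLONE_CHILD_SETTID) and zeroes
			   plus wakes it on exit (CLONE_CHILD_CLEARTID) */

static void join_thread(void)
{
	pid_t t;

	/* loop until mm_release() has zeroed child_tid and woken us */
	while ((t = __atomic_load_n(&child_tid, __ATOMIC_ACQUIRE)) != 0)
		syscall(SYS_futex, &child_tid, FUTEX_WAIT, t, NULL, NULL, 0);
}
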
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk) { struct mm_struct *mm, *oldmm = current->mm; int err; mm = allocate_mm(); if (!mm) goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); if (!mm_init(mm, tsk)) goto fail_nomem; err = dup_mmap(mm, oldmm); if (err) goto free_pt; mm->hiwater_rss = get_mm_rss(mm); mm->hiwater_vm = mm->total_vm; if (mm->binfmt && !try_module_get(mm->binfmt->module)) goto free_pt; return mm; free_pt: /* don't put binfmt in mmput, we haven't got module yet */ mm->binfmt = NULL; mmput(mm); fail_nomem: return NULL; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
janak desai | 112 | 79.43% | 1 | 25.00%
hiroshi shimamoto | 26 | 18.44% | 1 | 25.00%
pavel emelianov | 2 | 1.42% | 1 | 25.00%
daeseok youn | 1 | 0.71% | 1 | 25.00%
Total | 141 | 100.00% | 4 | 100.00%


static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) { struct mm_struct *mm, *oldmm; int retval; tsk->min_flt = tsk->maj_flt = 0; tsk->nvcsw = tsk->nivcsw = 0; #ifdef CONFIG_DETECT_HUNG_TASK tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; #endif tsk->mm = NULL; tsk->active_mm = NULL; /* * Are we cloning a kernel thread? * * We need to steal a active VM for that.. */ oldmm = current->mm; if (!oldmm) return 0; /* initialize the new vmacache entries */ vmacache_flush(tsk); if (clone_flags & CLONE_VM) { atomic_inc(&oldmm->mm_users); mm = oldmm; goto good_mm; } retval = -ENOMEM; mm = dup_mm(tsk); if (!mm) goto fail_nomem; good_mm: tsk->mm = mm; tsk->active_mm = mm; return 0; fail_nomem: return retval; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 126 | 78.26% | 11 | 73.33%
mandeep singh baines | 17 | 10.56% | 1 | 6.67%
andrew morton | 10 | 6.21% | 1 | 6.67%
davidlohr bueso | 6 | 3.73% | 1 | 6.67%
janak desai | 2 | 1.24% | 1 | 6.67%
Total | 161 | 100.00% | 15 | 100.00%


static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ spin_lock(&fs->lock); if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 74 | 74.00% | 6 | 46.15%
al viro | 17 | 17.00% | 2 | 15.38%
nick piggin | 3 | 3.00% | 1 | 7.69%
thomas gleixner | 3 | 3.00% | 1 | 7.69%
jan blunck | 1 | 1.00% | 1 | 7.69%
arnaldo carvalho de melo | 1 | 1.00% | 1 | 7.69%
andrew morton | 1 | 1.00% | 1 | 7.69%
Total | 100 | 100.00% | 13 | 100.00%


static int copy_files(unsigned long clone_flags, struct task_struct *tsk) { struct files_struct *oldf, *newf; int error = 0; /* * A background process may not have any files ... */ oldf = current->files; if (!oldf) goto out; if (clone_flags & CLONE_FILES) { atomic_inc(&oldf->count); goto out; } newf = dup_fd(oldf, &error); if (!newf) goto out; tsk->files = newf; error = 0; out: return error; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
janak desai | 95 | 100.00% | 1 | 100.00%
Total | 95 | 100.00% | 1 | 100.00%


static int copy_io(unsigned long clone_flags, struct task_struct *tsk) { #ifdef CONFIG_BLOCK struct io_context *ioc = current->io_context; struct io_context *new_ioc; if (!ioc) return 0; /* * Share io context with parent, if CLONE_IO is set */ if (clone_flags & CLONE_IO) { ioc_task_link(ioc); tsk->io_context = ioc; } else if (ioprio_valid(ioc->ioprio)) { new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); if (unlikely(!new_ioc)) return -ENOMEM; new_ioc->ioprio = ioc->ioprio; put_io_context(new_ioc); } #endif return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
jens axboe | 93 | 82.30% | 2 | 50.00%
tejun heo | 20 | 17.70% | 2 | 50.00%
Total | 113 | 100.00% | 4 | 100.00%


static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; if (clone_flags & CLONE_SIGHAND) { atomic_inc(&current->sighand->count); return 0; } sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); rcu_assign_pointer(tsk->sighand, sig); if (!sig) return -ENOMEM; atomic_set(&sig->count, 1); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 68 | 67.33% | 7 | 70.00%
linus torvalds | 28 | 27.72% | 1 | 10.00%
ingo molnar | 4 | 3.96% | 1 | 10.00%
andrew morton | 1 | 0.99% | 1 | 10.00%
Total | 101 | 100.00% | 10 | 100.00%


void __cleanup_sighand(struct sighand_struct *sighand) { if (atomic_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); /* * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it * without an RCU grace period, see __lock_task_sighand(). */ kmem_cache_free(sighand_cachep, sighand); } }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 35 | 100.00% | 4 | 100.00%
Total | 35 | 100.00% | 4 | 100.00%

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig) { unsigned long cpu_limit; cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); if (cpu_limit != RLIM_INFINITY) { sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); sig->cputimer.running = true; } /* The timer lists. */ INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); INIT_LIST_HEAD(&sig->cpu_timers[2]); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
frank mayhar | 45 | 50.00% | 1 | 20.00%
oleg nesterov | 30 | 33.33% | 1 | 20.00%
jiri slaby | 13 | 14.44% | 1 | 20.00%
jason low | 2 | 2.22% | 2 | 40.00%
Total | 90 | 100.00% | 5 | 100.00%

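The RLIMIT_CPU value consumed here is the one user space arms with setrlimit(); a hedged sketch of triggering the resulting group cputimer:

#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };

	setrlimit(RLIMIT_CPU, &rl);	/* becomes cputime_expires.prof_exp above */
	for (;;)
		;	/* burn CPU: SIGXCPU after ~1s, SIGKILL at the 2s hard limit */
}
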

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; if (clone_flags & CLONE_THREAD) return 0; sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; sig->nr_threads = 1; atomic_set(&sig->live, 1); atomic_set(&sig->sigcnt, 1); /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); init_waitqueue_head(&sig->wait_chldexit); sig->curr_target = tsk; init_sigpending(&sig->shared_pending); INIT_LIST_HEAD(&sig->posix_timers); seqlock_init(&sig->stats_lock); prev_cputime_init(&sig->prev_cputime); hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); sig->real_timer.function = it_real_fn; task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); posix_cpu_timers_init_group(sig); tty_audit_fork(sig); sched_autogroup_fork(sig); sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; sig->has_child_subreaper = current->signal->has_child_subreaper || current->signal->is_child_subreaper; mutex_init(&sig->cred_guard_mutex); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
roland mcgrath | 59 | 21.85% | 5 | 16.13%
linus torvalds | 55 | 20.37% | 1 | 3.23%
oleg nesterov | 48 | 17.78% | 5 | 16.13%
lennart poettering | 16 | 5.93% | 1 | 3.23%
ingo molnar | 11 | 4.07% | 1 | 3.23%
andrew morton | 11 | 4.07% | 3 | 9.68%
kosaki motohiro | 10 | 3.70% | 2 | 6.45%
mandeep singh baines | 10 | 3.70% | 1 | 3.23%
thomas gleixner | 9 | 3.33% | 2 | 6.45%
peter zijlstra | 9 | 3.33% | 2 | 6.45%
rik van riel | 8 | 2.96% | 1 | 3.23%
david rientjes | 8 | 2.96% | 1 | 3.23%
miloslav trmac | 5 | 1.85% | 1 | 3.23%
mike galbraith | 5 | 1.85% | 1 | 3.23%
pre-git | 4 | 1.48% | 2 | 6.45%
veaceslav falico | 1 | 0.37% | 1 | 3.23%
george anzinger | 1 | 0.37% | 1 | 3.23%
Total | 270 | 100.00% | 31 | 100.00%


static void copy_seccomp(struct task_struct *p) { #ifdef CONFIG_SECCOMP /* * Must be called with sighand->lock held, which is common to * all threads in the group. Holding cred_guard_mutex is not * needed because this new task is not yet running and cannot * be racing exec. */ assert_spin_locked(&current->sighand->siglock); /* Ref-count the new filter user, and assign it. */ get_seccomp_filter(current); p->seccomp = current->seccomp; /* * Explicitly enable no_new_privs here in case it got set * between the task_struct being duplicated and holding the * sighand lock. The seccomp state and nnp must be in sync. */ if (task_no_new_privs(current)) task_set_no_new_privs(p); /* * If the parent gained a seccomp mode after copying thread * flags and between before we held the sighand lock, we have * to manually enable the seccomp thread flag here. */ if (p->seccomp.mode != SECCOMP_MODE_DISABLED) set_tsk_thread_flag(p, TIF_SECCOMP); #endif }

Contributors

Person | Tokens | Prop | Commits | CommitProp
kees cook | 71 | 98.61% | 1 | 50.00%
guenter roeck | 1 | 1.39% | 1 | 50.00%
Total | 72 | 100.00% | 2 | 100.00%

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; return task_pid_vnr(current); }
static void rt_mutex_init_task(struct task_struct *p) { raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES p->pi_waiters = RB_ROOT; p->pi_waiters_leftmost = NULL; p->pi_blocked_on = NULL; #endif }

Contributors

Person | Tokens | Prop | Commits | CommitProp
ingo molnar | 30 | 71.43% | 1 | 25.00%
peter zijlstra | 8 | 19.05% | 1 | 25.00%
zilvinas valinskas | 3 | 7.14% | 1 | 25.00%
thomas gleixner | 1 | 2.38% | 1 | 25.00%
Total | 42 | 100.00% | 4 | 100.00%

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk) { tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[1]); INIT_LIST_HEAD(&tsk->cpu_timers[2]); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
frank mayhar | 66 | 97.06% | 1 | 50.00%
martin schwidefsky | 2 | 2.94% | 1 | 50.00%
Total | 68 | 100.00% | 2 | 100.00%


static inline void init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { task->pids[type].pid = pid; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
oleg nesterov | 32 | 100.00% | 1 | 100.00%
Total | 32 | 100.00% | 1 | 100.00%

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace, unsigned long tls, int node) { int retval; struct task_struct *p; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid or user namespace * do not allow it to share a thread group with the forking task. */ if (clone_flags & CLONE_THREAD) { if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || (task_active_pid_ns(current) != current->nsproxy->pid_ns_for_children)) return ERR_PTR(-EINVAL); } retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current, node); if (!p) goto fork_out; ftrace_graph_init_task(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_free; } current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. */ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_init(&p->vtime_seqcount); p->vtime_snap = 0; p->vtime_snap_whence = VTIME_INACTIVE; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); #endif p->default_timer_slack_ns = current->timer_slack_ns; task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); p->start_time = ktime_get_ns(); p->real_start_time = ktime_get_boot_ns(); p->io_context = NULL; p->audit_context = NULL; cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_threadgroup_lock; } #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif /* Perform scheduler related setup. Assign this task to a CPU. */ retval = sched_fork(clone_flags, p); if (retval) goto bad_fork_cleanup_policy; retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_audit; retval = copy_files(clone_flags, p); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children); if (IS_ERR(pid)) { retval = PTR_ERR(pid); goto bad_fork_cleanup_thread; } } p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_BLOCK p->plug = NULL; #endif #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) sas_ss_reset(p); /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { p->exit_signal = -1; p->group_leader = current->group_leader; p->tgid = current->tgid; } else { if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->group_leader = p; p->tgid = p->pid; } p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->pdeath_signal = 0; INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; threadgroup_change_begin(current); /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted that the new process's css_set can be changed * between here and cgroup_post_fork() if an organisation operation is in * progress. */ retval = cgroup_can_fork(p); if (retval) goto bad_fork_free_pid; /* * Make it visible to the rest of the system, but dont wake it up yet. * Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(&current->sighand->siglock); /* * Copy seccomp details explicitly here, in case they were changed * before holding sighand lock. */ copy_seccomp(p); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * its process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). */ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_cancel_cgroup; } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); init_task_pid(p, PIDTYPE_PID, pid); if (thread_group_leader(p)) { init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); init_task_pid(p, PIDTYPE_SID, task_session(current)); if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->leader_pid = pid; p->signal->tty = tty_kref_get(current->signal->tty); list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); attach_pid(p, PIDTYPE_PGID); attach_pid(p, PIDTYPE_SID); __this_cpu_inc(process_counts); } else { current->signal->nr_threads++; atomic_inc(&current->signal->live); atomic_inc(&current->signal->sigcnt); list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } attach_pid(p, PIDTYPE_PID); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); uprobe_copy_process(p, clone_flags); return p; bad_fork_cancel_cgroup: cgroup_cancel_fork(p); bad_fork_free_pid: threadgroup_change_end(current); if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_thread: exit_thread(p); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: #endif delayacct_tsk_free(p); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); }

Contributors

Person                      Tokens    Prop  Commits  CommitProp
oleg nesterov                  365  19.34%       24      10.53%
ingo molnar                    245  12.98%       17       7.46%
pre-git                        231  12.24%       43      18.86%
andrew morton                  132   7.00%       14       6.14%
linus torvalds                  89   4.72%       14       6.14%
eric w. biederman               72   3.82%        8       3.51%
pavel emelianov                 52   2.76%        2       0.88%
david howells                   43   2.28%        8       3.51%
daniel rebelo de oliveira       37   1.96%        1       0.44%
kamezawa hiroyuki               34   1.80%        3       1.32%
jens axboe                      31   1.64%        3       1.32%
michal hocko                    29   1.54%        2       0.88%
peter zijlstra                  28   1.48%        5       2.19%
sukadev bhattiprolu             27   1.43%        2       0.88%
frederic weisbecker             26   1.38%        4       1.75%
albert cahalan                  23   1.22%        1       0.44%
aleksa sarai                    23   1.22%        1       0.44%
fengguang wu                    22   1.17%        2       0.88%
laurent vivier                  17   0.90%        2       0.88%
greg kroah-hartman              17   0.90%        3       1.32%
dave olien                      17   0.90%        1       0.44%
goto masanori                   17   0.90%        1       0.44%
kent overstreet                 15   0.79%        1       0.44%
david rientjes                  15   0.79%        1       0.44%
roland mcgrath                  13   0.69%        3       1.32%
arjan van de ven                13   0.69%        2       0.88%
balbir singh                    13   0.69%        2       0.88%
jiri slaby                      12   0.64%        2       0.88%
shailabh nagar                  11   0.58%        2       0.88%
eric paris                      10   0.53%        1       0.44%
paul menage                     10   0.53%        2       0.88%
dario faggioli                   9   0.48%        1       0.44%
michael neuling                  9   0.48%        1       0.44%
serge hallyn                     9   0.48%        2       0.88%
mel gorman                       8   0.42%        1       0.44%
kirill korotaev                  8   0.42%        1       0.44%
tejun heo                        7   0.37%        2       0.88%
li zefan                         7   0.37%        2       0.88%
tomas janousek                   7   0.37%        1       0.44%
josh triplett                    7   0.37%        1       0.44%
daniel jacobowitz                7   0.37%        1       0.44%
vasiliy kulikov                  7   0.37%        1       0.44%
oren laadan                      6   0.32%        1       0.44%
rusty russell                    6   0.32%        1       0.44%
kees cook                        6   0.32%        1       0.44%
david hildenbrand                6   0.32%        1       0.44%
louis rilling                    6   0.32%        1       0.44%
john l. byrne                    6   0.32%        1       0.44%
andi kleen                       5   0.26%        1       0.44%
paul e. mckenney                 5   0.26%        2       0.88%
alexey dobriyan                  5   0.26%        1       0.44%
jack miller                      5   0.26%        1       0.44%
jay lan                          5   0.26%        1       0.44%
steven rostedt                   5   0.26%        1       0.44%
thomas gleixner                  4   0.21%        2       0.88%
paul jackson                     4   0.21%        2       0.88%
ben blum                         4   0.21%        1       0.44%
prasanna meda                    3   0.16%        1       0.44%
andrea righi                     3   0.16%        1       0.44%
stas sergeev                     3   0.16%        1       0.44%
alan cox                         3   0.16%        1       0.44%
william lee irwin iii            3   0.16%        2       0.88%
srivatsa vaddagiri               3   0.16%        1       0.44%
rik van riel                     2   0.11%        1       0.44%
andries brouwer                  2   0.11%        1       0.44%
lee schermerhorn                 2   0.11%        2       0.88%
al viro                          2   0.11%        1       0.44%
andy lutomirski                  2   0.11%        1       0.44%
martin schwidefsky               2   0.11%        1       0.44%
david mosberger                  1   0.05%        1       0.44%
frank mayhar                     1   0.05%        1       0.44%
christoph lameter                1   0.05%        1       0.44%
mike galbraith                   1   0.05%        1       0.44%
dave mccracken                   1   0.05%        1       0.44%
Total                         1887 100.00%      228     100.00%
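The bad_fork_* ladder above is the kernel's standard goto-based error unwind: each label releases exactly the resources acquired before the corresponding failure point, in reverse acquisition order, so a single goto tears down everything set up so far, while the success path returns before the ladder. A minimal userspace sketch of the same idiom (all names here are illustrative, not taken from fork.c):

#include <stdlib.h>

/* Hypothetical three-step setup mirroring the bad_fork_* unwind in
 * copy_process(): a failure at step N jumps to the label that frees
 * steps N-1..1. */
static int setup_three(void)
{
	int retval = -1;
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto cleanup_a;
	c = malloc(16);
	if (!c)
		goto cleanup_b;

	/* Success: return before the unwind labels, just as copy_process()
	 * returns p before bad_fork_cancel_cgroup. (Freed here only so the
	 * demo does not leak.) */
	free(c);
	free(b);
	free(a);
	return 0;

cleanup_b:
	free(b);
cleanup_a:
	free(a);
out:
	return retval;
}

int main(void)
{
	return setup_three() ? EXIT_FAILURE : EXIT_SUCCESS;
}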


static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
oleg nesterov                   53 100.00%        1     100.00%
Total                           53 100.00%        1     100.00%


struct task_struct *fork_idle(int cpu)
{
	struct task_struct *task;

	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
			    cpu_to_node(cpu));
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

Contributors

PersonTokensPropCommitsCommitProp
william lee irwin iiiwilliam lee irwin iii4059.70%112.50%
oleg nesterovoleg nesterov913.43%112.50%
andi kleenandi kleen57.46%112.50%
ingo molnaringo molnar45.97%112.50%
akinobu mitaakinobu mita34.48%112.50%
josh triplettjosh triplett22.99%112.50%
sukadev bhattiprolusukadev bhattiprolu22.99%112.50%
roland mcgrathroland mcgrath22.99%112.50%
Total67100.00%8100.00%

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long _do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr,
	      unsigned long tls)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

Contributors

PersonTokensPropCommitsCommitProp
linus torvaldslinus torvalds7828.89%313.04%
tejun heotejun heo5420.00%28.70%
matthew dempskymatthew dempsky259.26%14.35%
daniel jacobowitzdaniel jacobowitz228.15%28.70%
oleg nesterovoleg nesterov145.19%28.70%
ingo molnaringo molnar134.81%313.04%
william lee irwin iiiwilliam lee irwin iii134.81%14.35%
pavel emelianovpavel emelianov124.44%14.35%
mathieu desnoyersmathieu desnoyers72.59%14.35%
sukadev bhattiprolusukadev bhattiprolu72.59%14.35%
josh triplettjosh triplett72.59%14.35%
al viroal viro62.22%14.35%
eric w. biedermaneric w. biederman51.85%14.35%
roland mcgrathroland mcgrath51.85%28.70%
andi kleenandi kleen20.74%14.35%
Total270100.00%23100.00%
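Every entry point below funnels into _do_fork(), so the clone flag bits alone select the semantics and the ptrace event chosen above. As a hedged userspace illustration, assuming the x86_64 raw clone(2) argument order (flags, stack, parent_tid, child_tid, tls), passing only SIGCHLD in the CSIGNAL bits reproduces fork(2) and would be reported as PTRACE_EVENT_FORK:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>

int main(void)
{
	/* Raw clone with only an exit signal in CSIGNAL: equivalent to
	 * fork(2).  Adding CLONE_VFORK instead would select
	 * PTRACE_EVENT_VFORK in _do_fork(). */
	long pid = syscall(SYS_clone, SIGCHLD, 0UL, NULL, NULL, 0UL);

	if (pid == 0) {
		printf("child %d\n", getpid());
		_exit(0);
	} else if (pid > 0) {
		waitpid((pid_t)pid, NULL, 0);
		printf("parent reaped %ld\n", pid);
	} else {
		perror("clone");
		return 1;
	}
	return 0;
}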

#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* For compatibility with architectures that call do_fork directly rather than
 * using the syscall entry points below. */
long do_fork(unsigned long clone_flags,
	     unsigned long stack_start,
	     unsigned long stack_size,
	     int __user *parent_tidptr,
	     int __user *child_tidptr)
{
	return _do_fork(clone_flags, stack_start, stack_size,
			parent_tidptr, child_tidptr, 0);
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
josh triplett                   43 100.00%        1     100.00%
Total                           43 100.00%        1     100.00%

#endif

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL, 0);
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
al viro                         48  94.12%        1      50.00%
josh triplett                    3   5.88%        1      50.00%
Total                           51 100.00%        2     100.00%
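Note the flags kernel_thread() forces: CLONE_VM makes the child share the kernel address space, and CLONE_UNTRACED hides it from ptrace. Most kernel code does not call kernel_thread() directly but goes through the kthread wrappers built on top of it. A minimal module-style sketch, with a hypothetical worker function, might look like this:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/err.h>

static struct task_struct *worker;

/* Hypothetical thread function: loop until kthread_stop() is called. */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do periodic work ... */
		msleep(100);
	}
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo-worker");
	return PTR_ERR_OR_ZERO(worker);
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");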

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL, 0);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
}
#endif

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

Contributors

PersonTokensPropCommitsCommitProp
oleg nesterovoleg nesterov2163.64%125.00%
davide libenzidavide libenzi927.27%250.00%
christoph lameterchristoph lameter39.09%125.00%
Total33100.00%4100.00%


void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git8253.59%19.09%
linus torvaldslinus torvalds2113.73%218.18%
vladimir davydovvladimir davydov127.84%19.09%
david howellsdavid howells117.19%218.18%
vegard nossumvegard nossum106.54%19.09%
andrew mortonandrew morton106.54%19.09%
al viroal viro31.96%19.09%
oleg nesterovoleg nesterov31.96%19.09%
ravikiran g thirumalairavikiran g thirumalai10.65%19.09%
Total153100.00%11100.00%
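proc_caches_init() gives each structure copied at fork time its own slab cache, so hot-path allocations come from pre-sized, cache-aligned pools (SLAB_ACCOUNT additionally charges them to memory cgroups). A hedged module-style sketch of the same kmem_cache_create() pattern, using a made-up object type:

#include <linux/slab.h>
#include <linux/module.h>

/* Hypothetical object type, standing in for sighand_struct etc. */
struct demo_obj {
	int value;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	struct demo_obj *obj;

	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cachep)
		return -ENOMEM;

	/* Allocate and free one object, as copy_process() does per fork. */
	obj = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
	if (obj)
		kmem_cache_free(demo_cachep, obj);
	return 0;
}

static void __exit demo_cache_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_cache_init);
module_exit(demo_cache_exit);
MODULE_LICENSE("GPL");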

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (atomic_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
eric w. biederman               49  39.20%        3      50.00%
oleg nesterov                   44  35.20%        1      16.67%
janak desai                     30  24.00%        1      16.67%
aditya kali                      2   1.60%        1      16.67%
Total                          125 100.00%        6     100.00%
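These checks enforce that anything touching shared thread state (address space, signal handlers, signal queues) can only be unshared by a single-threaded process. From userspace the interface is unshare(2); a hedged example that requests a private mount namespace (it needs CAP_SYS_ADMIN, or CLONE_NEWUSER as well, to succeed unprivileged):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Give this process a private mount namespace; per sys_unshare()
	 * below, CLONE_NEWNS implies CLONE_FS, so the fs_struct is
	 * unshared automatically. */
	if (unshare(CLONE_NEWNS) == -1) {
		perror("unshare(CLONE_NEWNS)");	/* EPERM without privilege */
		return EXIT_FAILURE;
	}
	puts("now in a private mount namespace");
	return EXIT_SUCCESS;
}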

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
janak desai                     57  78.08%        2      50.00%
al viro                         16  21.92%        2      50.00%
Total                           73 100.00%        4     100.00%

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
janak desai                     77 100.00%        2     100.00%
Total                           77 100.00%        2     100.00%

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
al viro                         81 100.00%        2     100.00%
Total                           81 100.00%        2     100.00%


int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}

Contributors

Person                      Tokens    Prop  Commits  CommitProp
heinrich schuchardt            108 100.00%        1     100.00%
Total                          108 100.00%        1     100.00%
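sysctl_max_threads() is the handler behind /proc/sys/kernel/threads-max: proc_dointvec_minmax() clamps writes to [MIN_THREADS, MAX_THREADS] before set_max_threads() applies the new limit. A small userspace sketch that reads the current value:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* threads-max is the tunable behind the max_threads limit above. */
	FILE *f = fopen("/proc/sys/kernel/threads-max", "r");
	long limit;

	if (!f || fscanf(f, "%ld", &limit) != 1) {
		perror("threads-max");
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("system-wide thread limit: %ld\n", limit);
	return EXIT_SUCCESS;
}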


Overall Contributors

Person                      Tokens    Prop  Commits  CommitProp
pre-git                       1026  11.95%       84      14.66%
oleg nesterov                  871  10.15%       46       8.03%
janak desai                    505   5.88%        4       0.70%
linus torvalds                 473   5.51%       28       4.89%
andrew morton                  417   4.86%       36       6.28%
ingo molnar                    396   4.61%       30       5.24%
al viro                        383   4.46%       14       2.44%
eric w. biederman              232   2.70%       13       2.27%
heinrich schuchardt            205   2.39%        3       0.52%
thomas gleixner                178   2.07%       12       2.09%
konstantin khlebnikov          177   2.06%        4       0.70%
michal hocko                   160   1.86%        6       1.05%
vladimir davydov               160   1.86%        8       1.40%
jens axboe                     142   1.65%        6       1.05%
hugh dickins                   140   1.63%        9       1.57%
david howells                  127   1.48%       10       1.75%
christoph hellwig              117   1.36%        2       0.35%
frank mayhar                   114   1.33%        1       0.17%
roland mcgrath                 109   1.27%       11       1.92%
christopher yeoh                99   1.15%        1       0.17%
kees cook                       96   1.12%        2       0.35%
pavel emelianov                 92   1.07%        5       0.87%
jiri slaby                      92   1.07%        3       0.52%
tejun heo                       86   1.00%        6       1.05%
josh triplett                   85   0.99%        1       0.17%
mateusz guzik                   73   0.85%        1       0.17%
william lee irwin iii           73   0.85%        4       0.70%
eric dumazet                    64   0.75%        5       0.87%
andrea arcangeli                63   0.73%        7       1.22%
peter zijlstra                  62   0.72%        9       1.57%
kosaki motohiro                 60   0.70%        4       0.70%
kirill a. shutemov              58   0.68%        5       0.87%
daniel rebelo de oliveira       50   0.58%        1       0.17%
kamezawa hiroyuki               49   0.57%        4       0.70%
rik van riel                    49   0.57%        6       1.05%
alex thorlton                   47   0.55%        1       0.17%
hidehiro kawai                  45   0.52%        1       0.17%
alexey dobriyan                 42   0.49%        3       0.52%
hiroshi shimamoto               41   0.48%        1       0.17%
suresh siddha                   40   0.47%        2       0.35%
serge hallyn                    40   0.47%        4       0.70%
sukadev bhattiprolu             36   0.42%        3       0.52%
frederic weisbecker             34   0.40%        7       1.22%
michal simek                    32   0.37%        1       0.17%
manfred spraul                  31   0.36%        2       0.35%
andy lutomirski                 31   0.36%        3       0.52%
akinobu mita                    31   0.36%        2       0.35%
paul e. mckenney                30   0.35%        4       0.70%
fujita tomonori                 29   0.34%        1       0.17%
aaron tomlin                    29   0.34%        1       0.17%
daniel jacobowitz               29   0.34%        3       0.52%
arjan van de ven                28   0.33%        3       0.52%
mandeep singh baines            27   0.31%        2       0.35%
davidlohr bueso                 26   0.30%        4       0.70%
john levon                      26   0.30%        2       0.35%
andi kleen                      25   0.29%        1       0.17%
matthew dempsky                 25   0.29%        1       0.17%
adrian bunk                     25   0.29%        1       0.17%
mike galbraith                  24   0.28%        5       0.87%
mel gorman                      24   0.28%        2       0.35%
jack miller                     24   0.28%        1       0.17%
david rientjes                  23   0.27%        2       0.35%
albert cahalan                  23   0.27%        1       0.17%
aleksa sarai                    23   0.27%        1       0.17%
shailabh nagar                  22   0.26%        3       0.52%
fengguang wu                    22   0.26%        2       0.35%
christoph lameter               19   0.22%        4       0.70%
kent overstreet                 18   0.21%        2       0.35%
greg kroah-hartman              18   0.21%        3       0.52%
dave olien                      17   0.20%        1       0.17%
david mosberger                 17   0.20%        1       0.17%
laurent vivier                  17   0.20%        2       0.35%
goto masanori                   17   0.20%        1       0.17%
lennart poettering              16   0.19%        1       0.17%
heiko carstens                  16   0.19%        2       0.35%
balbir singh                    14   0.16%        3       0.52%
srikar dronamraju               13   0.15%        2       0.35%
paul menage                     13   0.15%        2       0.35%
avi kivity                      13   0.15%        2       0.35%
kirill korotaev                 12   0.14%        2       0.35%
vegard nossum                   12   0.14%        1       0.17%
jay lan                         11   0.13%        2       0.35%
prasanna meda                   11   0.13%        1       0.17%
gideon israel dsouza            11   0.13%        1       0.17%
ravikiran g thirumalai          10   0.12%        1       0.17%
eric paris                      10   0.12%        1       0.17%
matt helsley                    10   0.12%        2       0.35%
huang shijie                     9   0.10%        2       0.35%
davide libenzi                   9   0.10%        2       0.35%
michael neuling                  9   0.10%        1       0.17%
dario faggioli                   9   0.10%        1       0.17%
dmitriy vyukov                   8   0.09%        1       0.17%
sebastian andrzej siewior        8   0.09%        1       0.17%
jeremy fitzhardinge              8   0.09%        1       0.17%
miloslav trmac                   8   0.09%        1       0.17%
steven rostedt                   8   0.09%        2       0.35%
will drewry                      8   0.09%        1       0.17%
vasiliy kulikov                  7   0.08%        1       0.17%
tomas janousek                   7   0.08%        1       0.17%
li zefan                         7   0.08%        2       0.35%
eric sandeen                     7   0.08%        1       0.17%
mathieu desnoyers                7   0.08%        1       0.17%
john l. byrne                    6   0.07%        1       0.17%
michel lespinasse                6   0.07%        2       0.35%
david hildenbrand                6   0.07%        1       0.17%
richard henderson                6   0.07%        1       0.17%
rusty russell                    6   0.07%        1       0.17%
oren laadan                      6   0.07%        1       0.17%
dave jones                       6   0.07%        1       0.17%
louis rilling                    6   0.07%        1       0.17%
nick piggin                      5   0.06%        1       0.17%
luca barbieri                    5   0.06%        1       0.17%
ben blum                         4   0.05%        1       0.17%
alan cox                         4   0.05%        2       0.35%
badari pulavarty                 4   0.05%        1       0.17%
martin schwidefsky               4   0.05%        1       0.17%
david herrmann                   4   0.05%        1       0.17%
paul jackson                     4   0.05%        2       0.35%
tim schmielau                    3   0.03%        1       0.17%
srivatsa vaddagiri               3   0.03%        1       0.17%
stas sergeev                     3   0.03%        1       0.17%
rafael j. wysocki                3   0.03%        1       0.17%
ying han                         3   0.03%        1       0.17%
randy dunlap                     3   0.03%        1       0.17%
sasha levin                      3   0.03%        1       0.17%
stanislaw gruszka                3   0.03%        1       0.17%
dipankar sarma                   3   0.03%        1       0.17%
andrea righi                     3   0.03%        1       0.17%
benjamin lahaise                 3   0.03%        1       0.17%
zilvinas valinskas               3   0.03%        1       0.17%
michael ellerman                 3   0.03%        1       0.17%
russell king                     2   0.02%        1       0.17%
aditya kali                      2   0.02%        1       0.17%
eric b munson                    2   0.02%        1       0.17%
roman zippel                     2   0.02%        1       0.17%
paul mackerras                   2   0.02%        1       0.17%
lee schermerhorn                 2   0.02%        2       0.35%
jason low                        2   0.02%        2       0.35%
dave hansen                      2   0.02%        1       0.17%
benjamin herrenschmidt           2   0.02%        1       0.17%
andries brouwer                  2   0.02%        1       0.17%
jan blunck                       1   0.01%        1       0.17%
arnaldo carvalho de melo         1   0.01%        1       0.17%
george anzinger                  1   0.01%        1       0.17%
david s. miller                  1   0.01%        1       0.17%
guenter roeck                    1   0.01%        1       0.17%
daeseok youn                     1   0.01%        1       0.17%
siddhesh poyarekar               1   0.01%        1       0.17%
stephen rothwell                 1   0.01%        1       0.17%
dave mccracken                   1   0.01%        1       0.17%
ian campbell                     1   0.01%        1       0.17%
veaceslav falico                 1   0.01%        1       0.17%
ralf baechle                     1   0.01%        1       0.17%
Total                         8584 100.00%      573     100.00%