cregit-Linux how code gets into the kernel

Release 4.15 kernel/events/uprobes.c

Directory: kernel/events
/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *      Srikar Dronamraju
 *      Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>


#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)

#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE


static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */

#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */


#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */

static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];

#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])


static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */

#define UPROBE_COPY_INSN	0


struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;		/* refcount, see get/put_uprobe() */
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;	/* protects ->consumers list */
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;	/* singly linked, see consumer_add() */
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;		/* probe location within the file */
	unsigned long		flags;		/* UPROBE_COPY_INSN, see prepare_uprobe() */

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, thread contests for a slot.  It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */

struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *      - is_register: indicates if we are in register context.
 *      - Return 1 if the specified virtual address is in an
 *        executable vma.
 */

/*
 * valid_vma - may @vma hold a probe?
 * While registering we additionally refuse writable mappings; while
 * unregistering we are lenient because vm_flags may have changed after
 * the breakpoint was inserted.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t mask = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		mask |= VM_WRITE;

	/* Must be file-backed and, of the masked bits, carry only VM_MAYEXEC. */
	return vma->vm_file && (vma->vm_flags & mask) == VM_MAYEXEC;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju2758.70%125.00%
Oleg Nesterov1941.30%375.00%
Total46100.00%4100.00%


/* Translate file offset @offset into a user virtual address inside @vma. */
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	loff_t mapped_from = (loff_t)vma->vm_pgoff << PAGE_SHIFT;

	return vma->vm_start + (offset - mapped_from);
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju2369.70%133.33%
Oleg Nesterov1030.30%266.67%
Total33100.00%3100.00%


/* Translate user virtual address @vaddr inside @vma into a file offset. */
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return (vaddr - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov35100.00%1100.00%
Total35100.00%1100.00%

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the COWed page we are replacing by @new_page
 * @new_page: the modified page we replace @old_page by
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	/* Charge the replacement page to the memcg before touching the pte. */
	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	/* Find the pte mapping old_page; if it moved, the caller retries. */
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	/* Install the anon rmap for new_page before it becomes visible. */
	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		/* File page replaced by an anon page: fix the mm counters. */
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	/* Atomically swap the pte to point at new_page, notifying mmu users. */
	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju12135.28%213.33%
Oleg Nesterov7622.16%640.00%
Kirill A. Shutemov6619.24%320.00%
Johannes Weiner4111.95%16.67%
Haggai Eran3510.20%16.67%
Jerome Marchand30.87%16.67%
Joerg Roedel10.29%16.67%
Total343100.00%15100.00%

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 *
 * Default implementation of is_swbp_insn().
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	/* Weak default; archs with several trap opcodes override this. */
	return *insn == UPROBE_SWBP_INSN;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju16100.00%2100.00%
Total16100.00%2100.00%

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 *
 * Default implementation of is_trap_insn().
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	/* By default the only recognized trap is the uprobe breakpoint. */
	return is_swbp_insn(insn);
}

Contributors

PersonTokensPropCommitsCommitProp
Ananth N. Mavinakayanahalli16100.00%1100.00%
Total16100.00%1100.00%


/* Copy @len bytes at @vaddr's offset within @page into @dst. */
static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	unsigned long off = vaddr & ~PAGE_MASK;
	void *base = kmap_atomic(page);

	memcpy(dst, base + off, len);
	kunmap_atomic(base);
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov52100.00%2100.00%
Total52100.00%2100.00%


/* Copy @len bytes from @src into @page at @vaddr's offset within the page. */
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	unsigned long off = vaddr & ~PAGE_MASK;
	void *base = kmap_atomic(page);

	memcpy(base + off, src, len);
	kunmap_atomic(base);
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov53100.00%1100.00%
Total53100.00%1100.00%


/*
 * Decide whether uprobe_write_opcode() actually needs to write @new_opcode
 * at @vaddr in @page. Returns 1 to proceed, 0 when there is nothing to do.
 */
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool old_is_swbp, new_is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	old_is_swbp = is_swbp_insn(&old_opcode);
	new_is_swbp = is_swbp_insn(new_opcode);

	/*
	 * register: skip if the breakpoint is already installed;
	 * unregister: skip if someone else already changed it back.
	 */
	return old_is_swbp != new_is_swbp;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov7798.72%375.00%
Ananth N. Mavinakayanahalli11.28%125.00%
Total78100.00%4100.00%

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	/* Bail out early if there is nothing to (un)install. */
	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	/* Build a private copy of the page carrying the new opcode. */
	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	/* -EAGAIN means the pte moved under us; start over. */
	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju12363.40%321.43%
Oleg Nesterov6432.99%642.86%
Lorenzo Stoakes31.55%214.29%
Kirill A. Shutemov31.55%214.29%
Dave Hansen10.52%17.14%
Total194100.00%14100.00%

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
		    unsigned long vaddr)
{
	/* Weak default; an arch may override how breakpoints are planted. */
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju2996.67%375.00%
Oleg Nesterov13.33%125.00%
Total30100.00%4100.00%

/**
 * set_orig_insn - Restore the original instruction.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			 unsigned long vaddr)
{
	/* Write back the original instruction that copy_insn() saved in ->insn. */
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju3694.74%360.00%
Oleg Nesterov25.26%240.00%
Total38100.00%5100.00%


static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	/* Take an extra reference; paired with put_uprobe(). */
	atomic_inc(&uprobe->ref);
	return uprobe;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov24100.00%1100.00%
Total24100.00%1100.00%


static void put_uprobe(struct uprobe *uprobe)
{
	/* Free the uprobe when the last reference is dropped. */
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov26100.00%1100.00%
Total26100.00%1100.00%


/*
 * Total order over uprobes used by the rbtree: by inode first, then by
 * offset within the inode. Returns <0, 0, or >0 like a comparator.
 */
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode != r->inode)
		return l->inode < r->inode ? -1 : 1;

	if (l->offset != r->offset)
		return l->offset < r->offset ? -1 : 1;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju73100.00%1100.00%
Total73100.00%1100.00%


/*
 * Look up the uprobe for inode:offset in the rbtree; the caller must hold
 * uprobes_treelock. On success an extra reference is taken.
 */
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe key = { .inode = inode, .offset = offset };
	struct rb_node *node = uprobes_tree.rb_node;

	while (node) {
		struct uprobe *cur = rb_entry(node, struct uprobe, rb_node);
		int cmp = match_uprobe(&key, cur);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return get_uprobe(cur);
	}

	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju10898.18%150.00%
Oleg Nesterov21.82%150.00%
Total110100.00%2100.00%

/* * Find a uprobe corresponding to a given inode:offset * Acquires uprobes_treelock */
/*
 * Find a uprobe corresponding to a given inode:offset.
 * Takes uprobes_treelock around the tree walk.
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *found;

	spin_lock(&uprobes_treelock);
	found = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return found;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju4395.56%150.00%
Oleg Nesterov24.44%150.00%
Total45100.00%2100.00%


static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	/* Standard rbtree descent to the insertion point. */
	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		/* Duplicate inode:offset - hand back the existing one, +1 ref. */
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju14398.62%150.00%
Oleg Nesterov21.38%150.00%
Total145100.00%2100.00%

/* * Acquire uprobes_treelock. * Matching uprobe already exists in rbtree; * increment (access refcount) and return the matching uprobe. * * No matching uprobe; insert the uprobe in rb_tree; * get a double refcount (access + creation) and return NULL. */
/*
 * Insert @uprobe under uprobes_treelock. Returns the already-present
 * matching uprobe (with an extra access ref) or NULL when @uprobe was
 * inserted (carrying access + creation refs).
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *existing;

	spin_lock(&uprobes_treelock);
	existing = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return existing;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju3895.00%150.00%
Oleg Nesterov25.00%150.00%
Total40100.00%2100.00%


static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		/* Lost the race: drop our copy and the inode ref it took. */
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju10092.59%150.00%
Oleg Nesterov87.41%150.00%
Total108100.00%2100.00%


static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	/* Push @uc onto the head of the consumer list under the write lock. */
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju4393.48%250.00%
Oleg Nesterov24.35%125.00%
Ingo Molnar12.17%125.00%
Total46100.00%4100.00%

/* * For uprobe @uprobe, delete the consumer @uc. * Return true if the @uc is deleted successfully * or return false. */
/*
 * For uprobe @uprobe, unlink the consumer @uc from the list.
 * Returns true if @uc was found and removed, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **pp;
	bool found = false;

	down_write(&uprobe->consumer_rwsem);
	/* Walk the link pointers so removal needs no "previous" bookkeeping. */
	for (pp = &uprobe->consumers; *pp; pp = &(*pp)->next) {
		if (*pp == uc) {
			*pp = uc->next;
			found = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return found;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju9098.90%266.67%
Ingo Molnar11.10%133.33%
Total91100.00%3100.00%


static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju6567.01%112.50%
Oleg Nesterov2929.90%675.00%
Kirill A. Shutemov33.09%112.50%
Total97100.00%8100.00%


/*
 * Copy the original instruction bytes at uprobe->offset from @filp into
 * uprobe->arch.insn, crossing page boundaries as needed.
 */
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		/* At most up to the end of the current page per iteration. */
		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov9266.19%480.00%
Srikar Dronamraju4733.81%120.00%
Total139100.00%5100.00%


/*
 * Copy and analyze the probed instruction once per uprobe; sets
 * UPROBE_COPY_INSN when done so later calls are a cheap test_bit().
 */
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	/* Re-check under the lock: another thread may have done the work. */
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	/* Refuse to probe an instruction that is itself a trap. */
	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov9051.72%1062.50%
Srikar Dronamraju8247.13%425.00%
Ananth N. Mavinakayanahalli21.15%212.50%
Total174100.00%16100.00%


static inline bool consumer_filter(struct uprobe_consumer *uc, enum uprobe_filter_ctx ctx, struct mm_struct *mm) { return !uc->filter || uc->filter(uc, ctx, mm); }

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov38100.00%2100.00%
Total38100.00%2100.00%


/*
 * Returns true as soon as any consumer of @uprobe accepts @mm;
 * false when every consumer declines.
 */
static bool filter_chain(struct uprobe *uprobe, enum uprobe_filter_ctx ctx,
			 struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool wanted = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc && !wanted; uc = uc->next)
		wanted = consumer_filter(uc, ctx, mm);
	up_read(&uprobe->consumer_rwsem);

	return wanted;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov83100.00%4100.00%
Total83100.00%4100.00%


static int install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		/* Roll back the speculative flag on failure. */
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov11186.72%450.00%
Srikar Dronamraju1713.28%450.00%
Total128100.00%8100.00%


static int remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
				unsigned long vaddr)
{
	/* MMF_HAS_UPROBES may now be stale; mark @mm for recalculation. */
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Srikar Dronamraju2865.12%350.00%
Oleg Nesterov1534.88%350.00%
Total43100.00%6100.00%


static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	/* rb_node is empty only after the uprobe was erased from the tree. */
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

Contributors

PersonTokensPropCommitsCommitProp
Oleg Nesterov22100.00%1100.00%
Total22100.00%1100.00%

/* * There could be threads that have already hit the breakpoint. They * will recheck the current insn and restart if find_uprobe() fails. * See find_active_uprobe(). */
static void delete_uprobe(struct uprobe *uprobe) { if (WARN_ON(!uprobe_is_active(uprobe))) return; spin_lock(&uprobes_treelock); rb_erase(&uprobe->rb_node, &uprobes_tree); spin_unlock(