Release 4.15 kernel/events/uprobes.c
/*
* User-space Probes (UProbes)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2008-2012
* Authors:
* Srikar Dronamraju
* Jim Keniston
* Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h> /* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h> /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */
#include <linux/swap.h> /* try_to_free_swap */
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/uprobes.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
static struct rb_root uprobes_tree = RB_ROOT;
/*
* Allows us to skip uprobe_mmap() if there are no uprobe events active
* at this time. A fine-grained per-inode count would probably be better?
*/
#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
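The bucketed mutexes above are keyed by the inode pointer; uprobe_mmap(), further down in this file, takes the bucket this way. A minimal sketch of the locking pattern (the helper name is hypothetical):
static void example_lock_mmap_bucket(struct inode *inode)
{
	struct mutex *m = uprobes_mmap_hash(inode);	/* pick a bucket by inode pointer */

	mutex_lock(m);
	/* ... build and install the pending uprobe list for this inode ... */
	mutex_unlock(m);
}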
static struct percpu_rw_semaphore dup_mmap_sem;
/* Have a copy of original instruction */
#define UPROBE_COPY_INSN 0
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
atomic_t ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
struct uprobe_consumer *consumers;
struct inode *inode; /* Also hold a ref to inode */
loff_t offset;
unsigned long flags;
/*
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
* insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
* line, copied to xol_area by xol_get_insn_slot().
*/
struct arch_uprobe arch;
};
/*
* Execute out of line area: anonymous executable mapping installed
* by the probed task to execute the copy of the original instruction
* mangled by set_swbp().
*
* On a breakpoint hit, the thread contends for a slot and frees it
* after the single-step completes. Currently a fixed number of slots
* is allocated.
*/
struct xol_area {
wait_queue_head_t wq; /* if all slots are busy */
atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */
struct vm_special_mapping xol_mapping;
struct page *pages[2];
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
unsigned long vaddr; /* Page(s) of instruction slots */
};
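The wait queue, slot counter and bitmap above form a simple slot allocator. A minimal sketch of how a slot would be claimed (the helper below is hypothetical; the real claim/release logic lives in xol_get_insn_slot()/xol_free_insn_slot() later in this file):
static unsigned long example_claim_xol_slot(struct xol_area *area)
{
	unsigned long slot_nr;

	for (;;) {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE &&
		    !test_and_set_bit(slot_nr, area->bitmap)) {
			atomic_inc(&area->slot_count);
			break;
		}
		/* all slots busy: sleep until a single-stepping thread frees one */
		wait_event(area->wq,
			   atomic_read(&area->slot_count) < UINSNS_PER_PAGE);
	}
	return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
}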
/*
* valid_vma: Verify whether the specified vma is an executable vma.
* Relax the restrictions while unregistering: vm_flags might have
* changed after the breakpoint was inserted.
* - is_register: indicates whether we are in register context.
* - Return true if the specified vma is a valid, executable vma.
*/
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
if (is_register)
flags |= VM_WRITE;
return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 27 | 58.70% | 1 | 25.00% |
Oleg Nesterov | 19 | 41.30% | 3 | 75.00% |
Total | 46 | 100.00% | 4 | 100.00% |
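A worked illustration of the mask trick in valid_vma(), with hypothetical flag combinations:
/*
 * Example (hypothetical flag sets): the check accepts a file-backed vma only
 * when VM_MAYEXEC is set and VM_HUGETLB, VM_MAYSHARE (plus VM_WRITE while
 * registering) are all clear:
 *
 *	VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC    -> valid for register and unregister
 *	VM_READ|VM_WRITE|VM_MAYWRITE|VM_MAYEXEC  -> rejected while registering only
 *	VM_EXEC|VM_MAYEXEC|VM_MAYSHARE           -> always rejected (shared mapping)
 */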
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 23 | 69.70% | 1 | 33.33% |
Oleg Nesterov | 10 | 30.30% | 2 | 66.67% |
Total | 33 | 100.00% | 3 | 100.00% |
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
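A worked example of the two conversions above, using hypothetical numbers:
/*
 * Worked example (hypothetical numbers, 4K pages): a vma with
 * vm_start = 0x400000 mapping the file from offset 0x2000 (vm_pgoff = 2).
 * A probe at file offset 0x2345 then sits at virtual address 0x400345:
 *
 *	offset_to_vaddr(): 0x400000 + 0x2345 - (2 << 12) = 0x400345
 *	vaddr_to_offset(): (2 << 12) + (0x400345 - 0x400000) = 0x2345
 */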
/**
* __replace_page - replace a page in a vma with a new page.
* Based on replace_page() in mm/ksm.c.
*
* @vma: vma that holds the pte pointing to @old_page
* @addr: address @old_page is mapped at
* @old_page: the COWed page we are replacing
* @new_page: the modified page we replace it with
*
* Returns 0 on success, a negative errno on failure.
*/
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
.page = old_page,
.vma = vma,
.address = addr,
};
int err;
/* For mmu_notifiers */
const unsigned long mmun_start = addr;
const unsigned long mmun_end = addr + PAGE_SIZE;
struct mem_cgroup *memcg;
VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
false);
if (err)
return err;
/* For try_to_free_swap() and munlock_vma_page() below */
lock_page(old_page);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
if (!page_vma_mapped_walk(&pvmw)) {
mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
get_page(new_page);
page_add_new_anon_rmap(new_page, vma, addr, false);
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, mm_counter_file(old_page));
inc_mm_counter(mm, MM_ANONPAGES);
}
flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
ptep_clear_flush_notify(vma, addr, pvmw.pte);
set_pte_at_notify(mm, addr, pvmw.pte,
mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
try_to_free_swap(old_page);
page_vma_mapped_walk_done(&pvmw);
if (vma->vm_flags & VM_LOCKED)
munlock_vma_page(old_page);
put_page(old_page);
err = 0;
unlock:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(old_page);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 121 | 35.28% | 2 | 13.33% |
Oleg Nesterov | 76 | 22.16% | 6 | 40.00% |
Kirill A. Shutemov | 66 | 19.24% | 3 | 20.00% |
Johannes Weiner | 41 | 11.95% | 1 | 6.67% |
Haggai Eran | 35 | 10.20% | 1 | 6.67% |
Jerome Marchand | 3 | 0.87% | 1 | 6.67% |
Joerg Roedel | 1 | 0.29% | 1 | 6.67% |
Total | 343 | 100.00% | 15 | 100.00% |
/**
* is_swbp_insn - check if instruction is breakpoint instruction.
* @insn: instruction to be checked.
* Default implementation of is_swbp_insn
* Returns true if @insn is a breakpoint instruction.
*/
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
return *insn == UPROBE_SWBP_INSN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 16 | 100.00% | 2 | 100.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/**
* is_trap_insn - check if instruction is a trap instruction.
* @insn: instruction to be checked.
* Default implementation of is_trap_insn.
* Returns true if @insn is a trap instruction.
*
* This function is needed for the case where an architecture has multiple
* trap instructions (like powerpc).
*/
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
return is_swbp_insn(insn);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
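An architecture with several trap encodings would override the __weak default above in its own uprobes code. A sketch with made-up opcodes (the values below are purely illustrative, not from any real ISA):
/* In the arch's uprobes.c -- overrides the __weak default. */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn) ||
	       *insn == 0x0fe00000 ||	/* hypothetical conditional trap */
	       *insn == 0x0fe00001;	/* hypothetical unconditional trap */
}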
static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
kunmap_atomic(kaddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 52 | 100.00% | 2 | 100.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
kunmap_atomic(kaddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
uprobe_opcode_t old_opcode;
bool is_swbp;
/*
* Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
* We do not check if it is any other 'trap variant' which could
* be a conditional trap instruction such as the ones powerpc supports.
*
* The logic is that we do not care if the underlying instruction
* is a trap variant; a uprobe always wins over any other (gdb)
* breakpoint.
*/
copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
is_swbp = is_swbp_insn(&old_opcode);
if (is_swbp_insn(new_opcode)) {
if (is_swbp) /* register: already installed? */
return 0;
} else {
if (!is_swbp) /* unregister: was it changed by us? */
return 0;
}
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 77 | 98.72% | 3 | 75.00% |
Ananth N. Mavinakayanahalli | 1 | 1.28% | 1 | 25.00% |
Total | 78 | 100.00% | 4 | 100.00% |
/*
* NOTE:
* The breakpoint instruction is expected to be the smallest instruction for
* the architecture. If an arch has variable-length instructions and the
* breakpoint instruction is not the smallest one supported by that
* architecture, then is_trap_at_addr() and uprobe_write_opcode() need to be
* modified accordingly. This is never a problem for archs with fixed-length
* instructions.
*
* uprobe_write_opcode - write the opcode at a given virtual address.
* @mm: the probed process address space.
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
*
* Called with mm->mmap_sem held for write.
* Return 0 (success) or a negative errno.
*/
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
uprobe_opcode_t opcode)
{
struct page *old_page, *new_page;
struct vm_area_struct *vma;
int ret;
retry:
/* Read the page with vaddr into memory */
ret = get_user_pages_remote(NULL, mm, vaddr, 1,
FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
if (ret <= 0)
return ret;
ret = verify_opcode(old_page, vaddr, &opcode);
if (ret <= 0)
goto put_old;
ret = anon_vma_prepare(vma);
if (ret)
goto put_old;
ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
goto put_old;
__SetPageUptodate(new_page);
copy_highpage(new_page, old_page);
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
ret = __replace_page(vma, vaddr, old_page, new_page);
put_page(new_page);
put_old:
put_page(old_page);
if (unlikely(ret == -EAGAIN))
goto retry;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 123 | 63.40% | 3 | 21.43% |
Oleg Nesterov | 64 | 32.99% | 6 | 42.86% |
Lorenzo Stoakes | 3 | 1.55% | 2 | 14.29% |
Kirill A. Shutemov | 3 | 1.55% | 2 | 14.29% |
Dave Hansen | 1 | 0.52% | 1 | 7.14% |
Total | 194 | 100.00% | 14 | 100.00% |
/**
* set_swbp - store breakpoint at a given address.
* @auprobe: arch specific probepoint information.
* @mm: the probed process address space.
* @vaddr: the virtual address to insert the opcode.
*
* For mm @mm, store the breakpoint instruction at @vaddr.
* Return 0 (success) or a negative errno.
*/
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 29 | 96.67% | 3 | 75.00% |
Oleg Nesterov | 1 | 3.33% | 1 | 25.00% |
Total | 30 | 100.00% | 4 | 100.00% |
/**
* set_orig_insn - Restore the original instruction.
* @auprobe: arch specific probepoint information.
* @mm: the probed process address space.
* @vaddr: the virtual address to restore the original instruction at.
*
* For mm @mm, restore the original opcode at @vaddr.
* Return 0 (success) or a negative errno.
*/
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 36 | 94.74% | 3 | 60.00% |
Oleg Nesterov | 2 | 5.26% | 2 | 40.00% |
Total | 38 | 100.00% | 5 | 100.00% |
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
atomic_inc(&uprobe->ref);
return uprobe;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static void put_uprobe(struct uprobe *uprobe)
{
if (atomic_dec_and_test(&uprobe->ref))
kfree(uprobe);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
if (l->inode < r->inode)
return -1;
if (l->inode > r->inode)
return 1;
if (l->offset < r->offset)
return -1;
if (l->offset > r->offset)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 73 | 100.00% | 1 | 100.00% |
Total | 73 | 100.00% | 1 | 100.00% |
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
struct uprobe u = { .inode = inode, .offset = offset };
struct rb_node *n = uprobes_tree.rb_node;
struct uprobe *uprobe;
int match;
while (n) {
uprobe = rb_entry(n, struct uprobe, rb_node);
match = match_uprobe(&u, uprobe);
if (!match)
return get_uprobe(uprobe);
if (match < 0)
n = n->rb_left;
else
n = n->rb_right;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 108 | 98.18% | 1 | 50.00% |
Oleg Nesterov | 2 | 1.82% | 1 | 50.00% |
Total | 110 | 100.00% | 2 | 100.00% |
/*
* Find a uprobe corresponding to a given inode:offset
* Acquires uprobes_treelock
*/
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
struct uprobe *uprobe;
spin_lock(&uprobes_treelock);
uprobe = __find_uprobe(inode, offset);
spin_unlock(&uprobes_treelock);
return uprobe;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 43 | 95.56% | 1 | 50.00% |
Oleg Nesterov | 2 | 4.44% | 1 | 50.00% |
Total | 45 | 100.00% | 2 | 100.00% |
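Since __find_uprobe() takes a reference via get_uprobe() under the tree lock, every successful lookup must be paired with put_uprobe(). A minimal sketch of a caller (the helper name is hypothetical):
static void example_lookup(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe = find_uprobe(inode, offset);

	if (!uprobe)
		return;			/* nothing registered at inode:offset */

	/* ... use uprobe->arch, uprobe->consumers, ... */

	put_uprobe(uprobe);		/* drop the lookup reference */
}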
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
struct rb_node **p = &uprobes_tree.rb_node;
struct rb_node *parent = NULL;
struct uprobe *u;
int match;
while (*p) {
parent = *p;
u = rb_entry(parent, struct uprobe, rb_node);
match = match_uprobe(uprobe, u);
if (!match)
return get_uprobe(u);
if (match < 0)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
u = NULL;
rb_link_node(&uprobe->rb_node, parent, p);
rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
atomic_set(&uprobe->ref, 2);
return u;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 143 | 98.62% | 1 | 50.00% |
Oleg Nesterov | 2 | 1.38% | 1 | 50.00% |
Total | 145 | 100.00% | 2 | 100.00% |
/*
* Acquire uprobes_treelock.
* If a matching uprobe already exists in the rbtree, take an access
* reference on it and return it.
*
* If there is no matching uprobe, insert @uprobe into the rbtree,
* take a double refcount (access + creation) and return NULL.
*/
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
struct uprobe *u;
spin_lock(&uprobes_treelock);
u = __insert_uprobe(uprobe);
spin_unlock(&uprobes_treelock);
return u;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 38 | 95.00% | 1 | 50.00% |
Oleg Nesterov | 2 | 5.00% | 1 | 50.00% |
Total | 40 | 100.00% | 2 | 100.00% |
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
struct uprobe *uprobe, *cur_uprobe;
uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
if (!uprobe)
return NULL;
uprobe->inode = igrab(inode);
uprobe->offset = offset;
init_rwsem(&uprobe->register_rwsem);
init_rwsem(&uprobe->consumer_rwsem);
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
/* a uprobe exists for this inode:offset combination */
if (cur_uprobe) {
kfree(uprobe);
uprobe = cur_uprobe;
iput(inode);
}
return uprobe;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 100 | 92.59% | 1 | 50.00% |
Oleg Nesterov | 8 | 7.41% | 1 | 50.00% |
Total | 108 | 100.00% | 2 | 100.00% |
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
down_write(&uprobe->consumer_rwsem);
uc->next = uprobe->consumers;
uprobe->consumers = uc;
up_write(&uprobe->consumer_rwsem);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 43 | 93.48% | 2 | 50.00% |
Oleg Nesterov | 2 | 4.35% | 1 | 25.00% |
Ingo Molnar | 1 | 2.17% | 1 | 25.00% |
Total | 46 | 100.00% | 4 | 100.00% |
/*
* For uprobe @uprobe, delete the consumer @uc.
* Return true if @uc was deleted successfully,
* false otherwise.
*/
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
struct uprobe_consumer **con;
bool ret = false;
down_write(&uprobe->consumer_rwsem);
for (con = &uprobe->consumers; *con; con = &(*con)->next) {
if (*con == uc) {
*con = uc->next;
ret = true;
break;
}
}
up_write(&uprobe->consumer_rwsem);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 90 | 98.90% | 2 | 66.67% |
Ingo Molnar | 1 | 1.10% | 1 | 33.33% |
Total | 91 | 100.00% | 3 | 100.00% |
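Consumers are chained onto a uprobe through consumer_add()/consumer_del() above; clients reach them through uprobe_register()/uprobe_unregister(), defined later in this file. A minimal sketch of a consumer (the handler and names below are hypothetical):
static int example_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;	/* 0: keep the breakpoint installed */
}

static struct uprobe_consumer example_consumer = {
	.handler = example_handler,
};

/*
 * uprobe_register(inode, offset, &example_consumer);
 * ...
 * uprobe_unregister(inode, offset, &example_consumer);
 */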
static int __copy_insn(struct address_space *mapping, struct file *filp,
void *insn, int nbytes, loff_t offset)
{
struct page *page;
/*
* Ensure that the page that has the original instruction is populated
* and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
* see uprobe_register().
*/
if (mapping->a_ops->readpage)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
copy_from_page(page, offset, insn, nbytes);
put_page(page);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 65 | 67.01% | 1 | 12.50% |
Oleg Nesterov | 29 | 29.90% | 6 | 75.00% |
Kirill A. Shutemov | 3 | 3.09% | 1 | 12.50% |
Total | 97 | 100.00% | 8 | 100.00% |
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
void *insn = &uprobe->arch.insn;
int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */
do {
if (offs >= i_size_read(uprobe->inode))
break;
len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
err = __copy_insn(mapping, filp, insn, len, offs);
if (err)
break;
insn += len;
offs += len;
size -= len;
} while (size);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 92 | 66.19% | 4 | 80.00% |
Srikar Dronamraju | 47 | 33.81% | 1 | 20.00% |
Total | 139 | 100.00% | 5 | 100.00% |
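A worked example of the page-boundary split in copy_insn(), with a hypothetical offset:
/*
 * Worked example (hypothetical offset, 4K pages): an instruction copy
 * starting at file offset 0x1ffe can take only the bytes left in that page
 * on the first iteration:
 *
 *	len = min(size, PAGE_SIZE - (0x1ffe & ~PAGE_MASK))
 *	    = min(size, 0x1000 - 0xffe) = min(size, 2)
 *
 * so two bytes come from the first page and the rest, if any, from the
 * next page on the following loop iteration.
 */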
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
struct mm_struct *mm, unsigned long vaddr)
{
int ret = 0;
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
return ret;
/* TODO: move this into _register, until then we abuse this sem. */
down_write(&uprobe->consumer_rwsem);
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
goto out;
ret = copy_insn(uprobe, file);
if (ret)
goto out;
ret = -ENOTSUPP;
if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
if (ret)
goto out;
/* uprobe_write_opcode() assumes we don't cross page boundary */
BUG_ON((uprobe->offset & ~PAGE_MASK) +
UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
up_write(&uprobe->consumer_rwsem);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 90 | 51.72% | 10 | 62.50% |
Srikar Dronamraju | 82 | 47.13% | 4 | 25.00% |
Ananth N. Mavinakayanahalli | 2 | 1.15% | 2 | 12.50% |
Total | 174 | 100.00% | 16 | 100.00% |
static inline bool consumer_filter(struct uprobe_consumer *uc,
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
return !uc->filter || uc->filter(uc, ctx, mm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 38 | 100.00% | 2 | 100.00% |
Total | 38 | 100.00% | 2 | 100.00% |
static bool filter_chain(struct uprobe *uprobe,
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
struct uprobe_consumer *uc;
bool ret = false;
down_read(&uprobe->consumer_rwsem);
for (uc = uprobe->consumers; uc; uc = uc->next) {
ret = consumer_filter(uc, ctx, mm);
if (ret)
break;
}
up_read(&uprobe->consumer_rwsem);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 83 | 100.00% | 4 | 100.00% |
Total | 83 | 100.00% | 4 | 100.00% |
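A consumer with a NULL ->filter accepts every mm, per consumer_filter() above. A sketch of a filter that restricts breakpoint insertion to one particular mm (traced_mm and the function name are hypothetical):
static struct mm_struct *traced_mm;	/* hypothetical: the one mm we care about */

static bool example_filter(struct uprobe_consumer *self,
			   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return mm == traced_mm;		/* install/keep the breakpoint only here */
}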
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long vaddr)
{
bool first_uprobe;
int ret;
ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
if (ret)
return ret;
/*
* set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
* the task can hit this breakpoint right after __replace_page().
*/
first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
if (first_uprobe)
set_bit(MMF_HAS_UPROBES, &mm->flags);
ret = set_swbp(&uprobe->arch, mm, vaddr);
if (!ret)
clear_bit(MMF_RECALC_UPROBES, &mm->flags);
else if (first_uprobe)
clear_bit(MMF_HAS_UPROBES, &mm->flags);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 111 | 86.72% | 4 | 50.00% |
Srikar Dronamraju | 17 | 13.28% | 4 | 50.00% |
Total | 128 | 100.00% | 8 | 100.00% |
static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
set_bit(MMF_RECALC_UPROBES, &mm->flags);
return set_orig_insn(&uprobe->arch, mm, vaddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srikar Dronamraju | 28 | 65.12% | 3 | 50.00% |
Oleg Nesterov | 15 | 34.88% | 3 | 50.00% |
Total | 43 | 100.00% | 6 | 100.00% |
static inline bool uprobe_is_active(struct uprobe *uprobe)
{
return !RB_EMPTY_NODE(&uprobe->rb_node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Oleg Nesterov | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/*
* There could be threads that have already hit the breakpoint. They
* will recheck the current insn and restart if find_uprobe() fails.
* See find_active_uprobe().
*/
static void delete_uprobe(struct uprobe *uprobe)
{
if (WARN_ON(!uprobe_is_active(uprobe)))
return;
spin_lock(&uprobes_treelock);
rb_erase(&uprobe->rb_node, &uprobes_tree);
spin_unlock(