cregit-Linux: how code gets into the kernel

Release 4.17 — kernel/kprobes.c (directory: kernel)
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>


#define KPROBE_HASH_BITS 6

#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
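
For orientation, KPROBE_HASH_BITS of 6 gives a 64-bucket hash table; a probed address is mapped to its bucket with hash_ptr() from <linux/hash.h>, which is the same computation get_kprobe() performs further down. A minimal sketch of that mapping, assuming it lived in this file (the helper name is illustrative, not part of kprobes.c):

/* Illustrative only: which of the 64 buckets would a probe at @addr use? */
static u32 example_kprobe_bucket(void *addr)
{
	return hash_ptr(addr, KPROBE_HASH_BITS);
}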



static int kprobes_initialized;

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];


kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

Contributors

Naveen N. Rao: 30 tokens (100.00%), 2 commits (100.00%)
Total: 30 tokens, 2 commits


static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

Contributors

Srinivasa D S: 21 tokens (95.45%), 1 commit (50.00%)
Thomas Gleixner: 1 token (4.55%), 1 commit (50.00%)
Total: 22 tokens, 2 commits
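
The per-bucket locks above serialize access to kretprobe_inst_table. A hedged sketch of how a caller would take one of them, assuming it lived in this file and hashing the task pointer the same way the kretprobe code does (the function name is illustrative):

static void example_walk_bucket(struct task_struct *tsk)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *lock = kretprobe_table_lock_ptr(hash);
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);
	/* ... walk kretprobe_inst_table[hash] under the lock ... */
	raw_spin_unlock_irqrestore(lock, flags);
}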

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))
static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

Contributors

Masami Hiramatsu: 25 tokens (100.00%), 1 commit (100.00%)
Total: 25 tokens, 1 commit

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

Contributors

Heiko Carstens: 14 tokens (93.33%), 1 commit (50.00%)
Masami Hiramatsu: 1 token (6.67%), 1 commit (50.00%)
Total: 15 tokens, 2 commits


void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

Contributors

Heiko Carstens: 13 tokens (86.67%), 1 commit (33.33%)
Masami Hiramatsu: 1 token (6.67%), 1 commit (33.33%)
Rusty Russell: 1 token (6.67%), 1 commit (33.33%)
Total: 15 tokens, 3 commits

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();
	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
 out:
	mutex_unlock(&c->mutex);
	return slot;
}

Contributors

Ananth N. Mavinakayanahalli: 144 tokens (48.98%), 1 commit (10.00%)
Masami Hiramatsu: 103 tokens (35.03%), 6 commits (60.00%)
Heiko Carstens: 44 tokens (14.97%), 2 commits (20.00%)
Christoph Hellwig: 3 tokens (1.02%), 1 commit (10.00%)
Total: 294 tokens, 10 commits
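
For scale, slots_per_page() divides the page by the cache's insn_size: on x86, where MAX_INSN_SIZE is 16 and kprobe_opcode_t is one byte, that works out to roughly 256 slots per 4 KB page. A hedged sketch of how a user of this cache would take and release a slot, assuming it lived in this file (the helper names and the dirty/clean choice are illustrative):

/*
 * Sketch: grab an instruction slot from the shared cache, copy a probed
 * instruction into it, and later hand it back as dirty so that
 * collect_garbage_slots() can reclaim it.
 */
static kprobe_opcode_t *example_copy_insn(kprobe_opcode_t *src, int len)
{
	kprobe_opcode_t *slot = __get_insn_slot(&kprobe_insn_slots);

	if (!slot)
		return NULL;
	memcpy(slot, src, len);
	return slot;
}

static void example_drop_insn(kprobe_opcode_t *slot)
{
	/* dirty=1: the slot held live text, let the garbage collector sweep it */
	__free_insn_slot(&kprobe_insn_slots, slot, 1);
}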

/* Return 1 if all garbages are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

Contributors

Masami Hiramatsu: 80 tokens (94.12%), 5 commits (83.33%)
Heiko Carstens: 5 tokens (5.88%), 1 commit (16.67%)
Total: 85 tokens, 6 commits


static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;

		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

Contributors

Masami Hiramatsu: 97 tokens (97.00%), 6 commits (85.71%)
Christoph Hellwig: 3 tokens (3.00%), 1 commit (14.29%)
Total: 100 tokens, 7 commits


void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

Contributors

Masami Hiramatsu: 123 tokens (68.72%), 7 commits (70.00%)
Ananth N. Mavinakayanahalli: 42 tokens (23.46%), 1 commit (10.00%)
Heiko Carstens: 11 tokens (6.15%), 1 commit (10.00%)
Christoph Hellwig: 3 tokens (1.68%), 1 commit (10.00%)
Total: 179 tokens, 10 commits

/*
 * Check given address is on the page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

Contributors

Masami Hiramatsu: 74 tokens (100.00%), 1 commit (100.00%)
Total: 74 tokens, 1 commit
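
As the comment above says, this check exists so that code walking a stack can tell a single-step slot apart from ordinary kernel text. A sketch of the kind of caller that implies, assuming it lived in this file (the wrapper name is illustrative):

static bool example_addr_is_kprobe_slot(unsigned long addr)
{
	/* Does this return address point into a kprobe single-step slot? */
	return __is_insn_slot_addr(&kprobe_insn_slots, addr);
}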

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

Contributors

Ananth N. Mavinakayanahalli: 16 tokens (84.21%), 1 commit (50.00%)
Christoph Lameter: 3 tokens (15.79%), 1 commit (50.00%)
Total: 19 tokens, 2 commits


static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

Contributors

Ananth N. Mavinakayanahalli: 13 tokens (81.25%), 1 commit (50.00%)
Christoph Lameter: 3 tokens (18.75%), 1 commit (50.00%)
Total: 16 tokens, 2 commits

/*
 * This routine is called either:
 *  - under the kprobe_mutex - during kprobe_[un]register()
 *    OR
 *  - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

Contributors

Ananth N. Mavinakayanahalli: 58 tokens (100.00%), 2 commits (100.00%)
Total: 58 tokens, 2 commits
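
get_kprobe() is what makes a registered probe findable by address. A minimal registration sketch, written as a self-contained module rather than part of this file; the probed symbol and all example_* names are illustrative:

#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
	return 0;	/* let the probed instruction execute */
}

static struct kprobe example_kp = {
	.symbol_name = "do_sys_open",	/* illustrative target symbol */
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Once register_kprobe() succeeds, the probe sits in the hash bucket computed from its resolved address, which is exactly what get_kprobe() walks above.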

NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

Contributors

Masami Hiramatsu: 19 tokens (100.00%), 2 commits (100.00%)
Total: 19 tokens, 2 commits

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

Contributors

Masami Hiramatsu: 31 tokens (100.00%), 1 commit (100.00%)
Total: 31 tokens, 1 commit

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

Contributors

Masami Hiramatsu: 54 tokens (100.00%), 3 commits (100.00%)
Total: 54 tokens, 3 commits

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handler on the list, but ignores its return value.
 * This must be called from arch-dep optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

Contributors

Ananth N. Mavinakayanahalli: 51 tokens (79.69%), 3 commits (42.86%)
Masami Hiramatsu: 12 tokens (18.75%), 3 commits (42.86%)
Prasanna S. Panchamukhi: 1 token (1.56%), 1 commit (14.29%)
Total: 64 tokens, 7 commits

NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

Contributors

Masami Hiramatsu: 43 tokens (100.00%), 1 commit (100.00%)
Total: 43 tokens, 1 commit

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

Contributors

Masami Hiramatsu: 49 tokens (98.00%), 1 commit (50.00%)
Hien Nguyen: 1 token (2.00%), 1 commit (50.00%)
Total: 50 tokens, 2 commits
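
The optprobe helpers above and below all lean on the same layout trick: struct optimized_kprobe embeds a struct kprobe as its ->kp member, so container_of() converts between the two views. A small sketch of that pattern in isolation (the function name is illustrative):

static struct optimized_kprobe *example_to_optprobe(struct kprobe *p)
{
	/* Only valid when @p is the ->kp member of an optimized_kprobe. */
	return container_of(p, struct optimized_kprobe, kp);
}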

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

Contributors

Masami Hiramatsu: 58 tokens (100.00%), 1 commit (100.00%)
Total: 58 tokens, 1 commit

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

Contributors

Masami Hiramatsu: 54 tokens (100.00%), 1 commit (100.00%)
Total: 54 tokens, 1 commit

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

Contributors

Masami Hiramatsu: 96 tokens (98.97%), 1 commit (50.00%)
Namhyung Kim: 1 token (1.03%), 1 commit (50.00%)
Total: 97 tokens, 2 commits

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus.
	 * And same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug try to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed)
	 * To avoid this deadlock, caller must have locked cpu hotplug
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization never be done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}

Contributors

Masami Hiramatsu: 41 tokens (91.11%), 4 commits (80.00%)
Thomas Gleixner: 4 tokens (8.89%), 1 commit (20.00%)
Total: 45 tokens, 5 commits

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

Contributors

Masami Hiramatsu: 111 tokens (96.52%), 4 commits (80.00%)
Thomas Gleixner: 4 tokens (3.48%), 1 commit (20.00%)
Total: 115 tokens, 5 commits

/* Reclaim all kprobes on the free_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

Contributors

Masami Hiramatsu: 56 tokens (100.00%), 3 commits (100.00%)
Total: 56 tokens, 3 commits

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

Contributors

Masami Hiramatsu: 16 tokens (100.00%), 1 commit (100.00%)
Total: 16 tokens, 1 commit

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiesence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for quiesence period to ensure all potentially
	 * preempted tasks to have normally scheduled. Because optprobe
	 * may modify multiple instructions, there is a chance that Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
	 * Note that on non-preemptive kernel, this is transparently converted
	 * to synchronoze_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after quiesence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after quiesence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

Contributors

Masami Hiramatsu: 67 tokens (85.90%), 7 commits (77.78%)
Thomas Gleixner: 6 tokens (7.69%), 1 commit (11.11%)
Steven Rostedt: 5 tokens (6.41%), 1 commit (11.11%)
Total: 78 tokens, 9 commits
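
Stripped of the kprobe-specific steps, the optimizer is the standard delayed-workqueue pattern: kick_kprobe_optimizer() batches requests by scheduling the work OPTIMIZE_DELAY jiffies out, and wait_for_kprobe_optimizer() below forces the pending pass to run by flushing it. A self-contained sketch of that pattern with illustrative names (not part of kprobes.c):

#include <linux/workqueue.h>

static void example_pass(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_pass);

static void example_pass(struct work_struct *work)
{
	/* ... do the batched update, cf. steps 1-5 of kprobe_optimizer() ... */
}

static void example_kick(void)
{
	schedule_delayed_work(&example_work, 5);	/* cf. OPTIMIZE_DELAY */
}

static void example_wait(void)
{
	flush_delayed_work(&example_work);	/* cf. wait_for_kprobe_optimizer() */
}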

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immmediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

Contributors

Tejun Heo: 44 tokens (73.33%), 1 commit (33.33%)
Masami Hiramatsu: 16 tokens (26.67%), 2 commits (66.67%)
Total: 60 tokens, 3 commits

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

Contributors

Masami Hiramatsu: 130 tokens (100.00%), 3 commits (100.00%)
Total: 130 tokens, 3 commits

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

Contributors

Masami Hiramatsu: 36 tokens (97.30%), 2 commits (66.67%)
Thomas Gleixner: 1 token (2.70%), 1 commit (33.33%)
Total: 37 tokens, 3 commits

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

Contributors

Masami Hiramatsu: 147 tokens (100.00%), 2 commits (100.00%)
Total: 147 tokens, 2 commits

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

Contributors

Masami Hiramatsu: 85 tokens (100.00%), 1 commit (100.00%)
Total: 85 tokens, 1 commit

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

Contributors

Masami Hiramatsu: 95 tokens (100.00%), 3 commits (100.00%)
Total: 95 tokens, 3 commits


static inline void __prepare_optimized_kprobe(struct optimized_kprobe *op,
					      struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

Contributors

Masami Hiramatsu: 32 tokens (100.00%), 1 commit (100.00%)
Total: 32 tokens, 1 commit

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)