Release 4.17 kernel/kprobes.c
/*
* Kernel Probes (KProbes)
* kernel/kprobes.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation (includes suggestions from
* Rusty Russell).
* 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
* hlists and exceptions notifier as suggested by Andi Kleen.
* 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
* interface to access function arguments.
* 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
* exceptions notifier to be first on the priority list.
* 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
* <prasanna@in.ibm.com> added function-return probes.
*/
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
unsigned int __unused)
{
return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naveen N. Rao | 30 | 100.00% | 2 | 100.00% |
Total | 30 | 100.00% | 2 | 100.00% |
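For orientation, the consumer-facing entry point that feeds this hash table is register_kprobe(); a minimal probe module, sketched after samples/kprobes/kprobe_example.c (the probed symbol and handler names below are purely illustrative), looks roughly like this:
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative pre-handler: runs just before the probed instruction. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let the single-step machinery continue normally */
}

static struct kprobe kp = {
	.symbol_name	= "_do_fork",	/* resolved via kprobe_lookup_name()/kallsyms */
	.pre_handler	= handler_pre,
};

static int __init example_init(void)
{
	/* Hashes kp.addr into kprobe_table[] and arms the breakpoint. */
	return register_kprobe(&kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");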
static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
return &(kretprobe_table_locks[hash].lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srinivasa D S | 21 | 95.45% | 1 | 50.00% |
Thomas Gleixner | 1 | 4.55% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
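The kretprobe_inst_table[] above is hashed by the probed task, and each bucket's raw spinlock serializes insertion and removal of return-probe instances. A minimal kretprobe user, again only a sketch with an illustrative target symbol, would be:
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative return handler: runs when the probed function returns. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("return value: %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe rp = {
	.kp.symbol_name	= "do_filp_open",	/* illustrative target */
	.handler	= ret_handler,
	.maxactive	= 20,	/* upper bound on concurrently tracked instances */
};

static int __init example_init(void)
{
	return register_kretprobe(&rp);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&rp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");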
/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
* single-stepped. x86_64, POWER4 and above have no-exec support and
* stepping on the instruction on a vmalloced/kmalloced/data page
* is a recipe for disaster
*/
struct kprobe_insn_page {
struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
struct kprobe_insn_cache *cache;
int nused;
int ngarbage;
char slot_used[];
};
#define KPROBE_INSN_PAGE_SIZE(slots) \
(offsetof(struct kprobe_insn_page, slot_used) + \
(sizeof(char) * (slots)))
static int slots_per_page(struct kprobe_insn_cache *c)
{
return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
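As a quick sanity check of the arithmetic, assuming the x86-64 values (PAGE_SIZE 4096, kprobe_opcode_t one byte wide, MAX_INSN_SIZE 16), each cache page holds 256 slots, with one slot_used[] bookkeeping byte per slot; a standalone calculation:
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096	/* assumed */
#define EXAMPLE_MAX_INSN_SIZE	16	/* assumed x86-64 value */

int main(void)
{
	/* Mirrors slots_per_page(): one slot per copied-instruction buffer. */
	int slots = EXAMPLE_PAGE_SIZE /
		    (EXAMPLE_MAX_INSN_SIZE * (int)sizeof(unsigned char));

	/* KPROBE_INSN_PAGE_SIZE() adds one slot_used[] state byte per slot. */
	printf("slots per page: %d (plus %d slot_used bytes)\n", slots, slots);
	return 0;
}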
enum kprobe_slot_state {
SLOT_CLEAN = 0,
SLOT_DIRTY = 1,
SLOT_USED = 2,
};
void __weak *alloc_insn_page(void)
{
return module_alloc(PAGE_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heiko Carstens | 14 | 93.33% | 1 | 50.00% |
Masami Hiramatsu | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
void __weak free_insn_page(void *page)
{
module_memfree(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heiko Carstens | 13 | 86.67% | 1 | 33.33% |
Masami Hiramatsu | 1 | 6.67% | 1 | 33.33% |
Rusty Russell | 1 | 6.67% | 1 | 33.33% |
Total | 15 | 100.00% | 3 | 100.00% |
struct kprobe_insn_cache kprobe_insn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
.alloc = alloc_insn_page,
.free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);
/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
kprobe_opcode_t *slot = NULL;
/* Since the slot array is not protected by rcu, we need a mutex */
mutex_lock(&c->mutex);
retry:
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
slot = kip->insns + (i * c->insn_size);
rcu_read_unlock();
goto out;
}
}
/* kip->nused is broken. Fix it. */
kip->nused = slots_per_page(c);
WARN_ON(1);
}
}
rcu_read_unlock();
/* If there are any garbage slots, collect them and try again. */
if (c->nr_garbage && collect_garbage_slots(c) == 0)
goto retry;
/* All out of space. Need to allocate a new page. */
kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
goto out;
/*
* Use module_alloc so this page is within +/- 2GB of where the
* kernel image and loaded module images reside. This is required
* so x86_64 can correctly handle the %rip-relative fixups.
*/
kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
goto out;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
kip->cache = c;
list_add_rcu(&kip->list, &c->pages);
slot = kip->insns;
out:
mutex_unlock(&c->mutex);
return slot;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 144 | 48.98% | 1 | 10.00% |
Masami Hiramatsu | 103 | 35.03% | 6 | 60.00% |
Heiko Carstens | 44 | 14.97% | 2 | 20.00% |
Christoph Hellwig | 3 | 1.02% | 1 | 10.00% |
Total | 294 | 100.00% | 10 | 100.00% |
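Arch back ends do not call __get_insn_slot() directly; <linux/kprobes.h> wraps it (via DEFINE_INSN_CACHE_OPS) as get_insn_slot()/free_insn_slot(). A hedged sketch of how a back end typically consumes a slot, loosely modeled on the x86 arch_prepare_kprobe() path with details simplified:
/* Sketch only; assumes <linux/kprobes.h> and an x86-like arch_specific_insn. */
static int prepare_probe_sketch(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();	/* __get_insn_slot(&kprobe_insn_slots) */
	if (!p->ainsn.insn)
		return -ENOMEM;

	/* Copy the original instruction into the executable slot so it can
	 * later be single-stepped out of line, and remember its opcode. */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}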
/* Return 1 if all garbage slots on this page were collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
kip->slot_used[idx] = SLOT_CLEAN;
kip->nused--;
if (kip->nused == 0) {
/*
* Page is no longer in use. Free it unless
* it's the last one. We keep the last one
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
if (!list_is_singular(&kip->list)) {
list_del_rcu(&kip->list);
synchronize_rcu();
kip->cache->free(kip->insns);
kfree(kip);
}
return 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 80 | 94.12% | 5 | 83.33% |
Heiko Carstens | 5 | 5.88% | 1 | 16.67% |
Total | 85 | 100.00% | 6 | 100.00% |
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip, *next;
/* Ensure no task is left preempted in the middle of a garbage slot */
synchronize_sched();
list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
kip->ngarbage = 0; /* we will collect all garbage slots */
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
break;
}
}
c->nr_garbage = 0;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 97 | 97.00% | 6 | 85.71% |
Christoph Hellwig | 3 | 3.00% | 1 | 14.29% |
Total | 100 | 100.00% | 7 | 100.00% |
void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip;
long idx;
mutex_lock(&c->mutex);
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
if (idx >= 0 && idx < slots_per_page(c))
goto out;
}
/* Could not find this slot. */
WARN_ON(1);
kip = NULL;
out:
rcu_read_unlock();
/* Mark and sweep: this may sleep */
if (kip) {
/* Check double free */
WARN_ON(kip->slot_used[idx] != SLOT_USED);
if (dirty) {
kip->slot_used[idx] = SLOT_DIRTY;
kip->ngarbage++;
if (++c->nr_garbage > slots_per_page(c))
collect_garbage_slots(c);
} else {
collect_one_slot(kip, idx);
}
}
mutex_unlock(&c->mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 123 | 68.72% | 7 | 70.00% |
Ananth N. Mavinakayanahalli | 42 | 23.46% | 1 | 10.00% |
Heiko Carstens | 11 | 6.15% | 1 | 10.00% |
Christoph Hellwig | 3 | 1.68% | 1 | 10.00% |
Total | 179 | 100.00% | 10 | 100.00% |
/*
* Check whether the given address lies on a page of kprobe instruction
* slots. This is used to decide whether an address found on a stack
* belongs to a text area or not.
*/
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
struct kprobe_insn_page *kip;
bool ret = false;
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (addr >= (unsigned long)kip->insns &&
addr < (unsigned long)kip->insns + PAGE_SIZE) {
ret = true;
break;
}
}
rcu_read_unlock();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 74 | 100.00% | 1 | 100.00% |
Total | 74 | 100.00% | 1 | 100.00% |
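Callers reach this through the is_kprobe_insn_slot()/is_kprobe_optinsn_slot() wrappers generated in <linux/kprobes.h>. A sketch of the kind of text-address check a stack unwinder performs, assuming those wrappers:
/* Sketch: an address found on a stack counts as executable text if it is
 * in the core kernel, a module, or an out-of-line kprobe instruction slot. */
static bool address_is_text_sketch(unsigned long addr)
{
	if (core_kernel_text(addr) || is_module_text_address(addr))
		return true;
	return is_kprobe_insn_slot(addr) || is_kprobe_optinsn_slot(addr);
}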
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
.alloc = alloc_insn_page,
.free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
};
#endif
#endif
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
__this_cpu_write(kprobe_instance, kp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 16 | 84.21% | 1 | 50.00% |
Christoph Lameter | 3 | 15.79% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static inline void reset_kprobe_instance(void)
{
__this_cpu_write(kprobe_instance, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 13 | 81.25% | 1 | 50.00% |
Christoph Lameter | 3 | 18.75% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/*
* This routine is called either:
* - under the kprobe_mutex - during kprobe_[un]register()
* OR
* - with preemption disabled - from arch/xxx/kernel/kprobes.c
*/
struct kprobe *get_kprobe(void *addr)
{
struct hlist_head *head;
struct kprobe *p;
head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu(p, head, hlist) {
if (p->addr == addr)
return p;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 58 | 100.00% | 2 | 100.00% |
Total | 58 | 100.00% | 2 | 100.00% |
NOKPROBE_SYMBOL(get_kprobe);
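A hedged sketch of the lookup from an arch breakpoint handler's point of view (loosely modeled on the x86 int3 path; a real handler does considerably more):
/* Sketch: the trap handler runs with preemption disabled, so the RCU-based
 * hash-table walk in get_kprobe() is safe here. */
static int breakpoint_dispatch_sketch(struct pt_regs *regs, void *trap_addr)
{
	struct kprobe *p;

	preempt_disable();
	p = get_kprobe(trap_addr);
	if (p && p->pre_handler && !p->pre_handler(p, regs)) {
		/* ... arrange out-of-line single-stepping of p->ainsn.insn ... */
	}
	preempt_enable_no_resched();
	return p != NULL;
}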
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
return p->pre_handler == aggr_pre_handler;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 19 | 100.00% | 2 | 100.00% |
Total | 19 | 100.00% | 2 | 100.00% |
/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
list_empty(&p->list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
/*
* Keep all fields in the kprobe consistent
*/
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 54 | 100.00% | 3 | 100.00% |
Total | 54 | 100.00% | 3 | 100.00% |
#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;
/*
* Call every pre_handler on the list, ignoring their return values.
* This must be called from the arch-dependent optimized caller.
*/
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->pre_handler(kp, regs);
}
reset_kprobe_instance();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ananth N. Mavinakayanahalli | 51 | 79.69% | 3 | 42.86% |
Masami Hiramatsu | 12 | 18.75% | 3 | 42.86% |
Prasanna S. Panchamukhi | 1 | 1.56% | 1 | 14.29% |
Total | 64 | 100.00% | 7 | 100.00% |
NOKPROBE_SYMBOL(opt_pre_handler);
/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
arch_remove_optimized_kprobe(op);
arch_remove_kprobe(p);
kfree(op);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
struct optimized_kprobe *op;
if (kprobe_aggrprobe(p)) {
op = container_of(p, struct optimized_kprobe, kp);
return arch_prepared_optinsn(&op->optinsn);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 49 | 98.00% | 1 | 50.00% |
Hien Nguyen | 1 | 2.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
struct optimized_kprobe *op;
/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
if (!kprobe_aggrprobe(p))
return kprobe_disabled(p);
op = container_of(p, struct optimized_kprobe, kp);
return kprobe_disabled(p) && list_empty(&op->list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
struct optimized_kprobe *op;
if (kprobe_aggrprobe(p)) {
op = container_of(p, struct optimized_kprobe, kp);
if (!list_empty(&op->list))
return 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
/*
* Return an optimized kprobe whose optimizing code replaces
* instructions including addr (excluding the breakpoint itself).
*/
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
int i;
struct kprobe *p = NULL;
struct optimized_kprobe *op;
/* Don't check i == 0, since that is a breakpoint case. */
for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
p = get_kprobe((void *)(addr - i));
if (p && kprobe_optready(p)) {
op = container_of(p, struct optimized_kprobe, kp);
if (arch_within_optimized_kprobe(op, addr))
return p;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 96 | 98.97% | 1 | 50.00% |
Namhyung Kim | 1 | 1.03% | 1 | 50.00% |
Total | 97 | 100.00% | 2 | 100.00% |
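The arch hook this relies on is essentially a range check over the copied instruction region; on x86 it looks roughly like the following sketch (field names as in the x86 arch_optimized_insn of this release):
/* Sketch: addr is covered if it falls inside the region whose instructions
 * were copied out and replaced by the optimized jump. */
static int within_optimized_region_sketch(struct optimized_kprobe *op,
					  unsigned long addr)
{
	return (unsigned long)op->kp.addr <= addr &&
	       (unsigned long)op->kp.addr + op->optinsn.size > addr;
}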
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);
static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
/*
* Optimize (replace a breakpoint with a jump) kprobes listed on
* optimizing_list.
*/
static void do_optimize_kprobes(void)
{
/*
* Optimization/unoptimization refers to online_cpus via stop_machine(),
* while cpu-hotplug modifies online_cpus. At the same time, text_mutex
* is taken both by cpu-hotplug and here. This combination can deadlock
* (cpu-hotplug tries to lock text_mutex while stop_machine() cannot
* proceed because online_cpus has changed).
* To avoid this deadlock, the caller must hold the cpu-hotplug lock so
* that cpu-hotplug cannot run outside of the text_mutex locking.
*/
lockdep_assert_cpus_held();
/* Optimization is never done while kprobes are disarmed */
if (kprobes_all_disarmed || !kprobes_allow_optimization ||
list_empty(&optimizing_list))
return;
mutex_lock(&text_mutex);
arch_optimize_kprobes(&optimizing_list);
mutex_unlock(&text_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 41 | 91.11% | 4 | 80.00% |
Thomas Gleixner | 4 | 8.89% | 1 | 20.00% |
Total | 45 | 100.00% | 5 | 100.00% |
/*
* Unoptimize (replace a jump with a breakpoint and remove the breakpoint
* if needed) kprobes listed on unoptimizing_list.
*/
static void do_unoptimize_kprobes(void)
{
struct optimized_kprobe *op, *tmp;
/* See comment in do_optimize_kprobes() */
lockdep_assert_cpus_held();
/* Unoptimization must be done regardless of the disarmed state */
if (list_empty(&unoptimizing_list))
return;
mutex_lock(&text_mutex);
arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
/* Loop over freeing_list for disarming */
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
/* Disarm probes if marked disabled */
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
if (kprobe_unused(&op->kp)) {
/*
* Remove unused probes from hash list. After waiting
* for synchronization, these probes are reclaimed.
* (reclaiming is done by do_free_cleaned_kprobes.)
*/
hlist_del_rcu(&op->kp.hlist);
} else
list_del_init(&op->list);
}
mutex_unlock(&text_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 111 | 96.52% | 4 | 80.00% |
Thomas Gleixner | 4 | 3.48% | 1 | 20.00% |
Total | 115 | 100.00% | 5 | 100.00% |
/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
struct optimized_kprobe *op, *tmp;
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
BUG_ON(!kprobe_unused(&op->kp));
list_del_init(&op->list);
free_aggr_kprobe(&op->kp);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 56 | 100.00% | 3 | 100.00% |
Total | 56 | 100.00% | 3 | 100.00% |
/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
mutex_lock(&kprobe_mutex);
cpus_read_lock();
/* Lock modules while optimizing kprobes */
mutex_lock(&module_mutex);
/*
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
* kprobes before waiting for the quiescence period.
*/
do_unoptimize_kprobes();
/*
* Step 2: Wait for a quiescence period to ensure that all potentially
* preempted tasks have scheduled normally. Because an optprobe may
* modify multiple instructions, a task may have been preempted on the
* Nth instruction; such a task could return into the 2nd-Nth byte of
* the jump instruction. This wait avoids that. Note that on a
* non-preemptive kernel this is transparently converted to
* synchronize_sched(), waiting for all interrupts to have completed.
*/
synchronize_rcu_tasks();
/* Step 3: Optimize kprobes after the quiescence period */
do_optimize_kprobes();
/* Step 4: Free cleaned kprobes after the quiescence period */
do_free_cleaned_kprobes();
mutex_unlock(&module_mutex);
cpus_read_unlock();
mutex_unlock(&kprobe_mutex);
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 67 | 85.90% | 7 | 77.78% |
Thomas Gleixner | 6 | 7.69% | 1 | 11.11% |
Steven Rostedt | 5 | 6.41% | 1 | 11.11% |
Total | 78 | 100.00% | 9 | 100.00% |
/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
mutex_lock(&kprobe_mutex);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);
/* this will also make optimizing_work execute immediately */
flush_delayed_work(&optimizing_work);
/* @optimizing_work might not have been queued yet, relax */
cpu_relax();
mutex_lock(&kprobe_mutex);
}
mutex_unlock(&kprobe_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 44 | 73.33% | 1 | 33.33% |
Masami Hiramatsu | 16 | 26.67% | 2 | 66.67% |
Total | 60 | 100.00% | 3 | 100.00% |
/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
/* Check if the kprobe is disabled or not ready for optimization. */
if (!kprobe_optready(p) || !kprobes_allow_optimization ||
(kprobe_disabled(p) || kprobes_all_disarmed))
return;
/* Neither break_handler nor post_handler is supported. */
if (p->break_handler || p->post_handler)
return;
op = container_of(p, struct optimized_kprobe, kp);
/* Check that there are no other kprobes within the instructions to be optimized */
if (arch_check_optimized_kprobe(op) < 0)
return;
/* Check if it is already optimized. */
if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
return;
op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
if (!list_empty(&op->list))
/* This kprobe is being unoptimized. Just dequeue the probe */
list_del_init(&op->list);
else {
list_add(&op->list, &optimizing_list);
kick_kprobe_optimizer();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 130 | 100.00% | 3 | 100.00% |
Total | 130 | 100.00% | 3 | 100.00% |
/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
lockdep_assert_cpus_held();
arch_unoptimize_kprobe(op);
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 36 | 97.30% | 2 | 66.67% |
Thomas Gleixner | 1 | 2.70% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
struct optimized_kprobe *op;
if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
return; /* This is neither an optprobe nor optimized */
op = container_of(p, struct optimized_kprobe, kp);
if (!kprobe_optimized(p)) {
/* Unoptimized or unoptimizing case */
if (force && !list_empty(&op->list)) {
/*
* Only if this kprobe is queued for unoptimization and force
* is set, forcibly unoptimize it. (There is no need to
* unoptimize an already-unoptimized kprobe again. :)
*/
list_del_init(&op->list);
force_unoptimize_kprobe(op);
}
return;
}
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (!list_empty(&op->list)) {
/* Dequeue from the optimization queue */
list_del_init(&op->list);
return;
}
/* Optimized kprobe case */
if (force)
/* Forcibly update the code: this is a special case */
force_unoptimize_kprobe(op);
else {
list_add(&op->list, &unoptimizing_list);
kick_kprobe_optimizer();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 147 | 100.00% | 2 | 100.00% |
Total | 147 | 100.00% | 2 | 100.00% |
/* Cancel unoptimization so that the kprobe can be reused */
static void reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
BUG_ON(!kprobe_unused(ap));
/*
* An unused kprobe MUST be in the middle of delayed unoptimization
* (meaning there is still a relative jump in place) and disabled.
*/
op = container_of(ap, struct optimized_kprobe, kp);
if (unlikely(list_empty(&op->list)))
printk(KERN_WARNING "Warning: found a stray unused "
"aggrprobe@%p\n", ap->addr);
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
BUG_ON(!kprobe_optready(ap));
optimize_kprobe(ap);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 85 | 100.00% | 1 | 100.00% |
Total | 85 | 100.00% | 1 | 100.00% |
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
if (!list_empty(&op->list))
/* Dequeue from the (un)optimization queue */
list_del_init(&op->list);
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (kprobe_unused(p)) {
/* Enqueue if it is unused */
list_add(&op->list, &freeing_list);
/*
* Remove unused probes from the hash list. After waiting
* for synchronization, this probe is reclaimed.
* (reclaiming is done by do_free_cleaned_kprobes().)
*/
hlist_del_rcu(&op->kp.hlist);
}
/* Don't touch the code, because it is already freed. */
arch_remove_optimized_kprobe(op);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 95 | 100.00% | 3 | 100.00% |
Total | 95 | 100.00% | 3 | 100.00% |
static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
if (!kprobe_ftrace(p))
arch_prepare_optimized_kprobe(op, p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masami Hiramatsu | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)