cregit-Linux: how code gets into the kernel

Release 4.15, kernel/bpf/verifier.c
Directory: kernel/bpf
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>

#include "disasm.h"


static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if a loop is present (detected via a back-edge)
 * - unreachable insns exist (the program must be one function, not a forest)
 * - out of bounds or malformed jumps
 * The second pass descends along all possible paths from the 1st insn.
 * Since it analyzes all paths through the program, the length of the
 * analysis is limited to BPF_COMPLEXITY_LIMIT_INSNS (128k) insns, which may
 * be hit even if the total number of insns is less than 4K, when there are
 * too many branches that change stack/regs.
 * The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against the
 * function's argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one such argument constraint.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument to be a pointer to the stack, which will be used inside
 * the helper function as a pointer to the map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here the verifier looks at the prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
 * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
 * Now the verifier checks that [R2, R2 + map's key_size) is within stack limits
 * and was initialized prior to this call.
 * If that is ok, the verifier allows this BPF_CALL insn and looks at
 * .ret_type, which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL, which means bpf_map_lookup_elem()
 * returns either a pointer to the map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to a known-zero
 * SCALAR_VALUE in the false branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
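
/* As an illustrative sketch (not part of the original comment) of the
 * first-pass check described above, a program that jumps backwards to form
 * a loop, e.g.:
 *
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
 *    BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 100, -2), // back-edge to the ADD insn
 *    BPF_EXIT_INSN(),
 *
 * is rejected by the DFS pass ("back-edge from insn 2 to 1") before any
 * register state tracking is attempted.
 */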

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};


#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */

static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
				   const char *fmt, ...)
{
        struct bpf_verifer_log *log = &env->log;
	unsigned int n;
	va_list args;

	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
		return;

	va_start(args, fmt);
	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
	va_end(args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;

}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Daniel Borkmann       19      100.00%  1        100.00%
Total                 19      100.00%  1        100.00%
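
/* The ubuf/len_total fields consumed by verbose() above come from user
 * space: a loader passes a log buffer with the BPF_PROG_LOAD command.  A
 * minimal, hypothetical user-space sketch (not part of this file):
 *
 *    char log_buf[65536];
 *    union bpf_attr attr = {
 *        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *        .insns     = (__u64)(unsigned long)insns,
 *        .insn_cnt  = insn_cnt,
 *        .license   = (__u64)(unsigned long)"GPL",
 *        .log_level = 1,
 *        .log_buf   = (__u64)(unsigned long)log_buf,
 *        .log_size  = sizeof(log_buf),
 *    };
 *    int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * With a non-zero log_level the verification trace (or the message that
 * rejected the program) is copied into log_buf.
 */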

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
};
static void print_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *state)
{
	struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d=%s", i, reg_type_str[t]);
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] == STACK_SPILL)
			verbose(env, " fp%d=%s",
				-MAX_BPF_STACK + i * BPF_REG_SIZE,
				reg_type_str[state->stack[i].spilled_ptr.type]);
	}
	verbose(env, "\n");
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    224     48.91%   5        35.71%
Jakub Kiciński        155     33.84%   3        21.43%
Edward Cree           70      15.28%   3        21.43%
Josef Bacik           5       1.09%    1        7.14%
Daniel Borkmann       3       0.66%    1        7.14%
Thomas Graf           1       0.22%    1        7.14%
Total                 458     100.00%  14       100.00%
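
/* For reference, a state line produced by print_verifier_state() at the
 * start of a program looks roughly like this (illustrative, exact fields
 * depend on the register states):
 *
 *    R1=ctx(id=0,off=0,imm=0) R10=fp0
 *
 * i.e. R1 is the context pointer, R10 the read-only frame pointer, and all
 * other registers are NOT_INIT and therefore not printed.
 */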


static int copy_stack_state(struct bpf_verifier_state *dst,
			    const struct bpf_verifier_state *src)
{
	if (!src->stack)
		return 0;
	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->stack, src->stack,
	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
	return 0;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    90      100.00%  1        100.00%
Total                 90      100.00%  1        100.00%

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_verifier_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which this function copies over. It points to previous bpf_verifier_state
 * which is never reallocated
 */
static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
				  bool copy_old)
{
	u32 old_size = state->allocated_stack;
	struct bpf_stack_state *new_stack;
	int slot = size / BPF_REG_SIZE;

	if (size <= old_size || !size) {
		if (copy_old)
			return 0;
		state->allocated_stack = slot * BPF_REG_SIZE;
		if (!size && old_size) {
			kfree(state->stack);
			state->stack = NULL;
		}
		return 0;
	}
	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
				  GFP_KERNEL);
	if (!new_stack)
		return -ENOMEM;
	if (copy_old) {
		if (state->stack)
			memcpy(new_stack, state->stack,
			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
		memset(new_stack + old_size / BPF_REG_SIZE, 0,
		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
	}
	state->allocated_stack = slot * BPF_REG_SIZE;
	kfree(state->stack);
	state->stack = new_stack;
	return 0;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    193     100.00%  1        100.00%
Total                 193     100.00%  1        100.00%


static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	kfree(state->stack);
	if (free_self)
		kfree(state);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    30      100.00%  2        100.00%
Total                 30      100.00%  2        100.00%

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_verifier_state(struct bpf_verifier_state *dst,
			       const struct bpf_verifier_state *src)
{
	int err;

	err = realloc_verifier_state(dst, src->allocated_stack, false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
	return copy_stack_state(dst, src);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    63      100.00%  1        100.00%
Total                 63      100.00%  1        100.00%


static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    135     98.54%   6        85.71%
Jakub Kiciński        2       1.46%    1        14.29%
Total                 137     100.00%  7        100.00%


static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
		verbose(env, "BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL));
	return NULL;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    145     95.39%   4        57.14%
Jakub Kiciński        6       3.95%    2        28.57%
Daniel Borkmann       1       0.66%    1        14.29%
Total                 152     100.00%  7        100.00%
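
/* Illustration (not from the original source): for a conditional jump such
 * as 'if r0 == 0 goto +2' at insn 5, check_cond_jmp_op() calls push_stack()
 * to save a copy of the current state with insn_idx = 8 (the jump target)
 * and verification continues down the fall-through path.  Once that path
 * reaches BPF_EXIT, pop_stack() resumes from insn 8 with the saved state.
 * More than BPF_COMPLEXITY_LIMIT_STACK (1k) pending states aborts the load
 * with "BPF program is too complex".
 */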

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(struct bpf_reg_state *reg);

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->id = 0;
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           45      76.27%   2        28.57%
Alexei Starovoitov    8       13.56%   2        28.57%
Josef Bacik           3       5.08%    1        14.29%
Daniel Borkmann       2       3.39%    1        14.29%
Jakub Kiciński        1       1.69%    1        14.29%
Total                 59      100.00%  7        100.00%

/* Mark the 'variable offset' part of a register as zero.  This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           16      88.89%   2        50.00%
Josef Bacik           1       5.56%    1        25.00%
Daniel Borkmann       1       5.56%    1        25.00%
Total                 18      100.00%  4        100.00%


static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           35      51.47%   1        20.00%
Daniel Borkmann       18      26.47%   1        20.00%
Jakub Kiciński        7       10.29%   1        20.00%
David S. Miller       5       7.35%    1        20.00%
Alexei Starovoitov    3       4.41%    1        20.00%
Total                 68      100.00%  5        100.00%


static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Daniel Borkmann       20      100.00%  1        100.00%
Total                 20      100.00%  1        100.00%


static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Daniel Borkmann       24      100.00%  1        100.00%
Total                 24      100.00%  1        100.00%

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Daniel Borkmann       45      100.00%  1        100.00%
Total                 45      100.00%  1        100.00%

/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           99      89.19%   2        40.00%
Alexei Starovoitov    10      9.01%    1        20.00%
Jakub Kiciński        1       0.90%    1        20.00%
Thomas Graf           1       0.90%    1        20.00%
Total                 111     100.00%  5        100.00%
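
/* Worked example (illustrative, not from the original source): suppose
 * var_off = { .value = 0x0, .mask = 0x3 }, i.e. the two low bits are unknown
 * and every other bit is known to be zero.  The sign bit is known clear, so
 * __update_reg_bounds() computes:
 *
 *    smin_value = max(smin_value, 0x0 | (0x3 & S64_MIN)) = max(smin_value, 0)
 *    smax_value = min(smax_value, 0x0 | (0x3 & S64_MAX)) = min(smax_value, 3)
 *    umin_value = max(umin_value, 0x0)
 *    umax_value = min(umax_value, 0x3)
 *
 * i.e. whatever the old bounds were, the register is now known to lie in
 * [0, 3].
 */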

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           149     93.71%   2        66.67%
Daniel Borkmann       10      6.29%    1        33.33%
Total                 159     100.00%  3        100.00%

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_intersect(reg->var_off,
				      tnum_range(reg->umin_value,
						 reg->umax_value));
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           33      100.00%  1        100.00%
Total                 33      100.00%  1        100.00%

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           35      100.00%  1        100.00%
Total                 35      100.00%  1        100.00%

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
	reg->type = SCALAR_VALUE;
	reg->id = 0;
	reg->off = 0;
	reg->var_off = tnum_unknown;
	__mark_reg_unbounded(reg);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           40      100.00%  1        100.00%
Total                 40      100.00%  1        100.00%


static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_unknown(regs + regno);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           55      80.88%   2        50.00%
Jakub Kiciński        7       10.29%   1        25.00%
Daniel Borkmann       6       8.82%    1        25.00%
Total                 68      100.00%  4        100.00%


static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
	__mark_reg_unknown(reg);
	reg->type = NOT_INIT;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           20      90.91%   1        50.00%
Josef Bacik           2       9.09%    1        50.00%
Total                 22      100.00%  2        100.00%


static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(regs + regno);
		return;
	}
	__mark_reg_not_init(regs + regno);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           46      67.65%   1        33.33%
Josef Bacik           15      22.06%   1        33.33%
Jakub Kiciński        7       10.29%   1        33.33%
Total                 68      100.00%  3        100.00%


static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
	mark_reg_known_zero(env, regs, BPF_REG_1);
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           64      71.11%   2        33.33%
Jakub Kiciński        11      12.22%   1        16.67%
David S. Miller       6       6.67%    1        16.67%
Daniel Borkmann       6       6.67%    1        16.67%
Josef Bacik           3       3.33%    1        16.67%
Total                 90      100.00%  6        100.00%

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};
static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
{
	struct bpf_verifier_state *parent = state->parent;

	if (regno == BPF_REG_FP)
		/* We don't need to worry about FP liveness because it's read-only */
		return;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (state->regs[regno].live & REG_LIVE_WRITTEN)
			break;
		/* ... then we depend on parent's value */
		parent->regs[regno].live |= REG_LIVE_READ;
		state = parent;
		parent = state->parent;
	}
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Edward Cree           67      89.33%   1        50.00%
Alexei Starovoitov    8       10.67%   1        50.00%
Total                 75      100.00%  2        100.00%
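
/* Liveness example (illustrative, not from the original source): if a state
 * S2 whose parent is S1 reads R6, and nothing in S2 has written R6 first
 * (REG_LIVE_WRITTEN not set), the read marks R6 as REG_LIVE_READ in S1, then
 * in S1's parent, and so on up the chain until a state that wrote R6 is
 * found.  When states are later compared for pruning, registers that were
 * never marked read this way can be ignored.
 */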


static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_reg_state *regs = env->cur_state->regs;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		mark_reg_read(env->cur_state, regno);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		regs[regno].live |= REG_LIVE_WRITTEN;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    105     72.92%   2        40.00%
Edward Cree           31      21.53%   2        40.00%
Jakub Kiciński        8       5.56%    1        20.00%
Total                 144     100.00%  5        100.00%


static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}

Contributors

Person                Tokens  Prop     Commits  CommitProp
Alexei Starovoitov    44      93.62%   2        66.67%
Daniel Borkmann       3       6.38%    1        33.33%
Total                 47      100.00%  3        100.00%
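
/* A spillable type is one whose pointer-ness check_stack_write() preserves
 * when the program saves a register to the stack and later reloads it.
 * Hypothetical snippet (illustration only):
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill ctx ptr to fp-8
 *    ...
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // fill: R1 is PTR_TO_CTX again
 *
 * Only full, aligned BPF_REG_SIZE (8 byte) spills of these pointer types keep
 * their type across the round trip; other stack data is tracked as STACK_MISC.
 */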

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     struct bpf_verifier_state *state, int off,
			     int size, int value_regno)
{
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;

	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
				     true);
	if (err)
		return err;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */
	if (!env->allow_ptr_leaks && state->stack[spi].slot_type