Release 4.15 kernel/bpf/hashtab.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
BPF_F_RDONLY | BPF_F_WRONLY)
struct bucket {
struct hlist_nulls_head head;
raw_spinlock_t lock;
};
struct bpf_htab {
struct bpf_map map;
struct bucket *buckets;
void *elems;
union {
struct pcpu_freelist freelist;
struct bpf_lru lru;
};
struct htab_elem *__percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
};
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
struct hlist_nulls_node hash_node;
struct {
void *padding;
union {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
};
};
};
union {
struct rcu_head rcu;
struct bpf_lru_node lru_node;
};
u32 hash;
char key[0] __aligned(8);
};
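Each element is laid out as header, then key, then value (or, for per-CPU maps, a pointer to per-CPU storage). A minimal layout sketch, assuming illustrative sizes (key_size = 6, value_size = 10; the header size is arch-dependent):
/* Layout sketch for one non-percpu element (illustrative sizes only):
 *
 *   offset 0                      struct htab_elem       header
 *   offset sizeof(htab_elem)      key[round_up(6, 8)]    6-byte key, padded to 8
 *   offset ... + 8                value[round_up(10, 8)] 10-byte value, padded to 16
 *
 * elem_size = sizeof(struct htab_elem) + round_up(key_size, 8) +
 *             round_up(value_size, 8), matching htab_map_alloc() below;
 * the value of element l therefore starts at l->key + round_up(key_size, 8).
 */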
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
static bool htab_is_lru(const struct bpf_htab *htab)
{
return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 29 | 100.00% | 2 | 100.00% |
Total | 29 | 100.00% | 2 | 100.00% |
static bool htab_is_percpu(const struct bpf_htab *htab)
{
return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 29 | 100.00% | 2 | 100.00% |
Total | 29 | 100.00% | 2 | 100.00% |
static bool htab_is_prealloc(const struct bpf_htab *htab)
{
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr)
{
*(void __percpu **)(l->key + key_size) = pptr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
return *(void __percpu **)(l->key + key_size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
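For per-CPU maps the slot behind the key holds not the value itself but a pointer to a per-CPU allocation. A minimal usage sketch of the two helpers above (kernel-internal, assuming a valid element l and the map's key_size and value_size):
	/* store: stash the per-cpu value area behind the key */
	void __percpu *pptr = __alloc_percpu_gfp(round_up(value_size, 8), 8,
						 GFP_USER | __GFP_NOWARN);
	htab_elem_set_ptr(l, key_size, pptr);

	/* load: recover it and address the current CPU's copy */
	void *val = this_cpu_ptr(htab_elem_get_ptr(l, key_size));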
static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
return *(void **)(l->key + roundup(map->key_size, 8));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static void htab_free_elems(struct bpf_htab *htab)
{
int i;
if (!htab_is_percpu(htab))
goto free_elems;
for (i = 0; i < htab->map.max_entries; i++) {
void __percpu *pptr;
pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
htab->map.key_size);
free_percpu(pptr);
cond_resched();
}
free_elems:
bpf_map_area_free(htab->elems);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 76 | 90.48% | 1 | 25.00% |
Martin KaFai Lau | 4 | 4.76% | 1 | 25.00% |
Eric Dumazet | 3 | 3.57% | 1 | 25.00% |
Daniel Borkmann | 1 | 1.19% | 1 | 25.00% |
Total | 84 | 100.00% | 4 | 100.00% |
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
u32 hash)
{
struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
struct htab_elem *l;
if (node) {
l = container_of(node, struct htab_elem, lru_node);
memcpy(l->key, key, htab->map.key_size);
return l;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
static int prealloc_init(struct bpf_htab *htab)
{
u32 num_entries = htab->map.max_entries;
int err = -ENOMEM, i;
if (!htab_is_percpu(htab) && !htab_is_lru(htab))
num_entries += num_possible_cpus();
htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
htab->map.numa_node);
if (!htab->elems)
return -ENOMEM;
if (!htab_is_percpu(htab))
goto skip_percpu_elems;
for (i = 0; i < num_entries; i++) {
u32 size = round_up(htab->map.value_size, 8);
void __percpu *pptr;
pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
if (!pptr)
goto free_elems;
htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
pptr);
cond_resched();
}
skip_percpu_elems:
if (htab_is_lru(htab))
err = bpf_lru_init(&htab->lru,
htab->map.map_flags & BPF_F_NO_COMMON_LRU,
offsetof(struct htab_elem, hash) -
offsetof(struct htab_elem, lru_node),
htab_lru_map_delete_node,
htab);
else
err = pcpu_freelist_init(&htab->freelist);
if (err)
goto free_elems;
if (htab_is_lru(htab))
bpf_lru_populate(&htab->lru, htab->elems,
offsetof(struct htab_elem, lru_node),
htab->elem_size, num_entries);
else
pcpu_freelist_populate(&htab->freelist,
htab->elems + offsetof(struct htab_elem, fnode),
htab->elem_size, num_entries);
return 0;
free_elems:
htab_free_elems(htab);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 208 | 68.87% | 3 | 37.50% |
Martin KaFai Lau | 90 | 29.80% | 3 | 37.50% |
Eric Dumazet | 3 | 0.99% | 1 | 12.50% |
Daniel Borkmann | 1 | 0.33% | 1 | 12.50% |
Total | 302 | 100.00% | 8 | 100.00% |
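The populate call at the end hands pcpu_freelist_populate() a base pointer already offset to the embedded fnode, plus the stride between elements. Conceptually it does the following (an illustrative loop, not the real implementation, which also spreads the elements across the per-CPU lists):
	/* walk the flat elems array in elem_size strides and link each
	 * element's embedded freelist node in:
	 */
	for (i = 0; i < num_entries; i++) {
		struct pcpu_freelist_node *fnode = (void *)htab->elems +
			i * htab->elem_size +
			offsetof(struct htab_elem, fnode);
		/* ... fnode is pushed onto one of the per-cpu freelists */
	}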
static void prealloc_destroy(struct bpf_htab *htab)
{
htab_free_elems(htab);
if (htab_is_lru(htab))
bpf_lru_destroy(&htab->lru);
else
pcpu_freelist_destroy(&htab->freelist);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
static int alloc_extra_elems(struct bpf_htab *htab)
{
struct htab_elem *__percpu *pptr, *l_new;
struct pcpu_freelist_node *l;
int cpu;
pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
GFP_USER | __GFP_NOWARN);
if (!pptr)
return -ENOMEM;
for_each_possible_cpu(cpu) {
l = pcpu_freelist_pop(&htab->freelist);
/* pop will succeed, since prealloc_init()
* preallocated extra num_possible_cpus elements
*/
l_new = container_of(l, struct htab_elem, fnode);
*per_cpu_ptr(pptr, cpu) = l_new;
}
htab->extra_elems = pptr;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 104 | 100.00% | 2 | 100.00% |
Total | 104 | 100.00% | 2 | 100.00% |
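The stashed per-CPU spares let htab_map_update_elem() replace an existing element on a full preallocated map without touching the freelist. Roughly how the update path (not shown in this section) consumes one, as a sketch:
	/* on this CPU, take the spare for the element being replaced;
	 * the displaced old element later becomes the new spare
	 */
	struct htab_elem *l_new = *this_cpu_ptr(htab->extra_elems);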
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
/* percpu_lru means each cpu has its own LRU list.
 * It is different from BPF_MAP_TYPE_PERCPU_HASH where
 * the map's value itself is percpu. percpu_lru has
 * nothing to do with the map's value.
 */
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_htab *htab;
int err, i;
u64 cost;
BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
offsetof(struct htab_elem, hash_node.pprev));
if (lru && !capable(CAP_SYS_ADMIN))
/* The LRU implementation is much more complicated than
 * other maps. Hence, limit it to CAP_SYS_ADMIN for now.
 */
return ERR_PTR(-EPERM);
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */
return ERR_PTR(-EINVAL);
if (!lru && percpu_lru)
return ERR_PTR(-EINVAL);
if (lru && !prealloc)
return ERR_PTR(-ENOTSUPP);
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
return ERR_PTR(-EINVAL);
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
return ERR_PTR(-ENOMEM);
/* mandatory map attributes */
htab->map.map_type = attr->map_type;
htab->map.key_size = attr->key_size;
htab->map.value_size = attr->value_size;
htab->map.max_entries = attr->max_entries;
htab->map.map_flags = attr->map_flags;
htab->map.numa_node = numa_node;
/* check sanity of attributes.
* value_size == 0 may be allowed in the future to use map as a set
*/
err = -EINVAL;
if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
htab->map.value_size == 0)
goto free_htab;
if (percpu_lru) {
/* ensure each CPU's lru list has >= 1 element.
 * While we are at it, give each lru list the same
 * number of elements.
 */
htab->map.max_entries = roundup(attr->max_entries,
num_possible_cpus());
if (htab->map.max_entries < attr->max_entries)
htab->map.max_entries = rounddown(attr->max_entries,
num_possible_cpus());
}
/* hash table size must be power of 2 */
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
err = -E2BIG;
if (htab->map.key_size > MAX_BPF_STACK)
/* eBPF programs initialize keys on stack, so they cannot be
* larger than max stack size
*/
goto free_htab;
if (htab->map.value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
/* if value_size is bigger, user space won't be able to
 * access the elements via the bpf syscall. This check also
 * makes sure that elem_size doesn't overflow and is
 * kmalloc-able later in htab_map_update_elem()
 */
goto free_htab;
htab->elem_size = sizeof(struct htab_elem) +
round_up(htab->map.key_size, 8);
if (percpu)
htab->elem_size += sizeof(void *);
else
htab->elem_size += round_up(htab->map.value_size, 8);
/* prevent zero size kmalloc and check for u32 overflow */
if (htab->n_buckets == 0 ||
htab->n_buckets > U32_MAX / sizeof(struct bucket))
goto free_htab;
cost = (u64) htab->n_buckets * sizeof(struct bucket) +
(u64) htab->elem_size * htab->map.max_entries;
if (percpu)
cost += (u64) round_up(htab->map.value_size, 8) *
num_possible_cpus() * htab->map.max_entries;
else
cost += (u64) htab->elem_size * num_possible_cpus();
if (cost >= U32_MAX - PAGE_SIZE)
/* make sure page count doesn't overflow */
goto free_htab;
htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* if map size is larger than memlock limit, reject it early */
err = bpf_map_precharge_memlock(htab->map.pages);
if (err)
goto free_htab;
err = -ENOMEM;
htab->buckets = bpf_map_area_alloc(htab->n_buckets *
sizeof(struct bucket),
htab->map.numa_node);
if (!htab->buckets)
goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
}
if (prealloc) {
err = prealloc_init(htab);
if (err)
goto free_buckets;
if (!percpu && !lru) {
/* lru itself can remove the least used element, so
* there is no need for an extra elem during map_update.
*/
err = alloc_extra_elems(htab);
if (err)
goto free_prealloc;
}
}
return &htab->map;
free_prealloc:
prealloc_destroy(htab);
free_buckets:
bpf_map_area_free(htab->buckets);
free_htab:
kfree(htab);
return ERR_PTR(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 575 | 72.15% | 9 | 52.94% |
Martin KaFai Lau | 205 | 25.72% | 3 | 17.65% |
Lei Ming | 13 | 1.63% | 2 | 11.76% |
Daniel Borkmann | 2 | 0.25% | 1 | 5.88% |
Yang Shi | 1 | 0.13% | 1 | 5.88% |
Michal Hocko | 1 | 0.13% | 1 | 5.88% |
Total | 797 | 100.00% | 17 | 100.00% |
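A worked sizing example, assuming illustrative attributes (non-percpu, non-LRU, key_size = 8, value_size = 16, max_entries = 1000, 4 possible CPUs):
/* n_buckets = roundup_pow_of_two(1000) = 1024
 * elem_size = sizeof(struct htab_elem) + round_up(8, 8) + round_up(16, 8)
 * cost      = 1024 * sizeof(struct bucket)   hash buckets
 *           + 1000 * elem_size               preallocated elements
 *           + 4 * elem_size                  per-CPU extra elems
 * pages     = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT, charged against
 *             the memlock rlimit by bpf_map_precharge_memlock().
 */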
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
return jhash(key, key_len, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
return &htab->buckets[hash & (htab->n_buckets - 1)];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 32 | 94.12% | 1 | 50.00% |
Lei Ming | 2 | 5.88% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
return &__select_bucket(htab, hash)->head;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lei Ming | 27 | 96.43% | 1 | 50.00% |
Alexei Starovoitov | 1 | 3.57% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
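Since n_buckets is always a power of two, bucket selection reduces to masking the low bits of the jhash value; for example, with n_buckets = 1024 (illustrative):
	u32 hash = htab_map_hash(key, key_size);	/* full 32-bit jhash */
	struct bucket *b = &htab->buckets[hash & 1023];	/* hash & (n_buckets - 1) */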
/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
void *key, u32 key_size)
{
struct hlist_nulls_node *n;
struct htab_elem *l;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 70 | 100.00% | 2 | 100.00% |
Total | 70 | 100.00% | 2 | 100.00% |
/* Can be called without the bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list was being walked.
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
u32 hash, void *key,
u32 key_size, u32 n_buckets)
{
struct hlist_nulls_node *n;
struct htab_elem *l;
again:
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;
if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
goto again;
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 98 | 100.00% | 1 | 100.00% |
Total | 98 | 100.00% | 1 | 100.00% |
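The retry matters because preallocated elements are recycled: a deleted element can be reused and rehashed into a different bucket while a lockless reader is still traversing it. The nulls marker terminating each list encodes the bucket index (see INIT_HLIST_NULLS_HEAD(..., i) in htab_map_alloc() above), so the walker can detect that it ended up in the wrong bucket. A concrete interleaving (illustrative):
/* CPU0: walks bucket A and holds a pointer to element l.
 * CPU1: deletes l; l returns to the freelist, is reused at once and
 *        inserted into bucket B under a different key.
 * CPU0: keeps following l's ->next chain, which now belongs to B,
 *        and terminates on B's nulls marker.
 * get_nulls_value(n) == B != (hash & (n_buckets - 1)) == A
 *        ==> goto again, re-walk bucket A from its head.
 */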
/* Called from syscall or from eBPF program directly, so
* arguments have to match bpf_map_lookup_elem() exactly.
* The return value is adjusted by BPF instructions
* in htab_map_gen_lookup().
*/
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_nulls_head *head;
struct htab_elem *l;
u32 hash, key_size;
/* Must be called with rcu_read_lock. */
WARN_ON_ONCE(!rcu_read_lock_held());
key_size = map->key_size;
hash = htab_map_hash(key, key_size);
head = select_bucket(htab, hash);
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
return l;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 98 | 100.00% | 3 | 100.00% |
Total | 98 | 100.00% | 3 | 100.00% |
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct htab_elem *l = __htab_map_lookup_elem(map, key);
if (l)
return l->key + round_up(map->key_size, 8);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 49 | 100.00% | 2 | 100.00% |
Total | 49 | 100.00% | 2 | 100.00% |
/* inline bpf_map_lookup_elem() call.
* Instead of:
* bpf_prog
* bpf_map_lookup_elem
* map->ops->map_lookup_elem
* htab_map_lookup_elem
* __htab_map_lookup_elem
* do:
* bpf_prog
* __htab_map_lookup_elem
*/
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
round_up(map->key_size, 8));
return insn - insn_buf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexei Starovoitov | 103 | 100.00% | 1 | 100.00% |
Total | 103 | 100.00% | 1 | 100.00% |
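The three emitted instructions correspond to the following C, spliced into the calling program in place of the helper chain (a sketch; R0 is the BPF return-value register):
	/* R0 = __htab_map_lookup_elem(map, key);
	 * if (R0 != NULL)
	 *	R0 += offsetof(struct htab_elem, key) +
	 *	      round_up(map->key_size, 8);	// R0 now points at the value
	 */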
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
struct htab_elem *l = __htab_map_lookup_elem(map, key);
if (l) {
bpf_lru_node_set_ref(&l->lru_node);
return l->key + round_up(map->key_size, 8);
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 39 | 66.10% | 1 | 50.00% |
Alexei Starovoitov | 20 | 33.90% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
const int ref_reg = BPF_REG_1;
*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
offsetof(struct htab_elem, lru_node) +
offsetof(struct bpf_lru_node, ref));
*insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
*insn++ = BPF_ST_MEM(BPF_B, ret,
offsetof(struct htab_elem, lru_node) +
offsetof(struct bpf_lru_node, ref),
1);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
round_up(map->key_size, 8));
return insn - insn_buf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 182 | 100.00% | 2 | 100.00% |
Total | 182 | 100.00% | 2 | 100.00% |
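The LRU variant additionally inlines a set-only update of the reference bit; testing before storing avoids dirtying a shared cache line on every hit. In C terms, a sketch:
	/* l = __htab_map_lookup_elem(map, key);
	 * if (l) {
	 *	if (!l->lru_node.ref)
	 *		l->lru_node.ref = 1;
	 *	l = (void *)l + offsetof(struct htab_elem, key) +
	 *	    round_up(map->key_size, 8);
	 * }
	 */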
/* It is called from the bpf_lru_list when the LRU needs to delete
* older elements from the htab.
*/
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
struct bpf_htab *htab = (struct bpf_htab *)arg;
struct htab_elem *l = NULL, *tgt_l;
struct hlist_nulls_head *head;
struct hlist_nulls_node *n;
unsigned long flags;
struct bucket *b;
tgt_l = container_of(node, struct htab_elem, lru_node);
b = __select_bucket(htab, tgt_l->hash);
head = &b->head;
raw_spin_lock_irqsave(&b->lock, flags);
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
hlist_nulls_del_rcu(&l->hash_node);
break;
}
raw_spin_unlock_irqrestore(&b->lock, flags);
return l == tgt_l;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin KaFai Lau | 109 | 78.99% | 1 | 33.33% |
Alexei Starovoitov | 29 | 21.01% | 2 | 66.67% |
Total | 138 | 100.00% | 3 | 100.00% |
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct hlist_nulls_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
int i = 0;
WARN_ON_ONCE(!rcu_read_lock_held());
key_size = map->key_size;
if (!key)
goto find_first_elem;
hash = htab_map_hash(key, key_size);
head = select_bucket(htab, hash);
/* lookup the key */
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
if (!l)
goto find_first_elem;
/* key was found, get next key in the same bucket */
next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
struct htab_elem, hash_node);
if (next_l) {
/* if next elem in this hash list is non-zero, just return it */
memcpy(next_key, next_l->key, key_size);
return 0;
}
/* no more elements in this hash list, go to the next bucket */
i = hash & (htab->n_buckets - 1);