cregit (Linux): how code gets into the kernel

Release 4.18, file net/core/xdp.c

Directory: net/core
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>


/* Registration tracking states for xdp_rxq_info::reg_state */
#define REG_STATE_NEW		0x0

#define REG_STATE_REGISTERED	0x1

#define REG_STATE_UNREGISTERED	0x2

#define REG_STATE_UNUSED	0x3

/* IDA pool handing out cyclic mem.id values; both protected by mem_id_lock */
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);

/* mem.id range: 0xFFFF is used as a poison value when an entry is freed
 * (see __xdp_mem_allocator_rcu_free), hence MAX stops at 0xFFFE.
 */
#define MEM_ID_MAX 0xFFFE

#define MEM_ID_MIN 1

/* Next candidate id for the cyclic allocator; protected by mem_id_lock */
static int mem_id_next = MEM_ID_MIN;

/* Lazy-init flag for the id lookup table below; set under mem_id_lock in
 * __mem_id_init_hash_table() so memory is only spent if the feature is used.
 */
static bool mem_id_init; 
/* false */

/* Maps a mem.id to its struct xdp_mem_allocator entry */
static struct rhashtable *mem_id_ht;


/* Per-registration bookkeeping: ties a mem.id (the rhashtable key) to the
 * driver-supplied allocator object, so __xdp_return() can recycle
 * pages/frames back to the right owner.
 */
struct xdp_mem_allocator {
	struct xdp_mem_info mem;	/* type + cyclic id, copied from the rxq */
	union {				/* allocator backing this mem.id */
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;		/* linkage in mem_id_ht */
	struct rcu_head rcu;		/* deferred free, see __xdp_mem_allocator_rcu_free() */
};


static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed) { const u32 *k = data; const u32 key = *k; BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) != sizeof(u32)); /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */ return key << RHT_HASH_RESERVED_SPACE; }

Contributors

Person                    Tokens    Prop      Commits   Commit Prop
Jesper Dangaard Brouer    55        100.00%   2         100.00%
Total                     55        100.00%   2         100.00%


static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct xdp_mem_allocator *xa = ptr; u32 mem_id = *(u32 *)arg->key; return xa->mem.id != mem_id; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer45100.00%2100.00%
Total45100.00%2100.00%

static const struct rhashtable_params mem_id_rht_params = { .nelem_hint = 64, .head_offset = offsetof(struct xdp_mem_allocator, node), .key_offset = offsetof(struct xdp_mem_allocator, mem.id), .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id), .max_size = MEM_ID_MAX, .min_size = 8, .automatic_shrinking = true, .hashfn = xdp_mem_id_hashfn, .obj_cmpfn = xdp_mem_id_cmp, };
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) { struct xdp_mem_allocator *xa; xa = container_of(rcu, struct xdp_mem_allocator, rcu); /* Allow this ID to be reused */ ida_simple_remove(&mem_id_pool, xa->mem.id); /* Notice, driver is expected to free the *allocator, * e.g. page_pool, and MUST also use RCU free. */ /* Poison memory */ xa->mem.id = 0xFFFF; xa->mem.type = 0xF0F0; xa->allocator = (void *)0xDEAD9001; kfree(xa); }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer74100.00%3100.00%
Total74100.00%3100.00%


static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq) { struct xdp_mem_allocator *xa; int id = xdp_rxq->mem.id; int err; if (id == 0) return; mutex_lock(&mem_id_lock); xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); if (!xa) { mutex_unlock(&mem_id_lock); return; } err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params); WARN_ON(err); call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); mutex_unlock(&mem_id_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer102100.00%2100.00%
Total102100.00%2100.00%


void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) { /* Simplify driver cleanup code paths, allow unreg "unused" */ if (xdp_rxq->reg_state == REG_STATE_UNUSED) return; WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG"); __xdp_rxq_info_unreg_mem_model(xdp_rxq); xdp_rxq->reg_state = REG_STATE_UNREGISTERED; xdp_rxq->dev = NULL; /* Reset mem info to defaults */ xdp_rxq->mem.id = 0; xdp_rxq->mem.type = 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer68100.00%1100.00%
Total68100.00%1100.00%

EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq) { memset(xdp_rxq, 0, sizeof(*xdp_rxq)); }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer24100.00%1100.00%
Total24100.00%1100.00%

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index) { if (xdp_rxq->reg_state == REG_STATE_UNUSED) { WARN(1, "Driver promised not to register this"); return -EINVAL; } if (xdp_rxq->reg_state == REG_STATE_REGISTERED) { WARN(1, "Missing unregister, handled but fix driver"); xdp_rxq_info_unreg(xdp_rxq); } if (!dev) { WARN(1, "Missing net_device from driver"); return -ENODEV; } /* State either UNREGISTERED or NEW */ xdp_rxq_info_init(xdp_rxq); xdp_rxq->dev = dev; xdp_rxq->queue_index = queue_index; xdp_rxq->reg_state = REG_STATE_REGISTERED; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer106100.00%1100.00%
Total106100.00%1100.00%

EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq) { xdp_rxq->reg_state = REG_STATE_UNUSED; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer16100.00%1100.00%
Total16100.00%1100.00%

EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq) { return (xdp_rxq->reg_state == REG_STATE_REGISTERED); }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer19100.00%1100.00%
Total19100.00%1100.00%

EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void) { struct rhashtable *rht; int ret; if (unlikely(mem_id_init)) return 0; rht = kzalloc(sizeof(*rht), GFP_KERNEL); if (!rht) return -ENOMEM; ret = rhashtable_init(rht, &mem_id_rht_params); if (ret < 0) { kfree(rht); return ret; } mem_id_ht = rht; smp_mb(); /* mutex lock should provide enough pairing */ mem_id_init = true; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer89100.00%1100.00%
Total89100.00%1100.00%

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		/* Cyclic allocator: on exhaustion, wrap around once and
		 * retry from the lowest id before giving up.
		 */
		if (id == -ENOSPC && retries--) {
			mem_id_next = MEM_ID_MIN;
			goto again;
		}
		return id; /* errno */
	}

	mem_id_next = id + 1;

	return id;
}

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer78100.00%2100.00%
Total78100.00%2100.00%


static bool __is_supported_mem_type(enum xdp_mem_type type) { if (type == MEM_TYPE_PAGE_POOL) return is_page_pool_compiled_in(); if (type >= MEM_TYPE_MAX) return false; return true; }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer32100.00%1100.00%
Total32100.00%1100.00%


int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, enum xdp_mem_type type, void *allocator) { struct xdp_mem_allocator *xdp_alloc; gfp_t gfp = GFP_KERNEL; int id, errno, ret; void *ptr; if (xdp_rxq->reg_state != REG_STATE_REGISTERED) { WARN(1, "Missing register, driver bug"); return -EFAULT; } if (!__is_supported_mem_type(type)) return -EOPNOTSUPP; xdp_rxq->mem.type = type; if (!allocator) { if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY) return -EINVAL; /* Setup time check page_pool req */ return 0; } /* Delay init of rhashtable to save memory if feature isn't used */ if (!mem_id_init) { mutex_lock(&mem_id_lock); ret = __mem_id_init_hash_table(); mutex_unlock(&mem_id_lock); if (ret < 0) { WARN_ON(1); return ret; } } xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp); if (!xdp_alloc) return -ENOMEM; mutex_lock(&mem_id_lock); id = __mem_id_cyclic_get(gfp); if (id < 0) { errno = id; goto err; } xdp_rxq->mem.id = id; xdp_alloc->mem = xdp_rxq->mem; xdp_alloc->allocator = allocator; /* Insert allocator into ID lookup table */ ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node); if (IS_ERR(ptr)) { errno = PTR_ERR(ptr); goto err; } mutex_unlock(&mem_id_lock); return 0; err: mutex_unlock(&mem_id_lock); kfree(xdp_alloc); return errno; }

Contributors

Person                    Tokens    Prop      Commits   Commit Prop
Jesper Dangaard Brouer    274       98.56%    4         80.00%
Björn Töpel               4         1.44%     1         20.00%
Total                     278       100.00%   5         100.00%

EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem,
			 bool napi_direct, unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa)
			page_pool_put_page(xa->page_pool, page, napi_direct);
		else
			put_page(page);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page*/
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		if (!WARN_ON_ONCE(!xa))
			xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		/* Fix: terminate the case explicitly instead of silently
		 * falling through into default (harmless today, but it trips
		 * -Wimplicit-fallthrough and invites future bugs).
		 */
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

Contributors

Person                    Tokens    Prop      Commits   Commit Prop
Jesper Dangaard Brouer    112       66.27%    4         57.14%
Björn Töpel               48        28.40%    2         28.57%
Taehee Yoo                9         5.33%     1         14.29%
Total                     169       100.00%   7         100.00%


void xdp_return_frame(struct xdp_frame *xdpf) { __xdp_return(xdpf->data, &xdpf->mem, false, 0); }

Contributors

PersonTokensPropCommitsCommitProp
Björn Töpel2388.46%266.67%
Jesper Dangaard Brouer311.54%133.33%
Total26100.00%3100.00%

EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) { __xdp_return(xdpf->data, &xdpf->mem, true, 0); }

Contributors

PersonTokensPropCommitsCommitProp
Jesper Dangaard Brouer2492.31%150.00%
Björn Töpel27.69%150.00%
Total26100.00%2100.00%

EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp) { __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle); }

Contributors

PersonTokensPropCommitsCommitProp
Björn Töpel2790.00%266.67%
Jesper Dangaard Brouer310.00%133.33%
Total30100.00%3100.00%

EXPORT_SYMBOL_GPL(xdp_return_buff);

Overall Contributors

Person                    Tokens    Prop      Commits   Commit Prop
Jesper Dangaard Brouer    1342      91.60%    6         66.67%
Björn Töpel               114       7.78%     2         22.22%
Taehee Yoo                9         0.61%     1         11.11%
Total                     1465      100.00%   9         100.00%
Directory: net/core
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.