cregit-Linux: how code gets into the kernel

Release 4.8: net/core/skbuff.c

Directory: net/core
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>


struct kmem_cache *skbuff_head_cache __read_mostly;

static struct kmem_cache *skbuff_fclone_cache __read_mostly;

int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;

EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *      skb_panic - private function for out-of-line support
 *      @skb:   buffer
 *      @sz:    size
 *      @addr:  address
 *      @msg:   skb_over_panic or skb_under_panic
 *
 *      Out-of-line support for skb_put() and skb_push().
 *      Called via the wrapper skb_over_panic() or skb_under_panic().
 *      Keep out of line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always reliable.
 */

static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
pre-git | 40 | 51.28% | 3 | 30.00%
patrick mchardy | 16 | 20.51% | 1 | 10.00%
arnaldo carvalho de melo | 8 | 10.26% | 2 | 20.00%
jean sacren | 8 | 10.26% | 1 | 10.00%
joe perches | 3 | 3.85% | 1 | 10.00%
james hogan | 2 | 2.56% | 1 | 10.00%
rami rosen | 1 | 1.28% | 1 | 10.00%
Total | 78 | 100.00% | 10 | 100.00%
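The put/push helpers referred to above live elsewhere in this file. As an illustration only, the following simplified sketch (not the exact kernel code) shows how a skb_put()-style helper advances the tail pointer and hands off to the skb_over_panic() wrapper defined just below when a caller overruns the linear buffer:

/* Simplified sketch: how a skb_put()-style helper would trip skb_over_panic().
 * example_skb_put() is illustrative; the real skb_put() is defined later in
 * skbuff.c and also asserts the skb is linear.
 */
static unsigned char *example_skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);

        skb->tail += len;
        skb->len  += len;
        if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, __builtin_return_address(0));
        return tmp;
}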


static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
pre-git | 19 | 63.33% | 1 | 25.00%
jean sacren | 9 | 30.00% | 1 | 25.00%
patrick mchardy | 1 | 3.33% | 1 | 25.00%
rami rosen | 1 | 3.33% | 1 | 25.00%
Total | 30 | 100.00% | 4 | 100.00%


static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
jean sacren | 22 | 73.33% | 1 | 25.00%
patrick mchardy | 4 | 13.33% | 1 | 25.00%
pre-git | 3 | 10.00% | 1 | 25.00%
arnaldo carvalho de melo | 1 | 3.33% | 1 | 25.00%
Total | 30 | 100.00% | 4 | 100.00%

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
mel gorman | 93 | 98.94% | 1 | 50.00%
stephen hemminger | 1 | 1.06% | 1 | 50.00%
Total | 94 | 100.00% | 2 | 100.00%

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
        struct sk_buff *skb;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(skbuff_head_cache,
                                    gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);

        skb->mac_header = (typeof(skb->mac_header))~0U;
out:
        return skb;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
patrick mchardy | 94 | 91.26% | 1 | 25.00%
americo wang | 6 | 5.83% | 1 | 25.00%
david s. miller | 2 | 1.94% | 1 | 25.00%
pablo neira ayuso | 1 | 0.97% | 1 | 25.00%
Total | 103 | 100.00% | 4 | 100.00%

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff_fclones *fclones;

                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(&fclones->fclone_ref, 1);

                fclones->skb2.fclone = SKB_FCLONE_CLONE;
                fclones->skb2.pfmemalloc = pfmemalloc;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 86 | 22.45% | 6 | 13.64%
pre-git | 70 | 18.28% | 14 | 31.82%
david s. miller | 47 | 12.27% | 4 | 9.09%
mel gorman | 42 | 10.97% | 1 | 2.27%
ian pratt | 21 | 5.48% | 1 | 2.27%
benjamin lahaise | 20 | 5.22% | 1 | 2.27%
mala anand | 15 | 3.92% | 1 | 2.27%
americo wang | 12 | 3.13% | 1 | 2.27%
herbert xu | 11 | 2.87% | 1 | 2.27%
linus torvalds | 10 | 2.61% | 2 | 4.55%
arnaldo carvalho de melo | 10 | 2.61% | 4 | 9.09%
christoph hellwig | 8 | 2.09% | 1 | 2.27%
tony lindgren | 7 | 1.83% | 1 | 2.27%
stephen hemminger | 7 | 1.83% | 1 | 2.27%
patrick ohly | 7 | 1.83% | 1 | 2.27%
vegard nossum | 6 | 1.57% | 1 | 2.27%
christoph lameter | 2 | 0.52% | 1 | 2.27%
al viro | 1 | 0.26% | 1 | 2.27%
johannes berg | 1 | 0.26% | 1 | 2.27%
Total | 383 | 100.00% | 44 | 100.00%
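As a usage illustration (not part of this file), a typical caller goes through the alloc_skb() wrapper from <linux/skbuff.h>, reserves headroom, and then appends its payload to the linear area. A minimal hedged sketch, with example_build_packet() being a hypothetical helper:

/* Hedged usage sketch: building a small packet with the allocator above.
 * alloc_skb() is the <linux/skbuff.h> wrapper around __alloc_skb().
 */
static struct sk_buff *example_build_packet(const void *payload, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(128 + len, GFP_ATOMIC);

        if (!skb)
                return NULL;                     /* allocation may fail */
        skb_reserve(skb, 128);                   /* leave headroom for lower layers */
        memcpy(skb_put(skb, len), payload, len); /* append payload to linear area */
        return skb;
}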

EXPORT_SYMBOL(__alloc_skb);

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        return skb;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 183 | 91.96% | 4 | 66.67%
americo wang | 12 | 6.03% | 1 | 16.67%
david s. miller | 4 | 2.01% | 1 | 16.67%
Total | 199 | 100.00% | 6 | 100.00%

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct sk_buff *skb = __build_skb(data, frag_size);

        if (skb && frag_size) {
                skb->head_frag = 1;
                if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 57 | 95.00% | 1 | 50.00%
michal hocko | 3 | 5.00% | 1 | 50.00%
Total | 60 | 100.00% | 2 | 100.00%
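A hedged sketch of the RX pattern the notes above describe: the driver posts a plain data buffer to the NIC, and only after DMA completes wraps it in an sk_buff with build_skb(). The buffer itself can come from netdev_alloc_frag(), sized as shown in the fragment-sizing sketch after netdev_alloc_frag() below. example_rx_to_skb() and its parameters are hypothetical driver details:

/* Hedged sketch of the post-DMA step: the receive buffer already carries
 * NET_SKB_PAD headroom and skb_shared_info tailroom, so build_skb() only has
 * to attach an sk_buff head to it. rx_buf, pkt_len and rx_frag_size are
 * hypothetical values tracked by the driver's RX ring.
 */
static struct sk_buff *example_rx_to_skb(void *rx_buf, unsigned int pkt_len,
                                         unsigned int rx_frag_size)
{
        struct sk_buff *skb = build_skb(rx_buf, rx_frag_size);

        if (!skb)
                return NULL;
        skb_reserve(skb, NET_SKB_PAD);          /* skip the reserved headroom */
        skb_put(skb, pkt_len);                  /* frame the NIC wrote via DMA */
        return skb;
}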

EXPORT_SYMBOL(build_skb);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
        struct page_frag_cache page;
        size_t skb_count;
        void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        void *data;

        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = __alloc_page_frag(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alexander duyck | 47 | 79.66% | 3 | 60.00%
eric dumazet | 12 | 20.34% | 2 | 40.00%
Total | 59 | 100.00% | 5 | 100.00%

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
mel gorman | 20 | 100.00% | 1 | 100.00%
Total | 20 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(netdev_alloc_frag);
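As an illustration of the sizing rule from the __build_skb() notes above (headroom for NET_SKB_PAD plus tailroom for skb_shared_info), a hypothetical driver helper might allocate its receive fragments as follows; buf_len and example_alloc_rx_frag() are assumptions, not kernel API:

/* Hedged sketch: sizing and allocating one receive fragment before DMA.
 * buf_len would typically be derived from the device MTU.
 */
static void *example_alloc_rx_frag(unsigned int buf_len)
{
        unsigned int fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        return netdev_alloc_frag(fragsz);       /* may return NULL */
}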
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alexander duyck | 34 | 89.47% | 2 | 66.67%
jesper dangaard brouer | 4 | 10.53% | 1 | 33.33%
Total | 38 | 100.00% | 3 | 100.00%


void *napi_alloc_frag(unsigned int fragsz)
{
        return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alexander duyck | 20 | 100.00% | 1 | 100.00%
Total | 20 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(napi_alloc_frag);

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
                                   gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        struct sk_buff *skb;
        bool pfmemalloc;
        void *data;

        len += NET_SKB_PAD;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        local_irq_save(flags);

        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = __alloc_page_frag(nc, len, gfp_mask);
        pfmemalloc = nc->pfmemalloc;

        local_irq_restore(flags);

        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD);
        skb->dev = dev;

skb_fail:
        return skb;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alexander duyck | 144 | 63.16% | 5 | 41.67%
eric dumazet | 56 | 24.56% | 3 | 25.00%
christoph hellwig | 17 | 7.46% | 2 | 16.67%
mel gorman | 11 | 4.82% | 2 | 16.67%
Total | 228 | 100.00% | 12 | 100.00%
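A hedged usage sketch (not from this file) of a copy-based receive path built on the netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC and relies on the built-in NET_SKB_PAD headroom described above; hw_buf, pkt_len and example_rx_copy() are hypothetical:

/* Hedged sketch: copy a received frame out of a hardware buffer into a
 * freshly allocated skb and hand it to the stack. Needs <linux/etherdevice.h>
 * for eth_type_trans().
 */
static int example_rx_copy(struct net_device *dev, const void *hw_buf,
                           unsigned int pkt_len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);

        if (!skb)
                return NET_RX_DROP;                 /* out of memory: drop */
        memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
        skb->protocol = eth_type_trans(skb, dev);   /* strip/parse Ethernet header */
        return netif_rx(skb);
}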

EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
        void *data;

        len += NET_SKB_PAD + NET_IP_ALIGN;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        data = __alloc_page_frag(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (nc->page.pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        skb->dev = napi->dev;

skb_fail:
        return skb;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alexander duyck | 209 | 96.76% | 4 | 66.67%
jesper dangaard brouer | 6 | 2.78% | 1 | 16.67%
mel gorman | 1 | 0.46% | 1 | 16.67%
Total | 216 | 100.00% | 6 | 100.00%

EXPORT_SYMBOL(__napi_alloc_skb);
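A hedged sketch of the intended caller: a driver's NAPI poll loop allocating through the napi_alloc_skb() wrapper so the per-CPU napi_alloc_cache is used without the IRQ save/restore of the netdev variant. The surrounding driver details are hypothetical:

/* Hedged sketch: poll-context RX using napi_alloc_skb() and GRO delivery.
 * hw_buf and pkt_len are hypothetical values from the device's RX descriptor.
 */
static void example_napi_rx(struct napi_struct *napi, const void *hw_buf,
                            unsigned int pkt_len)
{
        struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);

        if (!skb)
                return;                          /* drop on allocation failure */
        memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
        skb->protocol = eth_type_trans(skb, napi->dev);
        napi_gro_receive(napi, skb);
}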
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
peter zijlstra | 54 | 91.53% | 1 | 50.00%
eric dumazet | 5 | 8.47% | 1 | 50.00%
Total | 59 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(skb_add_rx_frag);
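A short hedged sketch of how a driver hands a received page fragment to an skb without copying it; holding a page reference is the caller's job, since skb_release_data() later drops one reference per fragment via __skb_frag_unref(). The names and values here are hypothetical:

/* Hedged sketch: attach a page fragment as the next frag of an skb.
 * truesize should reflect the real memory backing the fragment.
 */
static void example_attach_frag(struct sk_buff *skb, struct page *page,
                                int offset, int frag_len, unsigned int truesize)
{
        get_page(page);         /* reference now owned by the skb's frag array */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                        frag_len, truesize);
}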
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
jason wang | 60 | 100.00% | 1 | 100.00%
Total | 60 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
linus torvalds | 11 | 47.83% | 1 | 25.00%
herbert xu | 7 | 30.43% | 1 | 25.00%
eric dumazet | 4 | 17.39% | 1 | 25.00%
pre-git | 1 | 4.35% | 1 | 25.00%
Total | 23 | 100.00% | 4 | 100.00%


static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
herbert xu | 23 | 100.00% | 1 | 100.00%
Total | 23 | 100.00% | 1 | 100.00%


static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
linus torvalds | 23 | 85.19% | 1 | 50.00%
david s. miller | 4 | 14.81% | 1 | 50.00%
Total | 27 | 100.00% | 2 | 100.00%


static void skb_free_head(struct sk_buff *skb)
{
        unsigned char *head = skb->head;

        if (skb->head_frag)
                skb_free_frag(head);
        else
                kfree(head);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 27 | 72.97% | 1 | 50.00%
alexander duyck | 10 | 27.03% | 1 | 50.00%
Total | 37 | 100.00% | 2 | 100.00%


static void skb_release_data(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i;

        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
                return;

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i]);

        /*
         * If skb buf is from userspace, we need to notify the caller
         * the lower device DMA has done;
         */
        if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
                struct ubuf_info *uarg;

                uarg = shinfo->destructor_arg;
                if (uarg->callback)
                        uarg->callback(uarg, true);
        }

        if (shinfo->frag_list)
                kfree_skb_list(shinfo->frag_list);

        skb_free_head(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 35 | 26.12% | 2 | 20.00%
shirley ma | 32 | 23.88% | 1 | 10.00%
linus torvalds | 30 | 22.39% | 1 | 10.00%
pre-git | 19 | 14.18% | 3 | 30.00%
herbert xu | 15 | 11.19% | 1 | 10.00%
michael s. tsirkin | 2 | 1.49% | 1 | 10.00%
adrian bunk | 1 | 0.75% | 1 | 10.00%
Total | 134 | 100.00% | 10 | 100.00%

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff_fclones *fclones;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                return;

        case SKB_FCLONE_ORIG:
                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                /* We usually free the clone (TX completion) before original skb
                 * This test would have no chance to be true for the clone,
                 * while here, branch prediction will be good.
                 */
                if (atomic_read(&fclones->fclone_ref) == 1)
                        goto fastpath;
                break;

        default: /* SKB_FCLONE_CLONE */
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
                break;
        }
        if (!atomic_dec_and_test(&fclones->fclone_ref))
                return;
fastpath:
        kmem_cache_free(skbuff_fclone_cache, fclones);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 43 | 41.75% | 2 | 25.00%
david s. miller | 42 | 40.78% | 1 | 12.50%
linus torvalds | 9 | 8.74% | 1 | 12.50%
pre-git | 5 | 4.85% | 2 | 25.00%
robert olsson | 3 | 2.91% | 1 | 12.50%
herbert xu | 1 | 0.97% | 1 | 12.50%
Total | 103 | 100.00% | 8 | 100.00%


static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
#endif
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
pre-git | 31 | 39.24% | 7 | 33.33%
yasuyuki kozakai | 12 | 15.19% | 2 | 9.52%
bart de schuymer | 10 | 12.66% | 2 | 9.52%
alexey kuznetsov | 7 | 8.86% | 1 | 4.76%
pablo neira ayuso | 5 | 6.33% | 1 | 4.76%
tomas szepe | 4 | 5.06% | 1 | 4.76%
kovacs krisztian | 2 | 2.53% | 1 | 4.76%
stephen hemminger | 2 | 2.53% | 1 | 4.76%
lennert buytenhek | 2 | 2.53% | 1 | 4.76%
andi kleen | 1 | 1.27% | 1 | 4.76%
herbert xu | 1 | 1.27% | 1 | 4.76%
eric dumazet | 1 | 1.27% | 1 | 4.76%
igor maravic | 1 | 1.27% | 1 | 4.76%
Total | 79 | 100.00% | 21 | 100.00%

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
lennert buytenhek | 15 | 50.00% | 1 | 25.00%
patrick mchardy | 8 | 26.67% | 1 | 25.00%
herbert xu | 6 | 20.00% | 1 | 25.00%
pablo neira ayuso | 1 | 3.33% | 1 | 25.00%
Total | 30 | 100.00% | 4 | 100.00%

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
herbert xu | 14 | 70.00% | 1 | 25.00%
pre-git | 6 | 30.00% | 3 | 75.00%
Total | 20 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
joern engel | 58 | 85.29% | 1 | 50.00%
neil horman | 10 | 14.71% | 1 | 50.00%
Total | 68 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(kfree_skb);
void kfree_skb_list(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                kfree_skb(segs);
                segs = next;
        }
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
eric dumazet | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                struct ubuf_info *uarg;

                uarg = skb_shinfo(skb)->destructor_arg;
                if (uarg->callback)
                        uarg->callback(uarg, false);
                skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        }
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
michael s. tsirkin | 62 | 100.00% | 1 | 100.00%
Total | 62 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(skb_tx_error);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_consume_skb(skb);
        __kfree_skb(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
neil horman | 58 | 92.06% | 1 | 50.00%
koki sanagi | 5 | 7.94% | 1 | 50.00%
Total | 63 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(consume_skb);
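The distinction in the comment above only shows up in tracing and drop monitoring (trace_consume_skb() versus trace_kfree_skb()); a hedged sketch of how a TX completion handler might choose between the two, with tx_ok being a hypothetical hardware status flag:

/* Hedged sketch: consume_skb() signals a normal, successful free, while
 * kfree_skb() marks the packet as dropped for drop-monitor and tracing.
 */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
        if (tx_ok)
                consume_skb(skb);       /* packet left the box: not a drop */
        else
                kfree_skb(skb);         /* report it as a dropped packet */
}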
void __kfree_skb_flush(void)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* flush skb_cache if containing objects */
        if (nc->skb_count) {
                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
jesper dangaard brouer | 46 | 100.00% | 1 | 100.00%
Total | 46 | 100.00% | 1 | 100.00%


static inline void _kfree_skb_defer(struct sk_buff *skb)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* drop skb->head and call any destructors for packet */
        skb_release_all(skb);

        /* record skb to CPU local list */
        nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
        /* SLUB writes into objects when freeing */
        prefetchw(skb);
#endif

        /* flush skb_cache if it is filled */
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
jesper dangaard brouer | 84 | 100.00% | 2 | 100.00%
Total | 84 | 100.00% | 2 | 100.00%


void __kfree_skb_defer(struct sk_buff *skb)
{
        _kfree_skb_defer(skb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
jesper dangaard brouer | 15 | 100.00% | 1 | 100.00%
Total | 15 | 100.00% | 1 | 100.00%


void napi_consume_skb(struct sk_buff *skb, int budget)
{
        if (unlikely