cregit-Linux: how code gets into the kernel

Release 4.11, net/core/skbuff.c (directory: net/core)
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>


struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *      skb_panic - private function for out-of-line support
 *      @skb:   buffer
 *      @sz:    size
 *      @addr:  address
 *      @msg:   skb_over_panic or skb_under_panic
 *
 *      Out-of-line support for skb_put() and skb_push().
 *      Called via the wrapper skb_over_panic() or skb_under_panic().
 *      Keep out of line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always reliable.
 */

static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)    40      51.28%   3        30.00%
Patrick McHardy             16      20.51%   1        10.00%
Arnaldo Carvalho de Melo    8       10.26%   2        20.00%
Jean Sacren                 8       10.26%   1        10.00%
Joe Perches                 3       3.85%    1        10.00%
James Hogan                 2       2.56%    1        10.00%
Rami Rosen                  1       1.28%    1        10.00%
Total                       78      100.00%  10       100.00%


static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)    19      63.33%   1        25.00%
Jean Sacren                 9       30.00%   1        25.00%
Rami Rosen                  1       3.33%    1        25.00%
Patrick McHardy             1       3.33%    1        25.00%
Total                       30      100.00%  4        100.00%


static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jean Sacren                 22      73.33%   1        25.00%
Patrick McHardy             4       13.33%   1        25.00%
Linus Torvalds (pre-git)    3       10.00%   1        25.00%
Arnaldo Carvalho de Melo    1       3.33%    1        25.00%
Total                       30      100.00%  4        100.00%
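
The two wrappers above keep the bounds checks in the fast-path buffer helpers small. As a hedged sketch (modelled loosely on skb_put(), which is defined later in this file; the function name and exact body here are illustrative, not a copy), a tail-extension helper ends up in skb_over_panic() like this:

/* Hedged sketch: how a tail-extension helper trips skb_over_panic().
 * example_put() is an illustrative name, not a kernel symbol.
 */
static unsigned char *example_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);

        skb->tail += len;
        skb->len  += len;
        if (unlikely(skb->tail > skb->end))     /* ran past the allocation */
                skb_over_panic(skb, len, __builtin_return_address(0));
        return tmp;                             /* start of the newly added area */
}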

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  93      98.94%   1        50.00%
Stephen Hemminger           1       1.06%    1        50.00%
Total                       94      100.00%  2        100.00%

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
        struct sk_buff *skb;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(skbuff_head_cache,
                                    gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);

        skb->mac_header = (typeof(skb->mac_header))~0U;
out:
        return skb;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Patrick McHardy             94      91.26%   1        25.00%
Américo Wang                6       5.83%    1        25.00%
David S. Miller             2       1.94%    1        25.00%
Pablo Neira Ayuso           1       0.97%    1        25.00%
Total                       103     100.00%  4        100.00%

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *      instead of head cache and allocate a cloned (child) skb.
 *      If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *      allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff_fclones *fclones;

                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(&fclones->fclone_ref, 1);

                fclones->skb2.fclone = SKB_FCLONE_CLONE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                83      22.13%   6        13.64%
Linus Torvalds (pre-git)    70      18.67%   14       31.82%
David S. Miller             47      12.53%   4        9.09%
Mel Gorman                  37      9.87%    1        2.27%
Ian Pratt                   21      5.60%    1        2.27%
Benjamin LaHaise            20      5.33%    1        2.27%
Mala Anand                  15      4.00%    1        2.27%
Américo Wang                12      3.20%    1        2.27%
Herbert Xu                  11      2.93%    1        2.27%
Linus Torvalds              10      2.67%    2        4.55%
Arnaldo Carvalho de Melo    10      2.67%    4        9.09%
Christoph Hellwig           8       2.13%    1        2.27%
Patrick Ohly                7       1.87%    1        2.27%
Tony Lindgren               7       1.87%    1        2.27%
Stephen Hemminger           7       1.87%    1        2.27%
Vegard Nossum               6       1.60%    1        2.27%
Christoph Lameter           2       0.53%    1        2.27%
Al Viro                     1       0.27%    1        2.27%
Johannes Berg               1       0.27%    1        2.27%
Total                       375     100.00%  44       100.00%
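
Most callers do not use __alloc_skb() directly but go through the alloc_skb() inline wrapper from <linux/skbuff.h>, which passes flags=0 and NUMA_NO_NODE. A hedged sketch of typical protocol-side usage (the headroom size and payload handling are illustrative, and error handling is reduced to the NULL check):

/* Hedged sketch of a typical caller: reserve headroom for headers that
 * will be pushed later, then append payload at the tail.
 * example_build_packet() is an illustrative name, not a kernel symbol.
 */
static struct sk_buff *example_build_packet(const void *payload,
                                            unsigned int len)
{
        struct sk_buff *skb;

        skb = alloc_skb(LL_MAX_HEADER + len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, LL_MAX_HEADER);        /* headroom for headers pushed later */
        memcpy(skb_put(skb, len), payload, len);        /* payload goes at the tail */
        return skb;
}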

EXPORT_SYMBOL(__alloc_skb);

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, driver allocates only data buffer where NIC put incoming frame
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        return skb;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                183     91.96%   4        66.67%
Américo Wang                12      6.03%    1        16.67%
David S. Miller             4       2.01%    1        16.67%
Total                       199     100.00%  6        100.00%

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct sk_buff *skb = __build_skb(data, frag_size);

        if (skb && frag_size) {
                skb->head_frag = 1;
                if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                57      95.00%   1        50.00%
Michal Hocko                3       5.00%    1        50.00%
Total                       60      100.00%  2        100.00%
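
Taken together with the frag allocators that follow, the documented driver pattern is: allocate a raw buffer up front, let the NIC DMA into it, and only then attach an sk_buff with build_skb(). A hedged sketch under those assumptions (the DMA step is elided; the headroom/tailroom arithmetic follows the kernel-doc above; example_rx_build() is an illustrative name):

/* Hedged sketch of the documented RX pattern: buffer first, skb later. */
static struct sk_buff *example_rx_build(unsigned int frame_len)
{
        unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct sk_buff *skb;
        void *buf;

        buf = netdev_alloc_frag(truesize);      /* room for headroom, frame and shinfo */
        if (!buf)
                return NULL;

        /* ... hardware writes the frame at buf + NET_SKB_PAD ... */

        skb = build_skb(buf, truesize);
        if (!skb) {
                skb_free_frag(buf);
                return NULL;
        }
        skb_reserve(skb, NET_SKB_PAD);          /* skip the reserved headroom */
        skb_put(skb, frame_len);                /* frame already placed there by the NIC */
        return skb;
}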

EXPORT_SYMBOL(build_skb);

#define NAPI_SKB_CACHE_SIZE     64

struct napi_alloc_cache {
        struct page_frag_cache page;
        unsigned int skb_count;
        void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        void *data;

        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = page_frag_alloc(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Duyck             47      79.66%   4        66.67%
Eric Dumazet                12      20.34%   2        33.33%
Total                       59      100.00%  6        100.00%

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  20      100.00%  1        100.00%
Total                       20      100.00%  1        100.00%

EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Duyck             34      89.47%   3        75.00%
Jesper Dangaard Brouer      4       10.53%   1        25.00%
Total                       38      100.00%  4        100.00%


void *napi_alloc_frag(unsigned int fragsz)
{
        return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Duyck             20      100.00%  1        100.00%
Total                       20      100.00%  1        100.00%

EXPORT_SYMBOL(napi_alloc_frag);

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
                                   gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        struct sk_buff *skb;
        bool pfmemalloc;
        void *data;

        len += NET_SKB_PAD;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        local_irq_save(flags);

        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = page_frag_alloc(nc, len, gfp_mask);
        pfmemalloc = nc->pfmemalloc;

        local_irq_restore(flags);

        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD);
        skb->dev = dev;

skb_fail:
        return skb;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Duyck             144     63.16%   6        46.15%
Eric Dumazet                56      24.56%   3        23.08%
Christoph Hellwig           17      7.46%    2        15.38%
Mel Gorman                  11      4.82%    2        15.38%
Total                       228     100.00%  13       100.00%
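
Drivers normally reach this through the netdev_alloc_skb() wrapper from <linux/skbuff.h>, which supplies GFP_ATOMIC. A hedged sketch of an RX refill path (ring and DMA details elided; example_rx_refill() is an illustrative name):

/* Hedged sketch: refill one RX buffer. The wrapper already adds
 * NET_SKB_PAD headroom; the extra NET_IP_ALIGN reserved here is the
 * common pattern for aligning the eventual IP header.
 */
static struct sk_buff *example_rx_refill(struct net_device *dev,
                                         unsigned int buf_len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
        if (!skb)
                return NULL;            /* ring entry stays empty, retried later */

        skb_reserve(skb, NET_IP_ALIGN); /* align the eventual IP header */
        /* ... map skb->data for DMA and hand the buffer to the hardware ... */
        return skb;
}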

EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
        void *data;

        len += NET_SKB_PAD + NET_IP_ALIGN;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        data = page_frag_alloc(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (nc->page.pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        skb->dev = napi->dev;

skb_fail:
        return skb;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Duyck             209     96.76%   5        71.43%
Jesper Dangaard Brouer      6       2.78%    1        14.29%
Mel Gorman                  1       0.46%    1        14.29%
Total                       216     100.00%  7        100.00%
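
In a NAPI poll loop the napi_alloc_skb() wrapper (GFP_ATOMIC, declared in <linux/skbuff.h>) is the cheaper choice because it can use the per-CPU cache above without disabling IRQs. A hedged sketch of a copy-break style receive in poll context (example_napi_copy_rx() is an illustrative name; the source buffer and length are assumed to come from the driver's ring):

/* Hedged sketch: copy a small received frame into a NAPI-allocated skb. */
static struct sk_buff *example_napi_copy_rx(struct napi_struct *napi,
                                            const void *frame,
                                            unsigned int len)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, len);        /* NET_SKB_PAD + NET_IP_ALIGN built in */
        if (!skb)
                return NULL;

        memcpy(skb_put(skb, len), frame, len);
        return skb;     /* caller sets the protocol and hands it to napi_gro_receive() */
}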

EXPORT_SYMBOL(__napi_alloc_skb);
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Peter Zijlstra              54      91.53%   1        50.00%
Eric Dumazet                5       8.47%    1        50.00%
Total                       59      100.00%  2        100.00%
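
A hedged sketch of how a driver attaches a received page fragment with skb_add_rx_frag(); the page, offset and sizes are assumed to come from the driver's RX ring, and example_add_frag() is an illustrative name:

/* Hedged sketch: append one page fragment to an skb's frag array.
 * The helper bumps skb->len, data_len and truesize itself.
 */
static void example_add_frag(struct sk_buff *skb, struct page *page,
                             unsigned int offset, unsigned int len,
                             unsigned int buf_size)
{
        int i = skb_shinfo(skb)->nr_frags;      /* append after any existing frags */

        /* consumes the caller's reference on @page */
        skb_add_rx_frag(skb, i, page, offset, len, buf_size);
}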

EXPORT_SYMBOL(skb_add_rx_frag);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jason (Hui) Wang            60      100.00%  1        100.00%
Total                       60      100.00%  1        100.00%

EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds              11      47.83%   1        25.00%
Herbert Xu                  7       30.43%   1        25.00%
Eric Dumazet                4       17.39%   1        25.00%
Linus Torvalds (pre-git)    1       4.35%    1        25.00%
Total                       23      100.00%  4        100.00%


static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Herbert Xu                  23      100.00%  1        100.00%
Total                       23      100.00%  1        100.00%


static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds              23      85.19%   1        50.00%
David S. Miller             4       14.81%   1        50.00%
Total                       27      100.00%  2        100.00%


static void skb_free_head(struct sk_buff *skb)
{
        unsigned char *head = skb->head;

        if (skb->head_frag)
                skb_free_frag(head);
        else
                kfree(head);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                27      72.97%   1        50.00%
Alexander Duyck             10      27.03%   1        50.00%
Total                       37      100.00%  2        100.00%


static void skb_release_data(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i;

        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
                return;

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i]);

        /*
         * If skb buf is from userspace, we need to notify the caller
         * the lower device DMA has done;
         */
        if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
                struct ubuf_info *uarg;

                uarg = shinfo->destructor_arg;
                if (uarg->callback)
                        uarg->callback(uarg, true);
        }

        if (shinfo->frag_list)
                kfree_skb_list(shinfo->frag_list);

        skb_free_head(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                35      26.12%   2        20.00%
Shirley Ma                  32      23.88%   1        10.00%
Linus Torvalds              30      22.39%   1        10.00%
Linus Torvalds (pre-git)    19      14.18%   3        30.00%
Herbert Xu                  15      11.19%   1        10.00%
Michael S. Tsirkin          2       1.49%    1        10.00%
Adrian Bunk                 1       0.75%    1        10.00%
Total                       134     100.00%  10       100.00%

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff_fclones *fclones;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                return;

        case SKB_FCLONE_ORIG:
                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                /* We usually free the clone (TX completion) before original skb
                 * This test would have no chance to be true for the clone,
                 * while here, branch prediction will be good.
                 */
                if (atomic_read(&fclones->fclone_ref) == 1)
                        goto fastpath;
                break;

        default: /* SKB_FCLONE_CLONE */
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
                break;
        }
        if (!atomic_dec_and_test(&fclones->fclone_ref))
                return;
fastpath:
        kmem_cache_free(skbuff_fclone_cache, fclones);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                43      41.75%   2        25.00%
David S. Miller             42      40.78%   1        12.50%
Linus Torvalds              9       8.74%    1        12.50%
Linus Torvalds (pre-git)    5       4.85%    2        25.00%
Robert Olsson               3       2.91%    1        12.50%
Herbert Xu                  1       0.97%    1        12.50%
Total                       103     100.00%  8        100.00%


static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
#endif
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)    31      38.75%   7        31.82%
Yasuyuki Kozakai            10      12.50%   2        9.09%
Bart De Schuymer            10      12.50%   2        9.09%
Alexey Kuznetsov            7       8.75%    1        4.55%
Pablo Neira Ayuso           5       6.25%    1        4.55%
Tomas Szepe                 4       5.00%    1        4.55%
Florian Westphal            3       3.75%    1        4.55%
KOVACS Krisztian            2       2.50%    1        4.55%
Lennert Buytenhek           2       2.50%    1        4.55%
Stephen Hemminger           2       2.50%    1        4.55%
Igor Maravić                1       1.25%    1        4.55%
Eric Dumazet                1       1.25%    1        4.55%
Herbert Xu                  1       1.25%    1        4.55%
Andi Kleen                  1       1.25%    1        4.55%
Total                       80      100.00%  22       100.00%

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Lennert Buytenhek           15      50.00%   1        25.00%
Patrick McHardy             8       26.67%   1        25.00%
Herbert Xu                  6       20.00%   1        25.00%
Pablo Neira Ayuso           1       3.33%    1        25.00%
Total                       30      100.00%  4        100.00%

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Herbert Xu                  14      70.00%   1        25.00%
Linus Torvalds (pre-git)    6       30.00%   3        75.00%
Total                       20      100.00%  4        100.00%

EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jörn Engel                  58      85.29%   1        50.00%
Neil Horman                 10      14.71%   1        50.00%
Total                       68      100.00%  2        100.00%

EXPORT_SYMBOL(kfree_skb);
void kfree_skb_list(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                kfree_skb(segs);
                segs = next;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Eric Dumazet                34      100.00%  1        100.00%
Total                       34      100.00%  1        100.00%

EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                struct ubuf_info *uarg;

                uarg = skb_shinfo(skb)->destructor_arg;
                if (uarg->callback)
                        uarg->callback(uarg, false);
                skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Michael S. Tsirkin          62      100.00%  1        100.00%
Total                       62      100.00%  1        100.00%

EXPORT_SYMBOL(skb_tx_error);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_consume_skb(skb);
        __kfree_skb(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Neil Horman                 58      92.06%   1        50.00%
Koki Sanagi                 5       7.94%    1        50.00%
Total                       63      100.00%  2        100.00%
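
The only behavioural difference between kfree_skb() and consume_skb() is which tracepoint fires, and that is what drop-monitoring tools key on. A hedged sketch of choosing between them (example_finish() and the validation flag are illustrative):

/* Hedged sketch: signal intent to the drop-monitoring tracepoints. */
static void example_finish(struct sk_buff *skb, bool validated)
{
        if (!validated) {
                kfree_skb(skb);         /* counted as a drop (trace_kfree_skb) */
                return;
        }

        /* ... packet was delivered or transmitted successfully ... */
        consume_skb(skb);               /* normal end of life (trace_consume_skb) */
}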

EXPORT_SYMBOL(consume_skb);
void __kfree_skb_flush(void)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* flush skb_cache if containing objects */
        if (nc->skb_count) {
                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer      46      100.00%  1        100.00%
Total                       46      100.00%  1        100.00%


static inline void _kfree_skb_defer(struct sk_buff *skb)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* drop skb->head and call any destructors for packet */
        skb_release_all(skb);

        /* record skb to CPU local list */
        nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
        /* SLUB writes into objects when freeing */
        prefetchw(skb);
#endif

        /* flush skb_cache if it is filled */
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer      84      100.00%  2        100.00%
Total                       84      100.00%  2        100.00%


void __kfree_skb_defer(struct sk_buff *skb)
{
        _kfree_skb_defer(skb);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer      15      100.00%  1        100.00%
Total                       15      100.00%  1        100.00%
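
These helpers back napi_consume_skb(), which begins below. A hedged sketch of the TX-completion loop expected to feed it the NAPI budget so frees can be batched through the per-CPU skb_cache (struct example_ring and example_ring_next_completed() are hypothetical driver helpers, not kernel symbols):

/* Hedged sketch: TX completion inside ->poll(). Passing the real budget
 * lets napi_consume_skb() defer frees to the per-CPU cache; a budget of 0
 * (non-NAPI context) makes it fall back to dev_consume_skb_any().
 */
static void example_tx_clean(struct example_ring *ring, int budget)
{
        struct sk_buff *skb;

        while ((skb = example_ring_next_completed(ring)) != NULL)
                napi_consume_skb(skb, budget);
}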


void napi_consume_skb(struct sk_buff *skb, int budget)
{
        if (unlikely(!skb))
                return;

        /* Zero budget indicate non-NAPI context called us, like netpoll */
        if (unlikely(!budget)) {
                dev_consume_skb_any(skb);
                return;
        }

        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        /* if reaching here SKB is ready to free */
        trace_consume_skb(skb);

        /* if SKB is a clone, don't handle this case */
        if (skb->fclone != SKB_FCLONE_UNAVAILABLE)