Release 4.11 net/core/skbuff.c
/*
* Routines having to do with the 'struct sk_buff' memory handlers.
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
* Fixes:
* Alan Cox : Fixed the worst of the load
* balancer bugs.
* Dave Platt : Interrupt stacking fix.
* Richard Kooijman : Timestamp fixes.
* Alan Cox : Changed buffer format.
* Alan Cox : destructor hook for AF_UNIX etc.
* Linus Torvalds : Better skb_clone.
* Alan Cox : Added skb_copy.
* Alan Cox : Added all the changed routines Linus
* only put in the headers
* Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field
* Andi Kleen : slabified it.
* Robert Olsson : Removed skb_head_pool
*
* NOTE:
* The __skb_ routines should be called with interrupts
* disabled, or you better be *real* sure that the operation is atomic
* with respect to whatever list is being frobbed (e.g. via lock_sock()
* or via disabling bottom half handlers, etc).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
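/*
 * Hedged illustration of the NOTE above, not part of skbuff.c: the locked
 * helpers take list->lock with IRQs disabled internally, while the __skb_
 * variants leave that exclusion to the caller. The function names below are
 * invented for the example.
 */
static void example_enqueue_locked(struct sk_buff_head *list,
				   struct sk_buff *skb)
{
	/* Safe from any context: skb_queue_tail() does its own locking. */
	skb_queue_tail(list, skb);
}

static void example_enqueue_unlocked(struct sk_buff_head *list,
				     struct sk_buff *skb)
{
	unsigned long flags;

	/* __skb_queue_tail() assumes the caller already excludes other users. */
	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
}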
/*
* The functions in this file will not compile correctly with gcc 2.4.x
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
/**
* skb_panic - private function for out-of-line support
* @skb: buffer
* @sz: size
* @addr: address
* @msg: skb_over_panic or skb_under_panic
*
* Out-of-line support for skb_put() and skb_push().
* Called via the wrapper skb_over_panic() or skb_under_panic().
* Keep out of line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always reliable.
*/
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
const char msg[])
{
pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
msg, addr, skb->len, sz, skb->head, skb->data,
(unsigned long)skb->tail, (unsigned long)skb->end,
skb->dev ? skb->dev->name : "<NULL>");
BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 40 | 51.28% | 3 | 30.00% |
Patrick McHardy | 16 | 20.51% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 8 | 10.26% | 2 | 20.00% |
Jean Sacren | 8 | 10.26% | 1 | 10.00% |
Joe Perches | 3 | 3.85% | 1 | 10.00% |
James Hogan | 2 | 2.56% | 1 | 10.00% |
Rami Rosen | 1 | 1.28% | 1 | 10.00% |
Total | 78 | 100.00% | 10 | 100.00% |
static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 19 | 63.33% | 1 | 25.00% |
Jean Sacren | 9 | 30.00% | 1 | 25.00% |
Rami Rosen | 1 | 3.33% | 1 | 25.00% |
Patrick McHardy | 1 | 3.33% | 1 | 25.00% |
Total | 30 | 100.00% | 4 | 100.00% |
static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
skb_panic(skb, sz, addr, __func__);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jean Sacren | 22 | 73.33% | 1 | 25.00% |
Patrick McHardy | 4 | 13.33% | 1 | 25.00% |
Linus Torvalds (pre-git) | 3 | 10.00% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 1 | 3.33% | 1 | 25.00% |
Total | 30 | 100.00% | 4 | 100.00% |
/*
* kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If they are and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
*/
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
unsigned long ip, bool *pfmemalloc)
{
void *obj;
bool ret_pfmemalloc = false;
/*
 * Try a regular allocation; if that fails and we're not entitled
 * to the reserves, fail.
*/
obj = kmalloc_node_track_caller(size,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
obj = kmalloc_node_track_caller(size, flags, node);
out:
if (pfmemalloc)
*pfmemalloc = ret_pfmemalloc;
return obj;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 93 | 98.94% | 1 | 50.00% |
Stephen Hemminger | 1 | 1.06% | 1 | 50.00% |
Total | 94 | 100.00% | 2 | 100.00% |
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
*
*/
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
struct sk_buff *skb;
/* Get the HEAD */
skb = kmem_cache_alloc_node(skbuff_head_cache,
gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
skb->mac_header = (typeof(skb->mac_header))~0U;
out:
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 94 | 91.26% | 1 | 25.00% |
Américo Wang | 6 | 5.83% | 1 | 25.00% |
David S. Miller | 2 | 1.94% | 1 | 25.00% |
Pablo Neira Ayuso | 1 | 0.97% | 1 | 25.00% |
Total | 103 | 100.00% | 4 | 100.00% |
/**
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
* @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
* instead of head cache and allocate a cloned (child) skb.
* If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
* allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of at least size bytes. The object has a reference count
* of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int flags, int node)
{
struct kmem_cache *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
bool pfmemalloc;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
if (!skb)
goto out;
prefetchw(skb);
/* We do our best to align skb_shared_info on a separate cache
* line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
size = SKB_DATA_ALIGN(size);
size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
if (!data)
goto nodata;
/* kmalloc(size) might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
/*
* Only clear those fields we need to clear, not those that we will
* actually initialise below. Hence, don't put any more fields after
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
skb->pfmemalloc = pfmemalloc;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(&fclones->fclone_ref, 1);
fclones->skb2.fclone = SKB_FCLONE_CLONE;
}
out:
return skb;
nodata:
kmem_cache_free(cache, skb);
skb = NULL;
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 83 | 22.13% | 6 | 13.64% |
Linus Torvalds (pre-git) | 70 | 18.67% | 14 | 31.82% |
David S. Miller | 47 | 12.53% | 4 | 9.09% |
Mel Gorman | 37 | 9.87% | 1 | 2.27% |
Ian Pratt | 21 | 5.60% | 1 | 2.27% |
Benjamin LaHaise | 20 | 5.33% | 1 | 2.27% |
Mala Anand | 15 | 4.00% | 1 | 2.27% |
Américo Wang | 12 | 3.20% | 1 | 2.27% |
Herbert Xu | 11 | 2.93% | 1 | 2.27% |
Linus Torvalds | 10 | 2.67% | 2 | 4.55% |
Arnaldo Carvalho de Melo | 10 | 2.67% | 4 | 9.09% |
Christoph Hellwig | 8 | 2.13% | 1 | 2.27% |
Patrick Ohly | 7 | 1.87% | 1 | 2.27% |
Tony Lindgren | 7 | 1.87% | 1 | 2.27% |
Stephen Hemminger | 7 | 1.87% | 1 | 2.27% |
Vegard Nossum | 6 | 1.60% | 1 | 2.27% |
Christoph Lameter | 2 | 0.53% | 1 | 2.27% |
Al Viro | 1 | 0.27% | 1 | 2.27% |
Johannes Berg | 1 | 0.27% | 1 | 2.27% |
Total | 375 | 100.00% | 44 | 100.00% |
EXPORT_SYMBOL(__alloc_skb);
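/*
 * Hedged usage sketch, not part of skbuff.c: alloc_skb() is the usual
 * wrapper around __alloc_skb() with flags == 0. The function name and the
 * "payload"/"len" parameters are invented for the example.
 */
static struct sk_buff *example_alloc_and_fill(const void *payload,
					      unsigned int len)
{
	struct sk_buff *skb;

	/* Ask for enough tailroom for the headroom we reserve plus the data. */
	skb = alloc_skb(NET_IP_ALIGN + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Headroom first: skb_reserve() may only be used on an empty buffer. */
	skb_reserve(skb, NET_IP_ALIGN);

	/* skb_put() extends the data area; overrunning skb->end would trigger
	 * the skb_over_panic() path defined earlier in this file.
	 */
	memcpy(skb_put(skb, len), payload, len);
	return skb;
}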
/**
* __build_skb - build a network buffer
* @data: data buffer provided by caller
* @frag_size: size of data, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
* @frag_size is 0, otherwise data should come from the page allocator
* or vmalloc()
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC puts
 * the incoming frame. The driver should add room at the head (NET_SKB_PAD)
 * and MUST add room at the tail (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
*/
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
unsigned int size = frag_size ? : ksize(data);
skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
if (!skb)
return NULL;
size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
skb->mac_header = (typeof(skb->mac_header))~0U;
skb->transport_header = (typeof(skb->transport_header))~0U;
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 183 | 91.96% | 4 | 66.67% |
Américo Wang | 12 | 6.03% | 1 | 16.67% |
David S. Miller | 4 | 2.01% | 1 | 16.67% |
Total | 199 | 100.00% | 6 | 100.00% |
/* build_skb() is a wrapper around __build_skb() that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
*/
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb = __build_skb(data, frag_size);
if (skb && frag_size) {
skb->head_frag = 1;
if (page_is_pfmemalloc(virt_to_head_page(data)))
skb->pfmemalloc = 1;
}
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 57 | 95.00% | 1 | 50.00% |
Michal Hocko | 3 | 5.00% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(build_skb);
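/*
 * Hedged sketch of the driver pattern described in the __build_skb() notes
 * above, not part of skbuff.c. The sizing follows the documented contract:
 * room for NET_SKB_PAD at the head and an aligned skb_shared_info at the
 * tail. It assumes an Ethernet device (eth_type_trans() comes from
 * <linux/etherdevice.h>); "rx_len" and the function name are invented.
 */
static struct sk_buff *example_build_rx_skb(struct net_device *dev,
					    unsigned int rx_len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + rx_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	/* Page-fragment backed buffer, typically posted to the RX ring first. */
	data = netdev_alloc_frag(truesize);
	if (!data)
		return NULL;

	/* ... the NIC DMAs the frame to data + NET_SKB_PAD ... */

	/* Non-zero frag_size, so @data must be page-fragment backed (it is). */
	skb = build_skb(data, truesize);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}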
#define NAPI_SKB_CACHE_SIZE 64
struct napi_alloc_cache {
struct page_frag_cache page;
unsigned int skb_count;
void *skb_cache[NAPI_SKB_CACHE_SIZE];
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct page_frag_cache *nc;
unsigned long flags;
void *data;
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, fragsz, gfp_mask);
local_irq_restore(flags);
return data;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Duyck | 47 | 79.66% | 4 | 66.67% |
Eric Dumazet | 12 | 20.34% | 2 | 33.33% |
Total | 59 | 100.00% | 6 | 100.00% |
/**
* netdev_alloc_frag - allocate a page fragment
* @fragsz: fragment size
*
 * Allocates a frag from a page for a receive buffer.
* Uses GFP_ATOMIC allocations.
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(netdev_alloc_frag);
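/*
 * Hedged sketch, not part of skbuff.c: a fragment returned here is backed by
 * the per-CPU page_frag_cache, so it must be released with skb_free_frag()
 * (or handed to build_skb()), never with kfree(). "fragsz" is an arbitrary
 * example size.
 */
static void example_frag_roundtrip(unsigned int fragsz)
{
	void *data = netdev_alloc_frag(fragsz);

	if (data)
		skb_free_frag(data);
}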
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Duyck | 34 | 89.47% | 3 | 75.00% |
Jesper Dangaard Brouer | 4 | 10.53% | 1 | 25.00% |
Total | 38 | 100.00% | 4 | 100.00% |
void *napi_alloc_frag(unsigned int fragsz)
{
return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Duyck | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(napi_alloc_frag);
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
* buffer has NET_SKB_PAD headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
gfp_t gfp_mask)
{
struct page_frag_cache *nc;
unsigned long flags;
struct sk_buff *skb;
bool pfmemalloc;
void *data;
len += NET_SKB_PAD;
if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
len = SKB_DATA_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
local_irq_restore(flags);
if (unlikely(!data))
return NULL;
skb = __build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
/* use OR instead of assignment to avoid clearing of bits in mask */
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD);
skb->dev = dev;
skb_fail:
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Duyck | 144 | 63.16% | 6 | 46.15% |
Eric Dumazet | 56 | 24.56% | 3 | 23.08% |
Christoph Hellwig | 17 | 7.46% | 2 | 15.38% |
Mel Gorman | 11 | 4.82% | 2 | 15.38% |
Total | 228 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(__netdev_alloc_skb);
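/*
 * Hedged sketch of typical driver usage, not part of skbuff.c:
 * netdev_alloc_skb() wraps __netdev_alloc_skb() with GFP_ATOMIC, and the
 * NET_SKB_PAD headroom is already built in, so the caller only reserves what
 * it needs on top (NET_IP_ALIGN here; netdev_alloc_skb_ip_align() bundles
 * exactly this). Names are invented for the example.
 */
static struct sk_buff *example_netdev_rx_alloc(struct net_device *dev,
					       unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, NET_IP_ALIGN + frame_len);
	if (!skb)
		return NULL;

	/* Align the IP header; NET_SKB_PAD is already accounted for. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}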
/**
* __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
* @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
*
* Allocate a new sk_buff for use in NAPI receive. This buffer will
* attempt to allocate the head from a special reserved region used
* only for NAPI Rx allocation. By doing this we can save several
* CPU cycles by avoiding having to disable and re-enable IRQs.
*
* %NULL is returned if there is no free memory.
*/
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
void *data;
len += NET_SKB_PAD + NET_IP_ALIGN;
if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
len = SKB_DATA_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
data = page_frag_alloc(&nc->page, len, gfp_mask);
if (unlikely(!data))
return NULL;
skb = __build_skb(data, len);
if (unlikely(!skb)) {
skb_free_frag(data);
return NULL;
}
/* use OR instead of assignment to avoid clearing of bits in mask */
if (nc->page.pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
skb_success:
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb->dev = napi->dev;
skb_fail:
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexander Duyck | 209 | 96.76% | 5 | 71.43% |
Jesper Dangaard Brouer | 6 | 2.78% | 1 | 14.29% |
Mel Gorman | 1 | 0.46% | 1 | 14.29% |
Total | 216 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(__napi_alloc_skb);
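/*
 * Hedged sketch, not part of skbuff.c: inside a driver's NAPI poll callback,
 * softirq context lets __napi_alloc_skb() use the per-CPU cache without
 * disabling IRQs; napi_alloc_skb() is the GFP_ATOMIC wrapper. A copy-break
 * style receive is shown; eth_type_trans() comes from <linux/etherdevice.h>
 * and all names are invented for the example.
 */
static void example_napi_rx(struct napi_struct *napi, const void *frame,
			    unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, frame_len);
	if (!skb)
		return;		/* no memory: drop the frame */

	memcpy(skb_put(skb, frame_len), frame, frame_len);
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);
}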
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize)
{
skb_fill_page_desc(skb, i, page, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 54 | 91.53% | 1 | 50.00% |
Eric Dumazet | 5 | 8.47% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(skb_add_rx_frag);
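/*
 * Hedged sketch, not part of skbuff.c: attaching an already-filled page to an
 * skb as a paged fragment instead of copying it. "page", "offset" and "len"
 * describe the device-filled buffer; the truesize charged here is a whole
 * page, matching how the buffer was presumably allocated.
 */
static void example_attach_rx_page(struct sk_buff *skb, struct page *page,
				   int offset, int len)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);
}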
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
skb_frag_size_add(frag, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jason (Hui) Wang | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
kfree_skb_list(*listp);
*listp = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 11 | 47.83% | 1 | 25.00% |
Herbert Xu | 7 | 30.43% | 1 | 25.00% |
Eric Dumazet | 4 | 17.39% | 1 | 25.00% |
Linus Torvalds (pre-git) | 1 | 4.35% | 1 | 25.00% |
Total | 23 | 100.00% | 4 | 100.00% |
static inline void skb_drop_fraglist(struct sk_buff *skb)
{
skb_drop_list(&skb_shinfo(skb)->frag_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static void skb_clone_fraglist(struct sk_buff *skb)
{
struct sk_buff *list;
skb_walk_frags(skb, list)
skb_get(list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 23 | 85.19% | 1 | 50.00% |
David S. Miller | 4 | 14.81% | 1 | 50.00% |
Total | 27 | 100.00% | 2 | 100.00% |
static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
if (skb->head_frag)
skb_free_frag(head);
else
kfree(head);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 27 | 72.97% | 1 | 50.00% |
Alexander Duyck | 10 | 27.03% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
static void skb_release_data(struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
if (skb->cloned &&
atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
&shinfo->dataref))
return;
for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i]);
/*
 * If the skb buffer is from userspace, we need to notify the caller
 * that the lower device DMA is done;
*/
if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
struct ubuf_info *uarg;
uarg = shinfo->destructor_arg;
if (uarg->callback)
uarg->callback(uarg, true);
}
if (shinfo->frag_list)
kfree_skb_list(shinfo->frag_list);
skb_free_head(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 35 | 26.12% | 2 | 20.00% |
Shirley Ma | 32 | 23.88% | 1 | 10.00% |
Linus Torvalds | 30 | 22.39% | 1 | 10.00% |
Linus Torvalds (pre-git) | 19 | 14.18% | 3 | 30.00% |
Herbert Xu | 15 | 11.19% | 1 | 10.00% |
Michael S. Tsirkin | 2 | 1.49% | 1 | 10.00% |
Adrian Bunk | 1 | 0.75% | 1 | 10.00% |
Total | 134 | 100.00% | 10 | 100.00% |
/*
* Free an skbuff by memory without cleaning the state.
*/
static void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff_fclones *fclones;
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
kmem_cache_free(skbuff_head_cache, skb);
return;
case SKB_FCLONE_ORIG:
fclones = container_of(skb, struct sk_buff_fclones, skb1);
/* We usually free the clone (TX completion) before original skb
* This test would have no chance to be true for the clone,
* while here, branch prediction will be good.
*/
if (atomic_read(&fclones->fclone_ref) == 1)
goto fastpath;
break;
default: /* SKB_FCLONE_CLONE */
fclones = container_of(skb, struct sk_buff_fclones, skb2);
break;
}
if (!atomic_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
kmem_cache_free(skbuff_fclone_cache, fclones);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 43 | 41.75% | 2 | 25.00% |
David S. Miller | 42 | 40.78% | 1 | 12.50% |
Linus Torvalds | 9 | 8.74% | 1 | 12.50% |
Linus Torvalds (pre-git) | 5 | 4.85% | 2 | 25.00% |
Robert Olsson | 3 | 2.91% | 1 | 12.50% |
Herbert Xu | 1 | 0.97% | 1 | 12.50% |
Total | 103 | 100.00% | 8 | 100.00% |
static void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
#endif
if (skb->destructor) {
WARN_ON(in_irq());
skb->destructor(skb);
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 31 | 38.75% | 7 | 31.82% |
Yasuyuki Kozakai | 10 | 12.50% | 2 | 9.09% |
Bart De Schuymer | 10 | 12.50% | 2 | 9.09% |
Alexey Kuznetsov | 7 | 8.75% | 1 | 4.55% |
Pablo Neira Ayuso | 5 | 6.25% | 1 | 4.55% |
Tomas Szepe | 4 | 5.00% | 1 | 4.55% |
Florian Westphal | 3 | 3.75% | 1 | 4.55% |
KOVACS Krisztian | 2 | 2.50% | 1 | 4.55% |
Lennert Buytenhek | 2 | 2.50% | 1 | 4.55% |
Stephen Hemminger | 2 | 2.50% | 1 | 4.55% |
Igor Maravić | 1 | 1.25% | 1 | 4.55% |
Eric Dumazet | 1 | 1.25% | 1 | 4.55% |
Herbert Xu | 1 | 1.25% | 1 | 4.55% |
Andi Kleen | 1 | 1.25% | 1 | 4.55% |
Total | 80 | 100.00% | 22 | 100.00% |
/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
skb_release_head_state(skb);
if (likely(skb->head))
skb_release_data(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lennert Buytenhek | 15 | 50.00% | 1 | 25.00% |
Patrick McHardy | 8 | 26.67% | 1 | 25.00% |
Herbert Xu | 6 | 20.00% | 1 | 25.00% |
Pablo Neira Ayuso | 1 | 3.33% | 1 | 25.00% |
Total | 30 | 100.00% | 4 | 100.00% |
/**
* __kfree_skb - private function
* @skb: buffer
*
* Free an sk_buff. Release anything attached to the buffer.
* Clean the state. This is an internal helper function. Users should
* always call kfree_skb
*/
void __kfree_skb(struct sk_buff *skb)
{
skb_release_all(skb);
kfree_skbmem(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 14 | 70.00% | 1 | 25.00% |
Linus Torvalds (pre-git) | 6 | 30.00% | 3 | 75.00% |
Total | 20 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(__kfree_skb);
/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
*
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
void kfree_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
trace_kfree_skb(skb, __builtin_return_address(0));
__kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jörn Engel | 58 | 85.29% | 1 | 50.00% |
Neil Horman | 10 | 14.71% | 1 | 50.00% |
Total | 68 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(kfree_skb);
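/*
 * Hedged sketch, not part of skbuff.c: kfree_skb() only releases the buffer
 * once skb->users reaches zero, so a reference taken with skb_get() keeps it
 * alive across the first free. Purely illustrative.
 */
static void example_refcounted_free(struct sk_buff *skb)
{
	skb_get(skb);		/* users: 2 */
	kfree_skb(skb);		/* users: 1, buffer still valid */
	kfree_skb(skb);		/* users: 0, buffer freed */
}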
void kfree_skb_list(struct sk_buff *segs)
{
while (segs) {
struct sk_buff *next = segs->next;
kfree_skb(segs);
segs = next;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(kfree_skb_list);
/**
* skb_tx_error - report an sk_buff xmit error
* @skb: buffer that triggered an error
*
* Report xmit error if a device callback is tracking this skb.
* skb must be freed afterwards.
*/
void skb_tx_error(struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
struct ubuf_info *uarg;
uarg = skb_shinfo(skb)->destructor_arg;
if (uarg->callback)
uarg->callback(uarg, false);
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michael S. Tsirkin | 62 | 100.00% | 1 | 100.00% |
Total | 62 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(skb_tx_error);
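/*
 * Hedged sketch, not part of skbuff.c: a transmit path that cannot send a
 * zerocopy skb reports the error before dropping it, so a registered
 * ubuf_info callback fires with success == false. The function name is
 * invented for the example.
 */
static void example_drop_tx_skb(struct sk_buff *skb)
{
	skb_tx_error(skb);
	dev_kfree_skb_any(skb);
}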
/**
* consume_skb - free an skbuff
* @skb: buffer to free
*
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that via the kfree_skb tracepoint.
*/
void consume_skb(struct sk_buff *skb)
{
if (unlikely(!skb))
return;
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
trace_consume_skb(skb);
__kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Horman | 58 | 92.06% | 1 | 50.00% |
Koki Sanagi | 5 | 7.94% | 1 | 50.00% |
Total | 63 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(consume_skb);
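/*
 * Hedged sketch, not part of skbuff.c: on a successful TX completion a driver
 * calls consume_skb() (or dev_consume_skb_any() from hard-IRQ context) so
 * drop-monitoring tools keyed on the kfree_skb tracepoint stay quiet;
 * kfree_skb() is kept for genuine drops. "tx_ok" is invented for the example.
 */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		consume_skb(skb);	/* delivered: not a drop */
	else
		kfree_skb(skb);		/* error path: counts as a drop */
}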
void __kfree_skb_flush(void)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
/* flush skb_cache if containing objects */
if (nc->skb_count) {
kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
nc->skb_cache);
nc->skb_count = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
static inline void _kfree_skb_defer(struct sk_buff *skb)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
/* drop skb->head and call any destructors for packet */
skb_release_all(skb);
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
#ifdef CONFIG_SLUB
/* SLUB writes into objects when freeing */
prefetchw(skb);
#endif
/* flush skb_cache if it is filled */
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
nc->skb_cache);
nc->skb_count = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 84 | 100.00% | 2 | 100.00% |
Total | 84 | 100.00% | 2 | 100.00% |
void __kfree_skb_defer(struct sk_buff *skb)
{
_kfree_skb_defer(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
void napi_consume_skb(struct sk_buff *skb, int budget)
{
if (unlikely(!skb))
return;
/* Zero budget indicates a non-NAPI context called us, like netpoll */
if (unlikely(!budget)) {
dev_consume_skb_any(skb);
return;
}
if (likely(atomic_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
/* if reaching here SKB is ready to free */
trace_consume_skb(skb);
/* if SKB is a clone, don't handle this case */
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)