cregit-Linux: how code gets into the kernel

Release 4.15 net/xfrm/xfrm_input.c

Directory: net/xfrm
// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>


struct xfrm_trans_tasklet {
        struct tasklet_struct tasklet;
        struct sk_buff_head queue;
};


struct xfrm_trans_cb {
        int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
};


#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))


static struct kmem_cache *secpath_cachep __read_mostly;

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);

static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];


static struct gro_cells gro_cells;

static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);


int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
                err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        return err;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Steffen Klassert                 74      91.36%    1        33.33%
Florian Westphal                 6       7.41%     1        33.33%
Li RongQing                      1       1.23%     1        33.33%
Total                            81      100.00%   3        100.00%

EXPORT_SYMBOL(xfrm_input_register_afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
        }
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        synchronize_rcu();
        return err;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Steffen Klassert                 81      98.78%    1        50.00%
Florian Westphal                 1       1.22%     1        50.00%
Total                            82      100.00%   2        100.00%

EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
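
As a usage note (not part of this file): an address-family module registers a single struct xfrm_input_afinfo for its family, typically from its module init path, and gets -EEXIST back if the slot is already taken. The sketch below is a hypothetical minimal registration in the style of net/ipv4/xfrm4_protocol.c; the example_* names are invented for illustration.

/* Hypothetical example, not part of xfrm_input.c: registering an AF_INET
 * input-callback table, in the style of xfrm4_protocol.c. */
static int example_xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
        /* dispatch to whichever protocol handler matches 'protocol' */
        return 0;
}

static const struct xfrm_input_afinfo example_xfrm4_input_afinfo = {
        .family         = AF_INET,
        .callback       = example_xfrm4_rcv_cb,
};

static int __init example_xfrm4_init(void)
{
        /* returns -EEXIST if another afinfo already claimed AF_INET */
        return xfrm_input_register_afinfo(&example_xfrm4_input_afinfo);
}
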
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
        const struct xfrm_input_afinfo *afinfo;

        if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return NULL;

        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_input_afinfo[family]);
        if (unlikely(!afinfo))
                rcu_read_unlock();
        return afinfo;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Steffen Klassert                 54      88.52%    1        50.00%
Florian Westphal                 7       11.48%    1        50.00%
Total                            61      100.00%   2        100.00%


static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
                       int err)
{
        int ret;
        const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

        if (!afinfo)
                return -EAFNOSUPPORT;

        ret = afinfo->callback(skb, protocol, err);
        rcu_read_unlock();

        return ret;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Steffen Klassert                 60      95.24%    1        50.00%
Florian Westphal                 3       4.76%     1        50.00%
Total                            63      100.00%   2        100.00%


void __secpath_destroy(struct sec_path *sp)
{
        int i;

        for (i = 0; i < sp->len; i++)
                xfrm_state_put(sp->xvec[i]);
        kmem_cache_free(secpath_cachep, sp);
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Alexey Kuznetsov                 38      84.44%    1        33.33%
Herbert Xu                       7       15.56%    2        66.67%
Total                            45      100.00%   3        100.00%

EXPORT_SYMBOL(__secpath_destroy);
struct sec_path *secpath_dup(struct sec_path *src)
{
        struct sec_path *sp;

        sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
        if (!sp)
                return NULL;

        sp->len = 0;
        sp->olen = 0;

        memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));

        if (src) {
                int i;

                memcpy(sp, src, sizeof(*sp));
                for (i = 0; i < sp->len; i++)
                        xfrm_state_hold(sp->xvec[i]);
        }
        refcount_set(&sp->refcnt, 1);
        return sp;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       91      72.80%    2        25.00%
Steffen Klassert                 25      20.00%    2        25.00%
Alexey Kuznetsov                 5       4.00%     1        12.50%
Hideaki Yoshifuji / 吉藤英明     2       1.60%     1        12.50%
Elena Reshetova                  1       0.80%     1        12.50%
Christoph Lameter                1       0.80%     1        12.50%
Total                            125     100.00%   8        100.00%

EXPORT_SYMBOL(secpath_dup);
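
As an illustration (not part of this file): secpath_dup() is the slow path behind secpath_set() below, and the resulting sec_path owns a reference on every state stored in xvec, dropped in __secpath_destroy(). A hedged, hypothetical sketch of how a receive path records that a state processed an skb, roughly what xfrm_input() and the GRO receive paths do per decapsulated layer; the example_* name is invented.

/* Hypothetical example, not part of xfrm_input.c: ensure the skb has a
 * writable secpath and record state 'x' in it. */
static int example_record_state(struct sk_buff *skb, struct xfrm_state *x)
{
        int err = secpath_set(skb);

        if (err)
                return err;
        if (skb->sp->len == XFRM_MAX_DEPTH)
                return -ENOBUFS;

        /* the secpath owns this reference; __secpath_destroy() drops it */
        xfrm_state_hold(x);
        skb->sp->xvec[skb->sp->len++] = x;
        return 0;
}
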
int secpath_set(struct sk_buff *skb)
{
        struct sec_path *sp;

        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
                sp = secpath_dup(skb->sp);
                if (!sp)
                        return -ENOMEM;

                if (skb->sp)
                        secpath_put(skb->sp);
                skb->sp = sp;
        }
        return 0;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Steffen Klassert                 76      98.70%    1        50.00%
Elena Reshetova                  1       1.30%     1        50.00%
Total                            77      100.00%   2        100.00%

EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
        int offset, offset_seq;
        int hlen;

        switch (nexthdr) {
        case IPPROTO_AH:
                hlen = sizeof(struct ip_auth_hdr);
                offset = offsetof(struct ip_auth_hdr, spi);
                offset_seq = offsetof(struct ip_auth_hdr, seq_no);
                break;
        case IPPROTO_ESP:
                hlen = sizeof(struct ip_esp_hdr);
                offset = offsetof(struct ip_esp_hdr, spi);
                offset_seq = offsetof(struct ip_esp_hdr, seq_no);
                break;
        case IPPROTO_COMP:
                if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
                        return -EINVAL;
                *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
                *seq = 0;
                return 0;
        default:
                return 1;
        }

        if (!pskb_may_pull(skb, hlen))
                return -EINVAL;

        *spi = *(__be32 *)(skb_transport_header(skb) + offset);
        *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
        return 0;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Alexey Kuznetsov                 157     76.21%    1        11.11%
Herbert Xu                       20      9.71%     1        11.11%
Arnaldo Carvalho de Melo         9       4.37%     1        11.11%
Mitsuru Kanda                    5       2.43%     1        11.11%
Al Viro                          5       2.43%     1        11.11%
James Morris                     5       2.43%     1        11.11%
Hideaki Yoshifuji / 吉藤英明     4       1.94%     2        22.22%
Alexey Dobriyan                  1       0.49%     1        11.11%
Total                            206     100.00%   9        100.00%

EXPORT_SYMBOL(xfrm_parse_spi);
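
As an illustration (not part of this file) of the return convention above: 0 means *spi and *seq were filled in, 1 means nexthdr is not an AH/ESP/IPcomp header, and a negative value means the header is truncated. The hypothetical caller below assumes the transport header already points at the ESP header, as the AF-specific rcv paths arrange before calling xfrm_input(); the example_* name is invented.

/* Hypothetical example, not part of xfrm_input.c: parse the SPI from an
 * ESP packet and look up the matching state. */
static struct xfrm_state *example_lookup_esp_state(struct net *net,
                                                   struct sk_buff *skb,
                                                   xfrm_address_t *daddr)
{
        __be32 spi, seq;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                return NULL;    /* not ESP, or header truncated */

        return xfrm_state_lookup(net, skb->mark, daddr, spi,
                                 IPPROTO_ESP, AF_INET);
}
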
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct xfrm_mode *inner_mode = x->inner_mode;
        int err;

        err = x->outer_mode->afinfo->extract_input(x, skb);
        if (err)
                return err;

        if (x->sel.family == AF_UNSPEC) {
                inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                if (inner_mode == NULL)
                        return -EAFNOSUPPORT;
        }

        skb->protocol = inner_mode->afinfo->eth_proto;
        return inner_mode->input2(x, skb);
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       60      57.14%    1        50.00%
Kazunori Miyazawa                45      42.86%    1        50.00%
Total                            105     100.00%   2        100.00%

EXPORT_SYMBOL(xfrm_prepare_input);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
        struct net *net = dev_net(skb->dev);
        int err;
        __be32 seq;
        __be32 seq_hi;
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
        u32 mark = skb->mark;
        unsigned int family = AF_UNSPEC;
        int decaps = 0;
        int async = 0;
        bool xfrm_gro = false;
        bool crypto_done = false;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (encap_type < 0) {
                x = xfrm_input_state(skb);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop;
                }

                family = x->outer_mode->afinfo->family;

                /* An encap_type of -1 indicates async resumption. */
                if (encap_type == -1) {
                        async = 1;
                        seq = XFRM_SKB_CB(skb)->seq.input.low;
                        goto resume;
                }

                /* encap_type < -1 indicates a GRO call. */
                encap_type = 0;
                seq = XFRM_SPI_SKB_CB(skb)->seq;

                if (xo && (xo->flags & CRYPTO_DONE)) {
                        crypto_done = true;
                        x = xfrm_input_state(skb);
                        family = XFRM_SPI_SKB_CB(skb)->family;

                        if (!(xo->status & CRYPTO_SUCCESS)) {
                                if (xo->status &
                                    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
                                     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
                                     CRYPTO_TUNNEL_AH_AUTH_FAILED |
                                     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
                                        xfrm_audit_state_icvfail(x, skb, x->type->proto);
                                        x->stats.integrity_failed++;
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                if (xo->status & CRYPTO_INVALID_PROTOCOL) {
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                                goto drop;
                        }

                        if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                                goto drop;
                        }
                }

                goto lock;
        }

        family = XFRM_SPI_SKB_CB(skb)->family;

        /* if tunnel is present override skb->mark value with tunnel i_key */
        switch (family) {
        case AF_INET:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
                break;
        case AF_INET6:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
                break;
        }

        err = secpath_set(skb);
        if (err) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                goto drop;
        }

        seq = 0;
        if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                goto drop;
        }

        daddr = (xfrm_address_t *)(skb_network_header(skb) +
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        do {
                if (skb->sp->len == XFRM_MAX_DEPTH) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                        goto drop;
                }

                x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
                        goto drop;
                }

                skb->sp->xvec[skb->sp->len++] = x;

lock:
                spin_lock(&x->lock);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop_unlock;
                }

                if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
                        goto drop_unlock;
                }

                if (x->repl->check(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                if (xfrm_state_check_expire(x)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
                        goto drop_unlock;
                }

                spin_unlock(&x->lock);

                if (xfrm_tunnel_check(skb, x, family)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                seq_hi = htonl(xfrm_replay_seqhi(x, seq));

                XFRM_SKB_CB(skb)->seq.input.low = seq;
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

                skb_dst_force(skb);
                dev_hold(skb->dev);

                if (crypto_done)
                        nexthdr = x->type_offload->input_tail(x, skb);
                else
                        nexthdr = x->type->input(x, skb);

                if (nexthdr == -EINPROGRESS)
                        return 0;
resume:
                dev_put(skb->dev);

                spin_lock(&x->lock);
                if (nexthdr <= 0) {
                        if (nexthdr == -EBADMSG) {
                                xfrm_audit_state_icvfail(x, skb, x->type->proto);
                                x->stats.integrity_failed++;
                        }
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                        goto drop_unlock;
                }

                /* only the first xfrm gets the encap type */
                encap_type = 0;

                if (async && x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                x->repl->advance(x, seq);

                x->curlft.bytes += skb->len;
                x->curlft.packets++;

                spin_unlock(&x->lock);

                XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

                inner_mode = x->inner_mode;

                if (x->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                        if (inner_mode == NULL) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                                goto drop;
                        }
                }

                if (inner_mode->input(x, skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
                        decaps = 1;
                        break;
                }

                /*
                 * We need the inner address.  However, we only get here for
                 * transport mode so the outer address is identical.
                 */
                daddr = &x->id.daddr;
                family = x->outer_mode->afinfo->family;

                err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
                if (err < 0) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
        } while (!err);

        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
        if (err)
                goto drop;

        nf_reset(skb);

        if (decaps) {
                if (skb->sp)
                        skb->sp->olen = 0;
                skb_dst_drop(skb);
                gro_cells_receive(&gro_cells, skb);
                return 0;
        } else {
                xo = xfrm_offload(skb);
                if (xo)
                        xfrm_gro = xo->flags & XFRM_GRO;

                err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
                if (xfrm_gro) {
                        if (skb->sp)
                                skb->sp->olen = 0;
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
                        return err;
                }

                return err;
        }

drop_unlock:
        spin_unlock(&x->lock);
drop:
        xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
        kfree_skb(skb);
        return 0;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       496     36.63%    10       22.22%
Steffen Klassert                 413     30.50%    13       28.89%
Masahide Nakamura                67      4.95%     2        4.44%
Alexander Duyck                  61      4.51%     1        2.22%
Alexey Dobriyan                  57      4.21%     4        8.89%
Aviv Heller                      45      3.32%     1        2.22%
Kazunori Miyazawa                44      3.25%     1        2.22%
Alexey Kodanev                   31      2.29%     2        4.44%
David S. Miller                  30      2.22%     1        2.22%
Yossi Kuperman                   20      1.48%     1        2.22%
Florian Westphal                 20      1.48%     1        2.22%
Ilan Tayari                      16      1.18%     1        2.22%
Subash Abhinov Kasiviswanathan   14      1.03%     1        2.22%
Paul Moore                       13      0.96%     1        2.22%
Li RongQing                      12      0.89%     1        2.22%
Fan Du                           10      0.74%     1        2.22%
Sabrina Dubroca                  2       0.15%     1        2.22%
Jamal Hadi Salim                 2       0.15%     1        2.22%
Eric Dumazet                     1       0.07%     1        2.22%
Total                            1354    100.00%   45       100.00%

EXPORT_SYMBOL(xfrm_input);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
        return xfrm_input(skb, nexthdr, 0, -1);
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       26      100.00%   1        100.00%
Total                            26      100.00%   1        100.00%

EXPORT_SYMBOL(xfrm_input_resume);
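
xfrm_input_resume() is the re-entry point for transforms that completed asynchronously: the -1 encap_type makes xfrm_input() jump to its resume label with the state already attached to the skb. The sketch below is a hedged, hypothetical completion callback modeled on esp_input_done() in net/ipv4/esp4.c; the example_* names, including the helper that computes the inner nexthdr, are invented.

/* Hypothetical example, not part of xfrm_input.c: an async crypto
 * completion handing the packet back to the stack. */
static int example_esp_input_done2(struct sk_buff *skb, int err);
                                        /* finishes ESP processing and returns
                                         * the inner nexthdr or a negative error */

static void example_esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, example_esp_input_done2(skb, err));
}
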
static void xfrm_trans_reinject(unsigned long data)
{
        struct xfrm_trans_tasklet *trans = (void *)data;
        struct sk_buff_head queue;
        struct sk_buff *skb;

        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&trans->queue, &queue);

        while ((skb = __skb_dequeue(&queue)))
                XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       78      100.00%   1        100.00%
Total                            78      100.00%   1        100.00%


int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *))
{
        struct xfrm_trans_tasklet *trans;

        trans = this_cpu_ptr(&xfrm_trans_tasklet);

        if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
                return -ENOBUFS;

        XFRM_TRANS_SKB_CB(skb)->finish = finish;
        __skb_queue_tail(&trans->queue, skb);
        tasklet_schedule(&trans->tasklet);

        return 0;
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       88      100.00%   2        100.00%
Total                            88      100.00%   2        100.00%

EXPORT_SYMBOL(xfrm_trans_queue);
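
xfrm_trans_queue() lets the address-family code defer the tail of receive processing to the per-CPU tasklet above instead of completing it in the current softirq, which avoids recursion when a transport-mode packet is re-injected into the stack. A hedged, hypothetical sketch modeled on xfrm4_rcv_encap_finish() in net/ipv4/xfrm4_input.c; the example_* names are invented.

/* Hypothetical example, not part of xfrm_input.c: queue the skb so the
 * rest of input processing runs from xfrm_trans_reinject(). */
static int example_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return dst_input(skb);  /* continue normal input processing */
}

static int example_rcv_finish(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
{
        /* fails with -ENOBUFS once the per-CPU queue holds netdev_max_backlog skbs */
        if (xfrm_trans_queue(skb, example_finish)) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
        return 0;
}
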
void __init xfrm_input_init(void)
{
        int err;
        int i;

        init_dummy_netdev(&xfrm_napi_dev);
        err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
        if (err)
                gro_cells.cells = NULL;

        secpath_cachep = kmem_cache_create("secpath_cache",
                                           sizeof(struct sec_path),
                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                           NULL);

        for_each_possible_cpu(i) {
                struct xfrm_trans_tasklet *trans;

                trans = &per_cpu(xfrm_trans_tasklet, i);
                __skb_queue_head_init(&trans->queue);
                tasklet_init(&trans->tasklet, xfrm_trans_reinject,
                             (unsigned long)trans);
        }
}

Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       75      70.09%    2        50.00%
Steffen Klassert                 30      28.04%    1        25.00%
Alexey Dobriyan                  2       1.87%     1        25.00%
Total                            107     100.00%   4        100.00%


Overall Contributors

Person                           Tokens  Prop      Commits  CommitProp
Herbert Xu                       1039    38.83%    16       21.92%
Steffen Klassert                 858     32.06%    16       21.92%
Alexey Kuznetsov                 206     7.70%     1        1.37%
Kazunori Miyazawa                89      3.33%     1        1.37%
Alexander Duyck                  67      2.50%     1        1.37%
Masahide Nakamura                67      2.50%     2        2.74%
Alexey Dobriyan                  60      2.24%     6        8.22%
Aviv Heller                      45      1.68%     1        1.37%
Florian Westphal                 41      1.53%     2        2.74%
Alexey Kodanev                   31      1.16%     2        2.74%
David S. Miller                  30      1.12%     1        1.37%
Yossi Kuperman                   20      0.75%     1        1.37%
Ilan Tayari                      16      0.60%     1        1.37%
Subash Abhinov Kasiviswanathan   14      0.52%     1        1.37%
Paul Moore                       13      0.49%     1        1.37%
Li RongQing                      13      0.49%     2        2.74%
Adrian Bunk                      13      0.49%     1        1.37%
Fan Du                           10      0.37%     1        1.37%
Arnaldo Carvalho de Melo         9       0.34%     1        1.37%
Hideaki Yoshifuji / 吉藤英明     8       0.30%     4        5.48%
James Morris                     5       0.19%     1        1.37%
Al Viro                          5       0.19%     1        1.37%
Mitsuru Kanda                    5       0.19%     1        1.37%
Christoph Lameter                3       0.11%     2        2.74%
Eric Dumazet                     2       0.07%     2        2.74%
Elena Reshetova                  2       0.07%     1        1.37%
Jamal Hadi Salim                 2       0.07%     1        1.37%
Sabrina Dubroca                  2       0.07%     1        1.37%
Greg Kroah-Hartman               1       0.04%     1        1.37%
Total                            2676    100.00%   73       100.00%
Created with cregit.