Contributors: 25

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| James Chapman | 350 | 35.82% | 1 | 1.72% |
| Steffen Klassert | 272 | 27.84% | 5 | 8.62% |
| Patrick McHardy | 103 | 10.54% | 2 | 3.45% |
| Herbert Xu | 59 | 6.04% | 7 | 12.07% |
| Linus Torvalds (pre-git) | 53 | 5.42% | 15 | 25.86% |
| Arnaldo Carvalho de Melo | 16 | 1.64% | 2 | 3.45% |
| Eric Dumazet | 15 | 1.54% | 4 | 6.90% |
| Derek Atkins | 13 | 1.33% | 1 | 1.72% |
| David S. Miller | 13 | 1.33% | 2 | 3.45% |
| Hideaki Yoshifuji / 吉藤英明 | 11 | 1.13% | 3 | 5.17% |
| Eric W. Biederman | 11 | 1.13% | 2 | 3.45% |
| Paul Davey | 11 | 1.13% | 1 | 1.72% |
| Alexey Kuznetsov | 10 | 1.02% | 1 | 1.72% |
| Maciej Żenczykowski | 8 | 0.82% | 1 | 1.72% |
| Kazunori Miyazawa | 8 | 0.82% | 1 | 1.72% |
| Alexey Dobriyan | 6 | 0.61% | 1 | 1.72% |
| Harald Welte | 6 | 0.61% | 1 | 1.72% |
| Andi Kleen | 5 | 0.51% | 1 | 1.72% |
| Jon Grimm | 1 | 0.10% | 1 | 1.72% |
| Pravin B Shelar | 1 | 0.10% | 1 | 1.72% |
| Jan Engelhardt | 1 | 0.10% | 1 | 1.72% |
| Greg Kroah-Hartman | 1 | 0.10% | 1 | 1.72% |
| Al Viro | 1 | 0.10% | 1 | 1.72% |
| Ian Morris | 1 | 0.10% | 1 | 1.72% |
| Linus Torvalds | 1 | 0.10% | 1 | 1.72% |
| Total | 977 | | 58 | |

// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm4_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>
 *		Add Encapsulation support
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/gro.h>
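
/* Post-decapsulation receive helpers: once the IPsec transform has run, the
 * inner packet gets a route lookup (if it does not already carry a dst) and
 * is then handed to xfrm_trans_queue(), which completes delivery through
 * dst_input() via xfrm4_rcv_encap_finish2().
 */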
static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	return dst_input(skb);
}

static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
					 struct sk_buff *skb)
{
	if (!skb_dst(skb)) {
		const struct iphdr *iph = ip_hdr(skb);

		if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
					 iph->tos, skb->dev))
			goto drop;
	}

	if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
		goto drop;

	return 0;
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
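
/* Finish receive processing after a transport-mode transform: restore the
 * inner protocol in the IP header, recompute length and checksum, and either
 * hand the packet straight back to the stack (GRO path) or re-inject it
 * through the NF_INET_PRE_ROUTING netfilter hook.  Without netfilter compiled
 * in, the synchronous (!async) case returns the negative inner protocol
 * number so the IP input path resubmits the packet for that protocol.
 */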
int xfrm4_transport_finish(struct sk_buff *skb, int async)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct iphdr *iph = ip_hdr(skb);

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	if (!async)
		return -iph->protocol;
#endif

	__skb_push(skb, -skb_network_offset(skb));
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	if (xo && (xo->flags & XFRM_GRO)) {
		/* The full l2 header needs to be preserved so that re-injecting
		 * the packet at l2 works correctly in the presence of vlan tags.
		 */
		skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		return 0;
	}

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		xfrm4_rcv_encap_finish);
	return 0;
}
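
/* Common helper for the plain and GRO UDP decapsulation paths.  It inspects a
 * packet received on a UDP-encapsulated ESP socket (RFC 3948 style NAT
 * traversal) and decides whether it is a NAT-keepalive (eaten), an IKE packet
 * (passed through to the socket), or UDP-encapsulated ESP, in which case the
 * UDP header and any marker bytes are stripped so the ESP header can be fed
 * to the xfrm input path.
 */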
static int __xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb, bool pull)
{
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;
	__u8 *udpdata;
	__be32 *udpdata32;
	u16 encap_type;

	encap_type = READ_ONCE(up->encap_type);
	/* if this is not encapsulated socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	len = skb->len - sizeof(struct udphdr);
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return -EINVAL;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return -EINVAL;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {
			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_unclone(skb, GFP_ATOMIC))
		return -EINVAL;

	/* Now we can update and verify the packet length... */
	iph = ip_hdr(skb);
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return -EINVAL;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	if (pull) {
		__skb_pull(skb, len);
		skb_reset_transport_header(skb);
	} else {
		skb_set_transport_header(skb, len);
	}

	/* process ESP */
	return 0;
}
/* If it's a keepalive packet, then just eat it.
 * If it's an encapsulated packet, then pass it to the
 * IPsec xfrm input.
 * Returns 0 if skb passed to xfrm or was dropped.
 * Returns >0 if skb should be passed to UDP.
 * Returns <0 if skb should be resubmitted (-ret is protocol)
 */
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = __xfrm4_udp_encap_rcv(sk, skb, true);
	if (!ret)
		return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0,
				       udp_sk(sk)->encap_type);

	if (ret < 0) {
		kfree_skb(skb);
		return 0;
	}

	return ret;
}
EXPORT_SYMBOL(xfrm4_udp_encap_rcv);
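
/* GRO variant of the UDP decapsulation hook.  skb_gro_offset() points past
 * the UDP header here, so the packet is first pulled back to the UDP header,
 * decapsulated without removing it (pull == false), and then handed to the
 * ESP GRO receive handler.  If decapsulation is not possible, GRO aggregation
 * is disabled for this skb and it continues through the normal receive path.
 */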
struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	int ret;

	offset = offset - sizeof(struct udphdr);

	if (!pskb_pull(skb, offset))
		return NULL;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[IPPROTO_ESP]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	ret = __xfrm4_udp_encap_rcv(sk, skb, false);
	if (ret)
		goto out;

	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	rcu_read_unlock();

	return pp;

out:
	rcu_read_unlock();
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
EXPORT_SYMBOL(xfrm4_gro_udp_encap_rcv);
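
/* Generic entry point for IPsec traffic carried directly over IPv4: hand the
 * packet to xfrm4_rcv_spi() with the protocol taken from the IP header.  The
 * SPI argument of 0 tells the xfrm input path to parse the SPI from the
 * packet itself.
 */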
int xfrm4_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
}
EXPORT_SYMBOL(xfrm4_rcv);