cregit-Linux: how code gets into the kernel

Release 4.11, net/ipv4/raw.c (directory: net/ipv4)
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              RAW - implementation of IP "raw" sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() fixed up
 *              Alan Cox        :       ICMP error handling
 *              Alan Cox        :       EMSGSIZE if you send too big a packet
 *              Alan Cox        :       Now uses generic datagrams and shared
 *                                      skbuff library. No more peek crashes,
 *                                      no more backlogs
 *              Alan Cox        :       Checks sk->broadcast.
 *              Alan Cox        :       Uses skb_free_datagram/skb_copy_datagram
 *              Alan Cox        :       Raw passes ip options too
 *              Alan Cox        :       Setsocketopt added
 *              Alan Cox        :       Fixed error return for broadcasts
 *              Alan Cox        :       Removed wake_up calls
 *              Alan Cox        :       Use ttl/tos
 *              Alan Cox        :       Cleaned up old debugging
 *              Alan Cox        :       Use new kernel side addresses
 *      Arnt Gulbrandsen        :       Fixed MSG_DONTROUTE in raw sockets.
 *              Alan Cox        :       BSD style RAW socket demultiplexing.
 *              Alan Cox        :       Beginnings of mrouted support.
 *              Alan Cox        :       Added IP_HDRINCL option.
 *              Alan Cox        :       Skip broadcast check if BSDism set.
 *              David S. Miller :       New socket lookup architecture.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/mroute.h>
#include <linux/netdevice.h>
#include <linux/in_route.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <linux/igmp.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/snmp.h>
#include <net/tcp_states.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/uio.h>


struct raw_frag_vec {
	struct msghdr *msg;
	union {
		struct icmphdr icmph;
		char c[1];
	} hdr;
	int hlen;
};


struct raw_hashinfo raw_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
};

EXPORT_SYMBOL_GPL(raw_v4_hashinfo);


int raw_hash_sk(struct sock *sk)
{
	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
	struct hlist_head *head;

	head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Pavel Emelyanov                 35   39.33%         4       28.57%
Linus Torvalds (pre-git)        35   39.33%         4       28.57%
Arnaldo Carvalho de Melo         8    8.99%         2       14.29%
Craig Gallek                     4    4.49%         1        7.14%
Eric Dumazet                     4    4.49%         2       14.29%
David S. Miller                  3    3.37%         1        7.14%
Total                           89  100.00%        14      100.00%

EXPORT_SYMBOL_GPL(raw_hash_sk);
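
A minimal userspace sketch for context (not part of raw.c; assumes Linux headers and CAP_NET_RAW): creating a raw IPv4 socket is what leads the kernel to hash the new sock into raw_v4_hashinfo via raw_hash_sk(), using the bucket selected by the protocol number, and closing the socket later removes it again via raw_unhash_sk().

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	/* The protocol passed here becomes inet_num; on the kernel side
	 * the sock lands in ht[IPPROTO_ICMP & (RAW_HTABLE_SIZE - 1)]. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

	if (fd < 0) {
		perror("socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)");
		return 1;
	}
	close(fd);	/* removal from the hash happens on release */
	return 0;
}
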
void raw_unhash_sk(struct sock *sk)
{
	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Pavel Emelyanov                 57   93.44%         3       75.00%
Eric Dumazet                     4    6.56%         1       25.00%
Total                           61  100.00%         4      100.00%

EXPORT_SYMBOL_GPL(raw_unhash_sk);
struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
			     unsigned short num, __be32 raddr, __be32 laddr, int dif)
{
	sk_for_each_from(sk) {
		struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->inet_num == num	&&
		    !(inet->inet_daddr && inet->inet_daddr != raddr) 	&&
		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
			goto found; /* gotcha */
	}
	sk = NULL;
found:
	return sk;
}

Contributors

Person                        Tokens     Prop   Commits   CommitProp
Linus Torvalds (pre-git)          61   52.59%         4       30.77%
Arnaldo Carvalho de Melo          20   17.24%         3       23.08%
David S. Miller                   13   11.21%         1        7.69%
Pavel Emelyanov                    8    6.90%         1        7.69%
Hideaki Yoshifuji / 吉藤英明        7    6.03%         2       15.38%
Eric Dumazet                       5    4.31%         1        7.69%
Alexey Dobriyan                    2    1.72%         1        7.69%
Total                            116  100.00%        13      100.00%

EXPORT_SYMBOL_GPL(__raw_v4_lookup);
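
A hedged userspace illustration (not from raw.c) of how the address checks in __raw_v4_lookup() map to the socket API: bind() sets the local address compared against inet_rcv_saddr, and connect() sets the remote address compared against inet_daddr, so only packets matching both are delivered to the socket. The addresses below are documentation-range placeholders.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int narrow_raw_socket(int raw_fd)
{
	struct sockaddr_in local = { .sin_family = AF_INET };
	struct sockaddr_in peer  = { .sin_family = AF_INET };

	inet_pton(AF_INET, "192.0.2.1", &local.sin_addr);	/* local address match */
	inet_pton(AF_INET, "198.51.100.7", &peer.sin_addr);	/* remote address match */

	if (bind(raw_fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		return -1;
	return connect(raw_fd, (struct sockaddr *)&peer, sizeof(peer));
}
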
/*
 *	0 - deliver
 *	1 - block
 */
static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
{
	struct icmphdr _hdr;
	const struct icmphdr *hdr;

	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
				 sizeof(_hdr), &_hdr);
	if (!hdr)
		return 1;

	if (hdr->type < 32) {
		__u32 data = raw_sk(sk)->filter.data;

		return ((1U << hdr->type) & data) != 0;
	}

	/* Do not block unknown ICMP types */
	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Eric Dumazet                    33   34.02%         1       16.67%
Linus Torvalds (pre-git)        31   31.96%         1       16.67%
Linus Torvalds                  19   19.59%         1       16.67%
Ian Pratt                       11   11.34%         1       16.67%
David S. Miller                  2    2.06%         1       16.67%
Arnaldo Carvalho de Melo         1    1.03%         1       16.67%
Total                           97  100.00%         6      100.00%
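
The filter that icmp_filter() consults is set from userspace with the ICMP_FILTER socket option: each set bit in icmp_filter.data marks an ICMP type to drop, and types >= 32 are always delivered. A short hedged sketch, not from raw.c:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/icmp.h>

static int block_echo_replies(int raw_icmp_fd)
{
	struct icmp_filter filt = {
		/* bits correspond to the (1U << hdr->type) test in icmp_filter() */
		.data = (1U << ICMP_ECHOREPLY) | (1U << ICMP_DEST_UNREACH),
	};

	return setsockopt(raw_icmp_fd, SOL_RAW, ICMP_FILTER,
			  &filt, sizeof(filt));
}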

/* IP input processing comes here for RAW socket delivery.
 * Caller owns SKB, so we must make clones.
 *
 * RFC 1122: SHOULD pass TOS value up to the transport layer.
 * -> It does. And not only TOS, but all IP header.
 */
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
	struct sock *sk;
	struct hlist_head *head;
	int delivered = 0;
	struct net *net;

	read_lock(&raw_v4_hashinfo.lock);
	head = &raw_v4_hashinfo.ht[hash];
	if (hlist_empty(head))
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);

	while (sk) {
		delivered = 1;
		if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
		    ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
				   skb->dev->ifindex)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
				     iph->saddr, iph->daddr,
				     skb->dev->ifindex);
	}
out:
	read_unlock(&raw_v4_hashinfo.lock);
	return delivered;
}

Contributors

Person                        Tokens     Prop   Commits   CommitProp
Linus Torvalds (pre-git)         114   51.12%         3       23.08%
Pavel Emelyanov                   25   11.21%         3       23.08%
David S. Miller                   24   10.76%         1        7.69%
Arnaldo Carvalho de Melo          22    9.87%         2       15.38%
Quentin Armitage                  21    9.42%         1        7.69%
Patrick McHardy                   13    5.83%         1        7.69%
Hideaki Yoshifuji / 吉藤英明        3    1.35%         1        7.69%
Eric Dumazet                       1    0.45%         1        7.69%
Total                            223  100.00%        13      100.00%
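
For illustration only (not from raw.c): because each matching socket gets its own skb_clone(), two raw sockets bound to the same protocol both see every incoming packet of that protocol.

#include <sys/socket.h>
#include <netinet/in.h>

static void open_two_icmp_listeners(int fds[2])
{
	fds[0] = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	fds[1] = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	/* a single inbound ICMP packet is now cloned to both sockets */
}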


int raw_local_deliver(struct sk_buff *skb, int protocol)
{
	int hash;
	struct sock *raw_sk;

	hash = protocol & (RAW_HTABLE_SIZE - 1);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);

	/* If there maybe a raw socket we must check - if not we
	 * don't care less
	 */
	if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
		raw_sk = NULL;

	return raw_sk != NULL;
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Pavel Emelyanov                 71  100.00%         2      100.00%
Total                           71  100.00%         2      100.00%


static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	int err = 0;
	int harderr = 0;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_sk_update_pmtu(skb, sk, info);
	else if (type == ICMP_REDIRECT) {
		ipv4_sk_redirect(skb, sk);
		return;
	}

	/* Report error on raw socket, if:
	   1. User requested ip_recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without ip_recverr and error is hard.
	 */
	if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		return;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		err = EHOSTUNREACH;
		if (code > NR_ICMP_UNREACH)
			break;
		err = icmp_err_convert[code].errno;
		harderr = icmp_err_convert[code].fatal;
		if (code == ICMP_FRAG_NEEDED) {
			harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
			err = EMSGSIZE;
		}
	}

	if (inet->recverr) {
		const struct iphdr *iph = (const struct iphdr *)skb->data;
		u8 *payload = skb->data + (iph->ihl << 2);

		if (inet->hdrincl)
			payload = skb->data;
		ip_icmp_error(sk, skb, err, 0, info, payload);
	}

	if (inet->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Linus Torvalds (pre-git)       173   60.70%        10       50.00%
Linus Torvalds                  47   16.49%         1        5.00%
David S. Miller                 47   16.49%         3       15.00%
Arnaldo Carvalho de Melo        12    4.21%         3       15.00%
Duan Jiong                       3    1.05%         1        5.00%
Eric Dumazet                     2    0.70%         1        5.00%
Pavel Emelyanov                  1    0.35%         1        5.00%
Total                          285  100.00%        20      100.00%
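
The queueing path above (ip_icmp_error() when inet->recverr is set) pairs with the IP_RECVERR socket option and MSG_ERRQUEUE on the userspace side. A hedged sketch, not from raw.c:

#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static int enable_recverr(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
}

static void drain_errqueue(int fd)
{
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
			const struct sock_extended_err *ee =
				(const struct sock_extended_err *)CMSG_DATA(cm);
			/* ee->ee_type and ee->ee_code carry the ICMP type and code */
			(void)ee;
		}
	}
}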


void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	int hash;
	struct sock *raw_sk;
	const struct iphdr *iph;
	struct net *net;

	hash = protocol & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v4_hashinfo.lock);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
	if (raw_sk) {
		iph = (const struct iphdr *)skb->data;
		net = dev_net(skb->dev);

		while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
						 iph->daddr, iph->saddr,
						 skb->dev->ifindex)) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);
			iph = (const struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_hashinfo.lock);
}

Contributors

Person                        Tokens     Prop   Commits   CommitProp
Pavel Emelyanov                  156   96.30%         3       60.00%
Eric Dumazet                       3    1.85%         1       20.00%
Hideaki Yoshifuji / 吉藤英明        3    1.85%         1       20.00%
Total                            162  100.00%         5      100.00%


static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Charge it to the socket. */

	ipv4_pktinfo_prepare(sk, skb);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Linus Torvalds (pre-git)        40   83.33%         3       60.00%
Eric Dumazet                     6   12.50%         1       20.00%
Shawn Bohrer                     2    4.17%         1       20.00%
Total                           48  100.00%         5      100.00%


int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	nf_reset(skb);

	skb_push(skb, skb->data - skb_network_header(skb));

	raw_rcv_skb(sk, skb);
	return 0;
}

Contributors

Person                        Tokens     Prop   Commits   CommitProp
Linus Torvalds (pre-git)          32   43.24%         8       53.33%
Alexey Kuznetsov                  20   27.03%         1        6.67%
Wang Chen                          8   10.81%         1        6.67%
Patrick McHardy                    5    6.76%         1        6.67%
Linus Torvalds                     4    5.41%         1        6.67%
Arnaldo Carvalho de Melo           3    4.05%         1        6.67%
Hideaki Yoshifuji / 吉藤英明        1    1.35%         1        6.67%
David S. Miller                    1    1.35%         1        6.67%
Total                             74  100.00%        15      100.00%
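
On the userspace side this means a read from an IPv4 raw socket returns the complete IP header followed by the payload, since the network header is pushed back before the skb is queued. A hedged sketch, not from raw.c:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>

static void dump_one_packet(int raw_fd)
{
	unsigned char buf[65536];
	ssize_t n = recv(raw_fd, buf, sizeof(buf), 0);

	if (n >= (ssize_t)sizeof(struct iphdr)) {
		const struct iphdr *iph = (const struct iphdr *)buf;

		printf("proto %u, header %u bytes, total %zd bytes\n",
		       (unsigned int)iph->protocol, iph->ihl * 4u, n);
	}
}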


static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
			   struct msghdr *msg, size_t length,
			   struct rtable **rtp, unsigned int flags,
			   const struct sockcm_cookie *sockc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;
	struct rtable *rt = *rtp;
	int hlen, tlen;

	if (length > rt->dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	hlen = LL_RESERVED_SPACE(rt->dst.dev);
	tlen = rt->dst.dev->needed_tailroom;
	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto error;
	skb_reserve(skb, hlen);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*rtp = NULL;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);

	if (flags & MSG_CONFIRM)
		skb_set_dst_pending_confirm(skb, 1);

	skb->transport_header = skb->network_header;
	err = -EFAULT;
	if (memcpy_from_msg(iph, msg, length))
		goto error_free;

	iphlen = iph->ihl * 4;

	/*
	 * We don't want to modify the ip header, but we do need to
	 * be sure that it won't cause problems later along the network
	 * stack. Specifically we want to make sure that iph->ihl is a
	 * sane value. If ihl points beyond the length of the buffer passed
	 * in, reject the frame as invalid
	 */
	err = -EINVAL;
	if (iphlen > length)
		goto error_free;

	if (iphlen >= sizeof(*iph)) {
		if (!iph->saddr)
			iph->saddr = fl4->saddr;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(net, skb, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
		skb->transport_header += iphlen;
		if (iph->protocol == IPPROTO_ICMP &&
		    length >= iphlen + sizeof(struct icmphdr))
			icmp_out_count(net, ((struct icmphdr *)
				skb_transport_header(skb))->type);
	}

	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, rt->dst.dev,
		      dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_free:
	kfree_skb(skb);
error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !inet->recverr)
		err = 0;
	return err;
}

Contributors

Person                        Tokens     Prop   Commits   CommitProp
Alexey Kuznetsov                 208   38.66%         3        6.67%
Linus Torvalds (pre-git)          88   16.36%         8       17.78%
Herbert Xu                        43    7.99%         2        4.44%
Eric Dumazet                      35    6.51%         4        8.89%
David L Stevens                   25    4.65%         1        2.22%
Neil Horman                       22    4.09%         1        2.22%
Ben Cartwright-Cox                17    3.16%         1        2.22%
Arnaldo Carvalho de Melo          15    2.79%         5       11.11%
Pavel Emelyanov                   14    2.60%         2        4.44%
Willem de Bruijn                  13    2.42%         1        2.22%
Julian Anastasov                  13    2.42%         1        2.22%
David S. Miller                   11    2.04%         2        4.44%
Soheil Hassas Yeganeh             10    1.86%         1        2.22%
Tóth László Attila                 7    1.30%         1        2.22%
Al Viro                            5    0.93%         2        4.44%
Eric W. Biedermann                 3    0.56%         2        4.44%
Hannes Frederic Sowa               2    0.37%         1        2.22%
Jan Engelhardt                     1    0.19%         1        2.22%
Ansis Atteka                       1    0.19%         1        2.22%
Linus Torvalds                     1    0.19%         1        2.22%
Ian Morris                         1    0.19%         1        2.22%
Hideaki Yoshifuji / 吉藤英明        1    0.19%         1        2.22%
Patrick McHardy                    1    0.19%         1        2.22%
Jesper Juhl                        1    0.19%         1        2.22%
Total                            538  100.00%        45      100.00%
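
The header fix-ups in raw_send_hdrinc() have a well-known userspace contract: with IP_HDRINCL the sender supplies the IP header, and the kernel fills in the source address, packet ID and IP header checksum when they are left as zero (the ICMP checksum is still the sender's job). A hedged sketch, not from raw.c; the destination is supplied by the caller:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>

static unsigned short csum16(const void *buf, int len)
{
	const unsigned short *p = buf;
	unsigned long sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len)
		sum += *(const unsigned char *)p;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}

static int send_hdrincl_echo(int raw_fd, struct sockaddr_in *dst)
{
	unsigned char pkt[sizeof(struct iphdr) + sizeof(struct icmphdr)] = { 0 };
	struct iphdr *iph = (struct iphdr *)pkt;
	struct icmphdr *icmp = (struct icmphdr *)(pkt + sizeof(*iph));
	int on = 1;

	if (setsockopt(raw_fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)) < 0)
		return -1;

	iph->version  = 4;
	iph->ihl      = 5;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_ICMP;
	iph->tot_len  = htons(sizeof(pkt));
	iph->daddr    = dst->sin_addr.s_addr;
	/* iph->saddr, iph->id and iph->check stay 0: the kernel fills them */

	icmp->type = ICMP_ECHO;
	icmp->checksum = csum16(icmp, sizeof(*icmp));

	return sendto(raw_fd, pkt, sizeof(pkt), 0,
		      (struct sockaddr *)dst, sizeof(*dst));
}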


static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4)
{
	int err;

	if (fl4->flowi4_proto != IPPROTO_ICMP)
		return 0;

	/* We only need the first two bytes. */
	rfv->hlen = 2;

	err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen);
	if (err)
		return err;

	fl4->fl4_icmp_type = rfv->hdr.icmph.type;
	fl4->fl4_icmp_code = rfv->hdr.icmph.code;

	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   CommitProp
Herbert Xu                      48   53.33%         2       28.57%
Masahide Nakamura               30   33.33%         1       14.29%
Heiko Carstens                   7    7.78%         1       14.29%
David S. Miller                  3    3.33%         2       28.57%
Al Viro                          2    2.22%         1       14.29%
Total                           90  100.00%         7      100.00%


static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
			struct sk_buff *skb)
{
	struct raw_frag_vec *rfv = from;

	if (offset < rfv->hlen) {
		int copy = min(rfv->hlen - offset, len);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			memcpy(to, rfv->hdr.c + offset, copy);
		else
			skb->csum = csum_block_add(
				skb->csum,
				csum_partial_copy_nocheck(rfv->hdr.c + offset,
							  to, copy, 0),
				odd);

		odd = 0;
		offset += copy;
		to += copy;
		len -= copy;

		if (!len)
			return 0;
	}

	offset -= rfv->hlen;

	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}

Contributors

Person          Tokens     Prop   Commits   CommitProp
Herbert Xu         160   99.38%         1       50.00%
Al Viro              1    0.62%         1       50.00%
Total              161  100.00%         2      100.00%


static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	int free = 0;
	__be32 daddr;
	__be32 saddr;
	u8  tos;
	int err;
	struct ip_options_data opt_copy;
	struct raw_frag_vec rfv;

	err = -EMSGSIZE;
	if (len > 0xFFFF)
		goto out;

	/*
	 *	Check the flags.
	 */

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
		goto out;               /* compatibility */

	/*
	 *	Get and verify the address.
	 */

	if (msg->msg_namelen) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(*usin))
			goto out;
		if (usin->sin_family != AF_INET) {
			pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
				     __func__, current->comm);
			err = -EAFNOSUPPORT;
			if (usin->sin_family)
				goto out;
		}
		daddr = usin->sin_addr.s_addr;
		/* ANK: I did not forget to get protocol from port field.
		 * I just do not know, who uses this weirdness.
		 * IP_HDRINCL is much more convenient.
		 */
	} else {
		err = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;
		daddr = inet->inet_daddr;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, false);
		if (unlikely(err)) {
			kfree(ipc.opt);
			goto out;
		}
		if (ipc.opt)
			free = 1;
	}

	saddr = ipc.addr;
	ipc.addr = daddr;

	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (ipc.opt) {
		err = -EINVAL;
		/* Linux does not mangle headers on raw sockets,
		 * so that IP options + IP_HDRINCL is non-sense.
		 */
		if (inet->hdrincl)
			goto done;
		if (ipc.opt->opt.srr) {
			if (!daddr)
				goto done;
			daddr = ipc.opt->opt.faddr;
		}
	}
	tos = get_rtconn_flags(&ipc, sk);
	if (msg->msg_flags & MSG_DONTROUTE)
		tos |= RTO_ONLINK;

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
			   RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk) |
			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
			   daddr, saddr, 0, 0, sk->sk_uid);

	if (!inet->hdrincl) {
		rfv.msg = msg;
		rfv.hlen = 0;

		err = raw_probe_proto_opt(&rfv, &fl4);
		if (err)
			goto done;
	}

	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto done;
	}

	err = -EACCES;
	if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
		goto done;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm