Release 4.12 include/net/udplite.h
/*
* Definitions for the UDP-Lite (RFC 3828) code.
*/
#ifndef _UDPLITE_H
#define _UDPLITE_H
#include <net/ip6_checksum.h>
/* UDP-Lite socket options */
#define UDPLITE_SEND_CSCOV 10
/* sender partial coverage (as sent) */
#define UDPLITE_RECV_CSCOV 11
/* receiver partial coverage (threshold) */
extern struct proto udplite_prot;
extern struct udp_table udplite_table;
/*
* Checksum computation is all in software, hence simpler getfrag.
*/
/*
 * getfrag callback for UDP-Lite output: copy @len bytes of user payload
 * from the msghdr iterator in @from into the packet buffer at @to.
 * No hardware checksum offload exists for UDP-Lite, so this only copies;
 * checksumming happens later in software.
 *
 * Returns 0 on success, -EFAULT if the user copy fails.
 */
static __inline__ int udplite_getfrag(void *from, char *to, int offset,
				      int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (copy_from_iter_full(to, len, &msg->msg_iter))
		return 0;
	return -EFAULT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gerrit Renker | 35 | 64.81% | 1 | 20.00% |
Al Viro | 19 | 35.19% | 4 | 80.00% |
Total | 54 | 100.00% | 5 | 100.00% |
/* Designate sk as UDP-Lite socket */
/*
 * Designate @sk as a UDP-Lite socket: tag the udp_sock with UDPLITE_BIT
 * and install the shared UDP socket destructor.  Always succeeds.
 */
static inline int udplite_sk_init(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	udp_sk(sk)->pcflag = UDPLITE_BIT;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gerrit Renker | 24 | 80.00% | 1 | 50.00% |
Paolo Abeni | 6 | 20.00% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
/*
* Checksumming routines
*/
/*
 * Validate the receive-side checksum coverage of a UDP-Lite datagram.
 *
 * Per RFC 3828 the UDP-Lite "length" field carries the checksum coverage:
 * 0 means full coverage, otherwise it must lie in [8, skb->len].
 *
 * Returns 0 if the packet may proceed (recording partial coverage in the
 * skb control block when applicable), 1 if it must be discarded.
 */
static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
{
	u16 cscov;

	/* In UDPv4 a zero checksum means that the transmitter generated no
	 * checksum.  UDP-Lite (like IPv6) mandates checksums, hence packets
	 * with a zero checksum field are illegal. */
	if (uh->check == 0) {
		net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
		return 1;
	}

	cscov = ntohs(uh->len);
	if (cscov != 0) {	/* cscov == 0 requests full coverage */
		if (cscov < 8 || cscov > skb->len) {
			/* Coverage length violates RFC 3828: log and discard. */
			net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
					    cscov, skb->len);
			return 1;
		}
		if (cscov < skb->len) {
			/* Legal partial coverage: remember it and force a
			 * software verification later, since any complete
			 * hardware checksum covered the whole packet. */
			UDP_SKB_CB(skb)->partial_cov = 1;
			UDP_SKB_CB(skb)->cscov = cscov;
			if (skb->ip_summed == CHECKSUM_COMPLETE)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gerrit Renker | 119 | 90.15% | 1 | 25.00% |
Herbert Xu | 9 | 6.82% | 1 | 25.00% |
Joe Perches | 4 | 3.03% | 2 | 50.00% |
Total | 132 | 100.00% | 4 | 100.00% |
/* Slow-path computation of checksum. Socket is locked. */
static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
__wsum csum = 0;
if (up->pcflag & UDPLITE_SEND_CC) {
/*
* Sender has set `partial coverage' option on UDP-Lite socket.
* The special case "up->pcslen == 0" signifies full coverage.
*/
if (up->pcslen < up->len) {
if (0 < up->pcslen)
cscov = up->pcslen;
udp_hdr(skb)->len = htons(up->pcslen);
}
/*
* NOTE: Causes for the error case `up->pcslen > up->len':
* (i) Application error (will not be penalized).
* (ii) Payload too big for send buffer: data is split
* into several packets, each with its own header.
* In this case (e.g. last segment), coverage may
* exceed packet length.
* Since packets with coverage length > packet length are
* illegal, we fall back to the defaults here.
*/
}
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
skb_queue_walk(&sk->sk_write_queue, skb) {
const int off = skb_transport_offset(skb);
const int len = skb->len - off;
csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
if ((cscov -= len) <= 0)
break;
}
return csum;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gerrit Renker | 152 | 93.25% | 2 | 66.67% |
Arnaldo Carvalho de Melo | 11 | 6.75% | 1 | 33.33% |
Total | 163 | 100.00% | 3 | 100.00% |
/* Fast-path computation of checksum. Socket may not be locked. */
/*
 * Fast-path computation of checksum. Socket may not be locked.
 *
 * Computes the software checksum over a single, fully-built skb, limiting
 * the covered byte count when the sending socket enabled partial coverage.
 * Returns the resulting __wsum.
 */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
	const struct udp_sock *up = udp_sk(skb->sk);
	const int off = skb_transport_offset(skb);
	int cov = skb->len - off;	/* default: cover whole payload */

	if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < cov) {
		/* pcslen == 0 means full coverage was requested */
		if (up->pcslen > 0)
			cov = up->pcslen;
		/* Advertise the coverage in the on-wire length field. */
		udp_hdr(skb)->len = htons(up->pcslen);
	}
	skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */

	return skb_checksum(skb, off, cov, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 57 | 52.78% | 1 | 50.00% |
Gerrit Renker | 51 | 47.22% | 1 | 50.00% |
Total | 108 | 100.00% | 2 | 100.00% |
void udplite4_register(void);
int udplite_get_port(struct sock *sk, unsigned short snum,
int (*scmp)(const struct sock *, const struct sock *));
#endif /* _UDPLITE_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gerrit Renker | 438 | 77.94% | 2 | 13.33% |
Herbert Xu | 66 | 11.74% | 2 | 13.33% |
Al Viro | 19 | 3.38% | 4 | 26.67% |
David S. Miller | 16 | 2.85% | 2 | 13.33% |
Arnaldo Carvalho de Melo | 11 | 1.96% | 1 | 6.67% |
Paolo Abeni | 6 | 1.07% | 1 | 6.67% |
Joe Perches | 4 | 0.71% | 2 | 13.33% |
Eric Dumazet | 2 | 0.36% | 1 | 6.67% |
Total | 562 | 100.00% | 15 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.