/* Release 4.11 — net/core/flow_dissector.c */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
/* Mark @key_id as dissected in @flow_dissector's used_keys bitmap. */
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Jiri Pirko | 24 | 96.00% | 1 | 50.00% |
 * David S. Miller | 1 | 4.00% | 1 | 50.00% |
 * Total | 25 | 100.00% | 2 | 100.00% |
 */
/* skb_flow_dissector_init - populate a dissector from an array of keys.
 * @flow_dissector: dissector to initialize
 * @key: array of (key_id, offset) descriptors
 * @key_count: number of entries in @key
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* The caller guarantees every target offset fits within
		 * the unsigned short offset[] slots.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		/* Duplicate key ids in the array are a caller bug. */
		BUG_ON(dissector_uses_key(flow_dissector, key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* The control and basic keys must always be present so the fast
	 * path never needs to test for their absence.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_BASIC));
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Jiri Pirko | 107 | 87.70% | 1 | 33.33% |
 * Tom Herbert | 11 | 9.02% | 1 | 33.33% |
 * David S. Miller | 4 | 3.28% | 1 | 33.33% |
 * Total | 122 | 100.00% | 3 | 100.00% |
 */
EXPORT_SYMBOL(skb_flow_dissector_init);
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff; returns 0 when the bytes are out of bounds.
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	/* __skb_header_pointer() copies into _u when the bytes are not
	 * linearly accessible; NULL means out of bounds.
	 */
	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Simon Horman | 59 | 98.33% | 1 | 50.00% |
 * Eric Dumazet | 1 | 1.67% | 1 | 50.00% |
 * Total | 60 | 100.00% | 2 | 100.00% |
 */
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	__be32 *pports, _pports;
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	/* Protocols without a port concept yield a negative offset. */
	if (poff < 0)
		return 0;

	pports = __skb_header_pointer(skb, thoff + poff, sizeof(_pports),
				      data, hlen, &_pports);
	return pports ? *pports : 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Nikolay Aleksandrov | 67 | 67.00% | 1 | 50.00% |
 * David S. Miller | 33 | 33.00% | 1 | 50.00% |
 * Total | 100 | 100.00% | 2 | 100.00% |
 */
EXPORT_SYMBOL(__skb_flow_get_ports);
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_arp *key_arp;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	struct flow_dissector_key_keyid *key_keyid;
	bool skip_vlan = false;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		/* key_eth_addrs mirrors the dst+src layout of the Ethernet
		 * header starting at h_dest.
		 */
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

again:
	/* L2/L3 dispatch on EtherType; encapsulations loop back here. */
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				/* Non-first fragment: no L4 header to parse. */
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan)
				goto out_bad;
			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			/* Only the outermost VLAN tag is dissected below;
			 * additional tags are just skipped over.
			 */
			if (skip_vlan)
				goto again;
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		goto out_good;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC): {
		struct mpls_label *hdr, _hdr[2];
mpls:
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		/* An entropy label carries the flow entropy in the
		 * following label stack entry.
		 */
		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
		    MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
								      target_container);
				key_keyid->keyid = hdr[1].entry &
					htonl(MPLS_LS_LABEL_MASK);
			}

			goto out_good;
		}

		goto out_good;
	}

	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN)
			goto out_bad;

		nhoff += FCOE_HEADER_LEN;
		goto out_good;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP): {
		struct {
			unsigned char ar_sha[ETH_ALEN];
			unsigned char ar_sip[4];
			unsigned char ar_tha[ETH_ALEN];
			unsigned char ar_tip[4];
		} *arp_eth, _arp_eth;
		const struct arphdr *arp;
		struct arphdr _arp;

		arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
					   hlen, &_arp);
		if (!arp)
			goto out_bad;

		/* Only Ethernet/IPv4 request/reply ARP is dissected. */
		if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_hln != ETH_ALEN ||
		    arp->ar_pln != 4 ||
		    (arp->ar_op != htons(ARPOP_REPLY) &&
		     arp->ar_op != htons(ARPOP_REQUEST)))
			goto out_bad;

		arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
					       sizeof(_arp_eth), data,
					       hlen,
					       &_arp_eth);
		if (!arp_eth)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ARP)) {

			key_arp = skb_flow_dissector_target(flow_dissector,
							    FLOW_DISSECTOR_KEY_ARP,
							    target_container);

			memcpy(&key_arp->sip, arp_eth->ar_sip,
			       sizeof(key_arp->sip));
			memcpy(&key_arp->tip, arp_eth->ar_tip,
			       sizeof(key_arp->tip));

			/* Only store the lower byte of the opcode;
			 * this covers ARPOP_REPLY and ARPOP_REQUEST.
			 */
			key_arp->op = ntohs(arp->ar_op) & 0xff;

			ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
			ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
		}

		goto out_good;
	}

	default:
		goto out_bad;
	}

ip_proto_again:
	/* L4 / extension-header dispatch; tunnels jump back to "again". */
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_base_hdr *hdr, _hdr;
		u16 gre_ver;
		int offset = 0;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		/* Only look inside GRE without routing */
		if (hdr->flags & GRE_ROUTING)
			break;

		/* Only look inside GRE for version 0 and 1 */
		gre_ver = ntohs(hdr->flags & GRE_VERSION);
		if (gre_ver > 1)
			break;

		proto = hdr->protocol;
		if (gre_ver) {
			/* Version1 must be PPTP, and check the flags */
			if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
				break;
		}

		offset += sizeof(struct gre_base_hdr);

		if (hdr->flags & GRE_CSUM)
			offset += sizeof(((struct gre_full_hdr *)0)->csum) +
				  sizeof(((struct gre_full_hdr *)0)->reserved1);

		if (hdr->flags & GRE_KEY) {
			const __be32 *keyid;
			__be32 _keyid;

			keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid),
						     data, hlen, &_keyid);
			if (!keyid)
				goto out_bad;

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_GRE_KEYID,
								      target_container);
				if (gre_ver == 0)
					key_keyid->keyid = *keyid;
				else
					key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
			}
			offset += sizeof(((struct gre_full_hdr *)0)->key);
		}

		if (hdr->flags & GRE_SEQ)
			offset += sizeof(((struct pptp_gre_header *)0)->seq);

		if (gre_ver == 0) {
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = __skb_header_pointer(skb, nhoff + offset,
							   sizeof(_eth),
							   data, hlen, &_eth);
				if (!eth)
					goto out_bad;
				proto = eth->h_proto;
				offset += sizeof(*eth);

				/* Cap headers that we access via pointers at the
				 * end of the Ethernet header as our maximum alignment
				 * at that point is only 2 bytes.
				 */
				if (NET_IP_ALIGN)
					hlen = (nhoff + offset);
			}
		} else { /* version 1, must be PPTP */
			u8 _ppp_hdr[PPP_HDRLEN];
			u8 *ppp_hdr;

			if (hdr->flags & GRE_ACK)
				offset += sizeof(((struct pptp_gre_header *)0)->ack);

			ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
						       sizeof(_ppp_hdr),
						       data, hlen, _ppp_hdr);
			if (!ppp_hdr)
				goto out_bad;

			switch (PPP_PROTOCOL(ppp_hdr)) {
			case PPP_IP:
				proto = htons(ETH_P_IP);
				break;
			case PPP_IPV6:
				proto = htons(ETH_P_IPV6);
				break;
			default:
				/* Could probably catch some more like MPLS */
				break;
			}

			offset += PPP_HDRLEN;
		}

		nhoff += offset;
		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto again;
	}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			goto out_bad;

		ip_proto = opthdr[0];
		/* Extension header length is in 8-octet units, excluding
		 * the first 8 octets.
		 */
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh)
			goto out_bad;

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG)
				goto ip_proto_again;
		}
		goto out_good;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;
		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

out_good:
	ret = true;

	key_control->thoff = (u16)nhoff;
out:
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	goto out;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Tom Herbert | 657 | 28.44% | 20 | 35.09% |
 * Eric Dumazet | 403 | 17.45% | 6 | 10.53% |
 * Simon Horman | 323 | 13.98% | 2 | 3.51% |
 * Gao Feng | 259 | 11.21% | 1 | 1.75% |
 * Jiri Pirko | 204 | 8.83% | 4 | 7.02% |
 * Hadar Hen Zion | 115 | 4.98% | 2 | 3.51% |
 * David S. Miller | 93 | 4.03% | 3 | 5.26% |
 * Alexander Duyck | 90 | 3.90% | 8 | 14.04% |
 * Michael Dalton | 55 | 2.38% | 1 | 1.75% |
 * Erik Hugne | 51 | 2.21% | 1 | 1.75% |
 * Américo Wang | 19 | 0.82% | 1 | 1.75% |
 * Arnd Bergmann | 16 | 0.69% | 1 | 1.75% |
 * Joe Perches | 7 | 0.30% | 1 | 1.75% |
 * Jason (Hui) Wang | 6 | 0.26% | 1 | 1.75% |
 * Ian Kumlien | 5 | 0.22% | 1 | 1.75% |
 * Eric Garver | 3 | 0.13% | 1 | 1.75% |
 * Geert Uytterhoeven | 2 | 0.09% | 1 | 1.75% |
 * Colin Ian King | 1 | 0.04% | 1 | 1.75% |
 * Nikolay Aleksandrov | 1 | 0.04% | 1 | 1.75% |
 * Total | 2310 | 100.00% | 57 | 100.00% |
 */
EXPORT_SYMBOL(__skb_flow_dissect);
/* Random hash seed, lazily initialized on first use. */
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	/* Fills hashrnd with random bytes exactly once. */
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Frederic Sowa | 20 | 100.00% | 1 | 100.00% |
 * Total | 20 | 100.00% | 1 | 100.00% |
 */
/* Hash @length u32 words seeded with @keyval. */
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	u32 hash = jhash2(words, length, keyval);

	return hash;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Frederic Sowa | 16 | 57.14% | 1 | 25.00% |
 * Tom Herbert | 11 | 39.29% | 2 | 50.00% |
 * David S. Miller | 1 | 3.57% | 1 | 25.00% |
 * Total | 28 | 100.00% | 4 | 100.00% |
 */
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
const void *p = flow;
BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Tom Herbert | 29 | 65.91% | 1 | 50.00% |
 * David S. Miller | 15 | 34.09% | 1 | 50.00% |
 * Total | 44 | 100.00% | 2 | 100.00% |
 */
/* Number of u32 words of @flow to hash: everything past the hash start,
 * minus the unused tail of the addrs union for the address type in use.
 */
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t used = 0;

	/* The hashed region must be a whole number of u32 words and the
	 * addrs union must be the trailing member of struct flow_keys.
	 */
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		used = sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		used = sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		used = sizeof(flow->addrs.tipcaddrs);
		break;
	}

	return (sizeof(*flow) - FLOW_KEYS_HASH_OFFSET -
		sizeof(flow->addrs) + used) / sizeof(u32);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Tom Herbert | 140 | 99.29% | 3 | 75.00% |
 * David S. Miller | 1 | 0.71% | 1 | 25.00% |
 * Total | 141 | 100.00% | 4 | 100.00% |
 */
/* Return a u32 source identifier for the dissected address family:
 * the IPv4 source, a hash of the IPv6 source, or the TIPC source node.
 */
__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		/* Fold the 128-bit address down to 32 bits. */
		return (__force __be32)ipv6_addr_hash(&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	default:
		return 0;
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Tom Herbert | 69 | 100.00% | 2 | 100.00% |
 * Total | 69 | 100.00% | 2 | 100.00% |
 */
EXPORT_SYMBOL(flow_get_u32_src);
__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
switch (flow->control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
return flow->addrs.v4addrs.dst;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
return (__force __be32)ipv6_addr_hash