cregit-Linux how code gets into the kernel

Release 4.14 net/bridge/br_input.c

Directory: net/bridge
/*
 *	Handle incoming frames
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/neighbour.h>
#include <net/arp.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

/* Hook for brouter */

br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;


static int br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) { br_drop_fake_rtable(skb); return netif_receive_skb(skb); }


Eric W. Biedermann2784.38%266.67%
Florian Westphal515.62%133.33%

static int br_pass_frame_up(struct sk_buff *skb) { struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; struct net_bridge *br = netdev_priv(brdev); struct net_bridge_vlan_group *vg; struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); u64_stats_update_begin(&brstats->syncp); brstats->rx_packets++; brstats->rx_bytes += skb->len; u64_stats_update_end(&brstats->syncp); vg = br_vlan_group_rcu(br); /* Bridge is just like any other port. Make sure the * packet is allowed except in promisc modue when someone * may be running packet capture. */ if (!(brdev->flags & IFF_PROMISC) && !br_allowed_egress(vg, skb)) { kfree_skb(skb); return NET_RX_DROP; } indev = skb->dev; skb->dev = brdev; skb = br_handle_vlan(br, NULL, vg, skb); if (!skb) return NET_RX_DROP; /* update the multicast stats if the packet is IGMP/MLD */ br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb), BR_MCAST_DIR_TX); return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(indev), NULL, skb, indev, NULL, br_netif_receive_skb); }


Vlad Yasevich6332.81%316.67%
Nikolay Aleksandrov2714.06%316.67%
Linus Torvalds (pre-git)2412.50%15.56%
Stephen Hemminger2110.94%15.56%
Eric Dumazet168.33%15.56%
Linus Torvalds136.77%15.56%
Pavel Emelyanov84.17%15.56%
Herbert Xu84.17%15.56%
Eric W. Biedermann63.12%211.11%
Roopa Prabhu21.04%15.56%
David S. Miller21.04%15.56%
Li RongQing10.52%15.56%
Jan Engelhardt10.52%15.56%

static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p) { struct net_device *dev = br->dev; struct neighbour *n; struct arphdr *parp; u8 *arpptr, *sha; __be32 sip, tip; BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; if ((dev->flags & IFF_NOARP) || !pskb_may_pull(skb, arp_hdr_len(dev))) return; parp = arp_hdr(skb); if (parp->ar_pro != htons(ETH_P_IP) || parp->ar_op != htons(ARPOP_REQUEST) || parp->ar_hln != dev->addr_len || parp->ar_pln != 4) return; arpptr = (u8 *)parp + sizeof(struct arphdr); sha = arpptr; arpptr += dev->addr_len; /* sha */ memcpy(&sip, arpptr, sizeof(sip)); arpptr += sizeof(sip); arpptr += dev->addr_len; /* tha */ memcpy(&tip, arpptr, sizeof(tip)); if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) return; n = neigh_lookup(&arp_tbl, &tip, dev); if (n) { struct net_bridge_fdb_entry *f; if (!(n->nud_state & NUD_VALID)) { neigh_release(n); return; } f = br_fdb_find_rcu(br, n->ha, vid); if (f && ((p->flags & BR_PROXYARP) || (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) { arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, sha, n->ha, sha); BR_INPUT_SKB_CB(skb)->proxyarp_replied = true; } neigh_release(n); } }


Kyeyoon Park27683.38%125.00%
Jouni Malinen5115.41%125.00%
Nikolay Aleksandrov41.21%250.00%

/* note: already called with rcu_read_lock */
/* Bridge forwarding fast path, run as the NF_BR_PRE_ROUTING okfn.
 * Classifies the frame (unicast/multicast/broadcast), learns the source
 * address, optionally answers proxy-ARP, then either forwards to a
 * known FDB destination, floods, multicast-forwards, and/or delivers a
 * copy locally via br_pass_frame_up(). Always returns 0; drops consume
 * the skb.
 */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	const unsigned char *dest;
	struct net_bridge *br;
	u16 vid = 0;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	/* VLAN ingress filter; also resolves the frame's vid */
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
		goto out;

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);

	/* a promiscuous bridge device always gets a local copy */
	local_rcv = !!(br->dev->flags & IFF_PROMISC);
	dest = eth_hdr(skb)->h_dest;
	if (is_multicast_ether_addr(dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			if (br_multicast_rcv(br, p, skb, vid))
				goto drop;
		}
	}

	/* learning-state ports learn (above) but never forward */
	if (p->state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;

	if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
		br_do_proxy_arp(skb, br, vid, p);

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb))) {
			/* snooping active: deliver locally only if the
			 * bridge itself is a group member or a router
			 */
			if ((mdst && mdst->mglist) ||
			    br_multicast_is_router(br)) {
				local_rcv = true;
				br->dev->stats.multicast++;
			}
			mcast_hit = true;
		} else {
			local_rcv = true;
			br->dev->stats.multicast++;
		}
		break;
	case BR_PKT_UNICAST:
		dst = br_fdb_find_rcu(br, dest, vid);
		/* fall through */
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		if (dst->is_local)
			return br_pass_frame_up(skb);

		/* refresh the FDB entry's last-used stamp */
		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false);
		else
			br_multicast_flood(mdst, skb, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}


Nikolay Aleksandrov16333.20%1021.28%
Herbert Xu9118.53%612.77%
Stephen Hemminger8617.52%714.89%
Linus Torvalds (pre-git)438.76%24.26%
Vlad Yasevich346.92%612.77%
Kyeyoon Park183.67%12.13%
Linus Lüssing122.44%24.26%
Linus Torvalds81.63%12.13%
Ido Schimmel71.43%12.13%
David S. Miller51.02%12.13%
Eric W. Biedermann51.02%12.13%
Arnd Bergmann51.02%12.13%
Pavel Emelyanov30.61%12.13%
Toshiaki Makita30.61%24.26%
Jouni Malinen30.61%12.13%
Américo Wang20.41%12.13%
Michael Braun10.20%12.13%
Jiri Pirko10.20%12.13%
Hideaki Yoshifuji / 吉藤英明10.20%12.13%

static void __br_handle_local_finish(struct sk_buff *skb) { struct net_bridge_port *p = br_port_get_rcu(skb->dev); u16 vid = 0; /* check if vlan is allowed, to avoid spoofing */ if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid)) br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false); }


Stephen Hemminger3552.24%114.29%
Vlad Yasevich1522.39%228.57%
Toshiaki Makita1319.40%228.57%
Ido Schimmel34.48%114.29%
Jiri Pirko11.49%114.29%

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { struct net_bridge_port *p = br_port_get_rcu(skb->dev); __br_handle_local_finish(skb); BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; br_pass_frame_up(skb); return 0; }


Ido Schimmel3762.71%133.33%
Florian Westphal1830.51%133.33%
Stephen Hemminger46.78%133.33%

/*
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
/* rx_handler for bridge ports: decides whether a received frame is
 * consumed by the bridge (forwarded/flooded/locally delivered via the
 * netfilter hooks), passed back to the normal input path
 * (RX_HANDLER_PASS, e.g. link-local frames the host must see, or when
 * the brouter hook claims the frame), or dropped.
 */
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	br_should_route_hook_t *rhook;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL) {
		if (br_handle_ingress_vlan_tunnel(skb, p,
						  nbp_vlan_group_rcu(p)))
			goto drop;
	}

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment			Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP			01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off,
			   then must forward to keep loop detection */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* Deliver packet to local host only */
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
			dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			br_handle_local_finish);
		return RX_HANDLER_CONSUMED;
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		/* brouter hook may claim the frame for routing */
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook) {
			if ((*rhook)(skb)) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			/* the hook may have modified the header */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
			dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}


Stephen Hemminger11226.73%1232.43%
Ido Schimmel5412.89%25.41%
Jiri Pirko4911.69%38.11%
Linus Torvalds4510.74%25.41%
Roopa Prabhu276.44%12.70%
Bart De Schuymer276.44%38.11%
Toshiaki Makita266.21%12.70%
Herbert Xu163.82%12.70%
Eric W. Biedermann143.34%12.70%
Pavel Emelyanov112.63%12.70%
Arnaldo Carvalho de Melo92.15%12.70%
David S. Miller81.91%25.41%
Linus Torvalds (pre-git)81.91%12.70%
Eric Dumazet51.19%12.70%
Simon Horman30.72%12.70%
Jan Engelhardt20.48%12.70%
Florian Westphal10.24%12.70%
Joe Perches10.24%12.70%
Ben Hutchings10.24%12.70%

Overall Contributors

Kyeyoon Park30018.24%11.04%
Stephen Hemminger25915.74%2020.83%
Nikolay Aleksandrov19411.79%1313.54%
Herbert Xu1156.99%77.29%
Vlad Yasevich1156.99%88.33%
Ido Schimmel1026.20%33.12%
Linus Torvalds (pre-git)875.29%22.08%
Linus Torvalds694.19%22.08%
Jouni Malinen543.28%11.04%
Eric W. Biedermann523.16%33.12%
Jiri Pirko513.10%33.12%
Toshiaki Makita422.55%44.17%
Eric Dumazet332.01%22.08%
Roopa Prabhu321.95%11.04%
Bart De Schuymer271.64%33.12%
Florian Westphal241.46%22.08%
Pavel Emelyanov221.34%22.08%
David S. Miller160.97%33.12%
Linus Lüssing120.73%22.08%
Arnaldo Carvalho de Melo90.55%11.04%
Pablo Neira Ayuso50.30%11.04%
Arnd Bergmann50.30%11.04%
Paul Gortmaker30.18%11.04%
Simon Horman30.18%11.04%
Jan Engelhardt30.18%11.04%
Tejun Heo30.18%11.04%
Américo Wang20.12%11.04%
Ben Hutchings10.06%11.04%
Hideaki Yoshifuji / 吉藤英明10.06%11.04%
Joe Perches10.06%11.04%
Li RongQing10.06%11.04%
Michael Braun10.06%11.04%
Adrian Bunk10.06%11.04%
Directory: net/bridge
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.