cregit-Linux: how code gets into the kernel

Release 4.11: net/bridge/br_device.c

Directory: net/bridge
/*
 *      Device handling code
 *      Linux ethernet bridge
 *
 *      Authors:
 *      Lennert Buytenhek               <buytenh@gnu.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"


#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
                         NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)


const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);


static struct lock_class_key bridge_netdev_addr_lock_key;

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        const unsigned char *dest = skb->data;
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
        struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
        const struct nf_br_ops *nf_ops;
        u16 vid = 0;

        rcu_read_lock();
        nf_ops = rcu_dereference(nf_br_ops);
        if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
                rcu_read_unlock();
                return NETDEV_TX_OK;
        }

        u64_stats_update_begin(&brstats->syncp);
        brstats->tx_packets++;
        brstats->tx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);

        BR_INPUT_SKB_CB(skb)->brdev = dev;

        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);

        if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
                goto out;

        if (is_broadcast_ether_addr(dest)) {
                br_flood(br, skb, BR_PKT_BROADCAST, false, true);
        } else if (is_multicast_ether_addr(dest)) {
                if (unlikely(netpoll_tx_running(dev))) {
                        br_flood(br, skb, BR_PKT_MULTICAST, false, true);
                        goto out;
                }
                if (br_multicast_rcv(br, NULL, skb, vid)) {
                        kfree_skb(skb);
                        goto out;
                }

                mdst = br_mdb_get(br, skb, vid);
                if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
                    br_multicast_querier_exists(br, eth_hdr(skb)))
                        br_multicast_flood(mdst, skb, false, true);
                else
                        br_flood(br, skb, BR_PKT_MULTICAST, false, true);
        } else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
                br_forward(dst->dst, skb, false, true);
        } else {
                br_flood(br, skb, BR_PKT_UNICAST, false, true);
        }

out:
        rcu_read_unlock();
        return NETDEV_TX_OK;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Herbert Xu | 90 | 24.66% | 5 | 14.29%
Linus Torvalds (pre-git) | 81 | 22.19% | 1 | 2.86%
Stephen Hemminger | 46 | 12.60% | 6 | 17.14%
Nikolay Aleksandrov | 46 | 12.60% | 5 | 14.29%
Pablo Neira Ayuso | 20 | 5.48% | 2 | 5.71%
Toshiaki Makita | 16 | 4.38% | 1 | 2.86%
Eric Dumazet | 16 | 4.38% | 1 | 2.86%
Vlad Yasevich | 13 | 3.56% | 4 | 11.43%
Linus Lüssing | 12 | 3.29% | 2 | 5.71%
Bart De Schuymer | 9 | 2.47% | 1 | 2.86%
Linus Torvalds | 7 | 1.92% | 1 | 2.86%
Arnaldo Carvalho de Melo | 3 | 0.82% | 1 | 2.86%
Américo Wang | 2 | 0.55% | 1 | 2.86%
Li RongQing | 1 | 0.27% | 1 | 2.86%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.27% | 1 | 2.86%
Patrick McHardy | 1 | 0.27% | 1 | 2.86%
Pavel Emelyanov | 1 | 0.27% | 1 | 2.86%
Total | 365 | 100.00% | 35 | 100.00%
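
The rcu_read_lock()/rcu_dereference() pairing in br_dev_xmit() is the standard RCU idiom for an optional hook pointer such as nf_br_ops. The sketch below isolates that idiom in a minimal, self-contained kernel module; the names (my_hook_ops, my_run_hook, example_hook) are hypothetical and only illustrate how a publisher installs the pointer that a reader like br_dev_xmit() dereferences — it is not the bridge's actual br_netfilter wiring.

/* Hypothetical sketch of the RCU-protected hook-pointer idiom used for
 * nf_br_ops above; all identifiers are made up for illustration.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>

struct my_hook_ops {
        int (*hook)(void *data);
};

static const struct my_hook_ops __rcu *my_hook __read_mostly;

/* Reader side: mirrors the rcu_read_lock()/rcu_dereference() use in br_dev_xmit(). */
static int my_run_hook(void *data)
{
        const struct my_hook_ops *ops;
        int ret = 0;

        rcu_read_lock();
        ops = rcu_dereference(my_hook);
        if (ops)
                ret = ops->hook(data);
        rcu_read_unlock();
        return ret;
}

static int example_hook(void *data)
{
        /* 0 means "not handled" here, matching how br_dev_xmit() only
         * bails out when the hook returns non-zero. */
        return 0;
}

static const struct my_hook_ops example_ops = {
        .hook = example_hook,
};

static int __init my_hook_init(void)
{
        /* Publisher side: make the ops visible to readers. */
        rcu_assign_pointer(my_hook, &example_ops);
        return my_run_hook(NULL);
}

static void __exit my_hook_exit(void)
{
        RCU_INIT_POINTER(my_hook, NULL);
        synchronize_rcu();      /* wait for in-flight readers before unload */
}

module_init(my_hook_init);
module_exit(my_hook_exit);
MODULE_LICENSE("GPL");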


static void br_set_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Nikolay Aleksandrov | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static int br_dev_init(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        int err;

        br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!br->stats)
                return -ENOMEM;

        err = br_vlan_init(br);
        if (err) {
                free_percpu(br->stats);
                return err;
        }

        err = br_multicast_init_stats(br);
        if (err) {
                free_percpu(br->stats);
                br_vlan_flush(br);
        }
        br_set_lockdep_class(dev);

        return err;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 42 | 41.58% | 1 | 16.67%
Nikolay Aleksandrov | 35 | 34.65% | 2 | 33.33%
Vlad Yasevich | 22 | 21.78% | 1 | 16.67%
Américo Wang | 1 | 0.99% | 1 | 16.67%
Li RongQing | 1 | 0.99% | 1 | 16.67%
Total | 101 | 100.00% | 6 | 100.00%


static void br_dev_uninit(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);

        br_multicast_dev_del(br);
        br_multicast_uninit_stats(br);
        br_vlan_flush(br);
        free_percpu(br->stats);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Ido Schimmel | 38 | 88.37% | 1 | 50.00%
Xin Long | 5 | 11.63% | 1 | 50.00%
Total | 43 | 100.00% | 2 | 100.00%


static int br_dev_open(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);

        netdev_update_features(dev);
        netif_start_queue(dev);
        br_stp_enable_bridge(br);
        br_multicast_open(br);

        return 0;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Linus Torvalds (pre-git) | 23 | 52.27% | 1 | 25.00%
Stephen Hemminger | 14 | 31.82% | 1 | 25.00%
Herbert Xu | 5 | 11.36% | 1 | 25.00%
Michał Mirosław | 2 | 4.55% | 1 | 25.00%
Total | 44 | 100.00% | 4 | 100.00%


static void br_dev_set_multicast_list(struct net_device *dev)
{
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Linus Torvalds (pre-git) | 10 | 100.00% | 1 | 100.00%
Total | 10 | 100.00% | 1 | 100.00%


static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
        if (change & IFF_PROMISC)
                br_manage_promisc(netdev_priv(dev));
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Vlad Yasevich | 28 | 100.00% | 1 | 100.00%
Total | 28 | 100.00% | 1 | 100.00%


static int br_dev_stop(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);

        br_stp_disable_bridge(br);
        br_multicast_stop(br);

        netif_stop_queue(dev);

        return 0;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Linus Torvalds (pre-git) | 21 | 53.85% | 1 | 25.00%
Herbert Xu | 14 | 35.90% | 1 | 25.00%
Stephen Hemminger | 4 | 10.26% | 2 | 50.00%
Total | 39 | 100.00% | 4 | 100.00%


static void br_get_stats64(struct net_device *dev,
                           struct rtnl_link_stats64 *stats)
{
        struct net_bridge *br = netdev_priv(dev);
        struct pcpu_sw_netstats tmp, sum = { 0 };
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                const struct pcpu_sw_netstats *bstats
                        = per_cpu_ptr(br->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&bstats->syncp);
                        memcpy(&tmp, bstats, sizeof(tmp));
                } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
                sum.tx_bytes   += tmp.tx_bytes;
                sum.tx_packets += tmp.tx_packets;
                sum.rx_bytes   += tmp.rx_bytes;
                sum.rx_packets += tmp.rx_packets;
        }

        stats->tx_bytes   = sum.tx_bytes;
        stats->tx_packets = sum.tx_packets;
        stats->rx_bytes   = sum.rx_bytes;
        stats->rx_packets = sum.rx_packets;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 107 | 63.69% | 2 | 33.33%
Eric Dumazet | 57 | 33.93% | 2 | 33.33%
Li RongQing | 2 | 1.19% | 1 | 16.67%
Eric W. Biedermann | 2 | 1.19% | 1 | 16.67%
Total | 168 | 100.00% | 6 | 100.00%
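
br_get_stats64() above aggregates per-CPU counters with the u64_stats_* sequence-counter helpers, while br_dev_xmit() is the writer that bumps them. The sketch below isolates that writer/reader pairing around a hypothetical counter structure (my_pcpu_stats, my_count_packet, my_fold_stats); it assumes the 4.11-era u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() interface used in this file, and that the per-CPU storage has been allocated beforehand.

/* Hypothetical sketch of the per-CPU u64_stats pattern used by
 * br_dev_xmit() (writer) and br_get_stats64() (reader).
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        struct u64_stats_sync   syncp;
};

/* Would be set up with alloc_percpu(struct my_pcpu_stats) (or, for the
 * stock pcpu_sw_netstats type, netdev_alloc_pcpu_stats()) before use. */
static struct my_pcpu_stats __percpu *my_stats;

/* Writer: assumed to run with BH disabled (like the bridge xmit path),
 * so this_cpu_ptr() stays on one CPU for the whole update. */
static void my_count_packet(unsigned int len)
{
        struct my_pcpu_stats *s = this_cpu_ptr(my_stats);

        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: the retry loop guarantees a consistent 64-bit snapshot even on
 * 32-bit SMP, exactly as in br_get_stats64(). */
static void my_fold_stats(u64 *packets, u64 *bytes)
{
        unsigned int cpu;

        *packets = 0;
        *bytes = 0;
        for_each_possible_cpu(cpu) {
                const struct my_pcpu_stats *s = per_cpu_ptr(my_stats, cpu);
                unsigned int start;
                u64 p, b;

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        p = s->rx_packets;
                        b = s->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                *packets += p;
                *bytes += b;
        }
}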


static int br_change_mtu(struct net_device *dev, int new_mtu)
{
        struct net_bridge *br = netdev_priv(dev);

        if (new_mtu > br_min_mtu(br))
                return -EINVAL;

        dev->mtu = new_mtu;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        /* remember the MTU in the rtable for PMTU */
        dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

        return 0;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 35 | 50.72% | 1 | 25.00%
Simon Wunderlich | 23 | 33.33% | 1 | 25.00%
David S. Miller | 6 | 8.70% | 1 | 25.00%
Pablo Neira Ayuso | 5 | 7.25% | 1 | 25.00%
Total | 69 | 100.00% | 4 | 100.00%

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
        struct net_bridge *br = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        spin_lock_bh(&br->lock);
        if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
                /* Mac address will be changed in br_stp_change_bridge_id(). */
                br_stp_change_bridge_id(br, addr->sa_data);
        }
        spin_unlock_bh(&br->lock);

        return 0;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 87 | 95.60% | 3 | 50.00%
Joe Perches | 2 | 2.20% | 1 | 16.67%
Danny Kukawka | 1 | 1.10% | 1 | 16.67%
Toshiaki Makita | 1 | 1.10% | 1 | 16.67%
Total | 91 | 100.00% | 6 | 100.00%


static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "bridge", sizeof(info->driver));
        strlcpy(info->version, BR_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 48 | 60.00% | 1 | 50.00%
Jiri Pirko | 32 | 40.00% | 1 | 50.00%
Total | 80 | 100.00% | 2 | 100.00%


static netdev_features_t br_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        struct net_bridge *br = netdev_priv(dev);

        return br_features_recompute(br, features);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 23 | 71.88% | 1 | 25.00%
Michał Mirosław | 5 | 15.62% | 2 | 50.00%
Jesse Gross | 4 | 12.50% | 1 | 25.00%
Total | 32 | 100.00% | 4 | 100.00%

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Herbert Xu | 6 | 60.00% | 1 | 33.33%
Américo Wang | 3 | 30.00% | 1 | 33.33%
Stephen Hemminger | 1 | 10.00% | 1 | 33.33%
Total | 10 | 100.00% | 3 | 100.00%


static void br_netpoll_cleanup(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list)
                br_netpoll_disable(p);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Herbert Xu | 21 | 53.85% | 1 | 33.33%
Américo Wang | 18 | 46.15% | 2 | 66.67%
Total | 39 | 100.00% | 3 | 100.00%


static int __br_netpoll_enable(struct net_bridge_port *p)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*p->np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, p->dev);
        if (err) {
                kfree(np);
                return err;
        }

        p->np = np;
        return err;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Cong Wang | 41 | 53.25% | 1 | 25.00%
Américo Wang | 24 | 31.17% | 1 | 25.00%
Herbert Xu | 11 | 14.29% | 1 | 25.00%
Eric W. Biedermann | 1 | 1.30% | 1 | 25.00%
Total | 77 | 100.00% | 4 | 100.00%


int br_netpoll_enable(struct net_bridge_port *p)
{
        if (!p->br->dev->npinfo)
                return 0;

        return __br_netpoll_enable(p);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 15 | 50.00% | 2 | 40.00%
Herbert Xu | 7 | 23.33% | 1 | 20.00%
Américo Wang | 4 | 13.33% | 1 | 20.00%
Cong Wang | 4 | 13.33% | 1 | 20.00%
Total | 30 | 100.00% | 5 | 100.00%


static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;
        int err = 0;

        list_for_each_entry(p, &br->port_list, list) {
                if (!p->dev)
                        continue;
                err = __br_netpoll_enable(p);
                if (err)
                        goto fail;
        }

out:
        return err;

fail:
        br_netpoll_cleanup(dev);
        goto out;
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Cong Wang | 64 | 77.11% | 1 | 20.00%
Herbert Xu | 10 | 12.05% | 1 | 20.00%
Stephen Hemminger | 6 | 7.23% | 1 | 20.00%
Américo Wang | 2 | 2.41% | 1 | 20.00%
Jiri Pirko | 1 | 1.20% | 1 | 20.00%
Total | 83 | 100.00% | 5 | 100.00%


void br_netpoll_disable(struct net_bridge_port *p)
{
        struct netpoll *np = p->np;

        if (!np)
                return;

        p->np = NULL;

        __netpoll_free_async(np);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Herbert Xu | 19 | 52.78% | 1 | 33.33%
Stephen Hemminger | 16 | 44.44% | 1 | 33.33%
Neil Horman | 1 | 2.78% | 1 | 33.33%
Total | 36 | 100.00% | 3 | 100.00%

#endif
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
{
        struct net_bridge *br = netdev_priv(dev);

        return br_add_if(br, slave_dev);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Jiri Pirko | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%


static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
        struct net_bridge *br = netdev_priv(dev);

        return br_del_if(br, slave_dev);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Jiri Pirko | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%

static const struct ethtool_ops br_ethtool_ops = {
        .get_drvinfo            = br_getinfo,
        .get_link               = ethtool_op_get_link,
};

static const struct net_device_ops br_netdev_ops = {
        .ndo_open               = br_dev_open,
        .ndo_stop               = br_dev_stop,
        .ndo_init               = br_dev_init,
        .ndo_uninit             = br_dev_uninit,
        .ndo_start_xmit         = br_dev_xmit,
        .ndo_get_stats64        = br_get_stats64,
        .ndo_set_mac_address    = br_set_mac_address,
        .ndo_set_rx_mode        = br_dev_set_multicast_list,
        .ndo_change_rx_flags    = br_dev_change_rx_flags,
        .ndo_change_mtu         = br_change_mtu,
        .ndo_do_ioctl           = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = br_netpoll_setup,
        .ndo_netpoll_cleanup    = br_netpoll_cleanup,
        .ndo_poll_controller    = br_poll_controller,
#endif
        .ndo_add_slave          = br_add_slave,
        .ndo_del_slave          = br_del_slave,
        .ndo_fix_features       = br_fix_features,
        .ndo_fdb_add            = br_fdb_add,
        .ndo_fdb_del            = br_fdb_delete,
        .ndo_fdb_dump           = br_fdb_dump,
        .ndo_bridge_getlink     = br_getlink,
        .ndo_bridge_setlink     = br_setlink,
        .ndo_bridge_dellink     = br_dellink,
        .ndo_features_check     = passthru_features_check,
};

static struct device_type br_type = {
        .name   = "bridge",
};
void br_dev_setup(struct net_device *dev)
{
        struct net_bridge *br = netdev_priv(dev);

        eth_hw_addr_random(dev);
        ether_setup(dev);

        dev->netdev_ops = &br_netdev_ops;
        dev->destructor = free_netdev;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

        dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_STAG_TX;
        dev->vlan_features = COMMON_FEATURES;

        br->dev = dev;
        spin_lock_init(&br->lock);
        INIT_LIST_HEAD(&br->port_list);
        spin_lock_init(&br->hash_lock);

        br->bridge_id.prio[0] = 0x80;
        br->bridge_id.prio[1] = 0x00;

        ether_addr_copy(br->group_addr, eth_reserved_addr_base);

        br->stp_enabled = BR_NO_STP;
        br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
        br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

        br->designated_root = br->bridge_id;
        br->bridge_max_age = br->max_age = 20 * HZ;
        br->bridge_hello_time = br->hello_time = 2 * HZ;
        br->bridge_forward_delay = br->forward_delay = 15 * HZ;
        br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
        dev->max_mtu = ETH_MAX_MTU;

        br_netfilter_rtable_init(br);
        br_stp_timer_init(br);
        br_multicast_init(br);
        INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}

Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 179 | 68.85% | 7 | 25.00%
Linus Torvalds (pre-git) | 20 | 7.69% | 1 | 3.57%
Nikolay Aleksandrov | 10 | 3.85% | 1 | 3.57%
Toshiaki Makita | 10 | 3.85% | 2 | 7.14%
Vlad Yasevich | 8 | 3.08% | 1 | 3.57%
Jarod Wilson | 6 | 2.31% | 1 | 3.57%
Michał Mirosław | 5 | 1.92% | 1 | 3.57%
Vivien Didelot | 4 | 1.54% | 1 | 3.57%
Herbert Xu | 3 | 1.15% | 3 | 10.71%
Wilfried Klaebe | 3 | 1.15% | 1 | 3.57%
Alexey Dobriyan | 2 | 0.77% | 1 | 3.57%
Patrick McHardy | 2 | 0.77% | 1 | 3.57%
Phil Sutter | 2 | 0.77% | 1 | 3.57%
Scott Feldman | 1 | 0.38% | 1 | 3.57%
Ben Hutchings | 1 | 0.38% | 1 | 3.57%
Jesse Gross | 1 | 0.38% | 1 | 3.57%
Ido Schimmel | 1 | 0.38% | 1 | 3.57%
Joe Perches | 1 | 0.38% | 1 | 3.57%
Danny Kukawka | 1 | 0.38% | 1 | 3.57%
Total | 260 | 100.00% | 28 | 100.00%
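
br_dev_setup() is a netdev setup callback: the bridge hands it to the networking core (via its rtnl_link_ops) so that every new bridge device comes up with these defaults. The fragment below is a minimal, hypothetical sketch of the generic flow that consumes such a callback through alloc_netdev()/register_netdev(); the device name pattern "mydev%d", my_setup() and my_xmit() are invented for illustration and this is not the bridge's actual creation path. It assumes the 4.11-era dev->destructor API used in br_dev_setup() above.

/* Hypothetical sketch: how a setup callback in the style of br_dev_setup()
 * is consumed when creating a virtual net_device directly.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct net_device *my_dev;

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);             /* sink all traffic in this toy device */
        return NETDEV_TX_OK;
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_start_xmit = my_xmit,
};

/* Plays the role br_dev_setup() plays for the bridge device. */
static void my_setup(struct net_device *dev)
{
        ether_setup(dev);
        eth_hw_addr_random(dev);
        dev->netdev_ops = &my_netdev_ops;
        dev->destructor = free_netdev; /* 4.11-era API, as in br_dev_setup() */
}

static int __init my_init(void)
{
        int err;

        my_dev = alloc_netdev(0, "mydev%d", NET_NAME_UNKNOWN, my_setup);
        if (!my_dev)
                return -ENOMEM;

        err = register_netdev(my_dev);
        if (err)
                free_netdev(my_dev);    /* destructor only runs on unregister */
        return err;
}

static void __exit my_exit(void)
{
        unregister_netdev(my_dev);      /* core then calls ->destructor (free_netdev) */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");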


Overall Contributors

Person | Tokens | Token % | Commits | Commit %
Stephen Hemminger | 710 | 37.08% | 23 | 22.12%
Herbert Xu | 191 | 9.97% | 9 | 8.65%
Linus Torvalds (pre-git) | 166 | 8.67% | 1 | 0.96%
Nikolay Aleksandrov | 118 | 6.16% | 8 | 7.69%
Jiri Pirko | 112 | 5.85% | 4 | 3.85%
Cong Wang | 109 | 5.69% | 1 | 0.96%
Vlad Yasevich | 85 | 4.44% | 8 | 7.69%
Américo Wang | 81 | 4.23% | 5 | 4.81%
Eric Dumazet | 75 | 3.92% | 2 | 1.92%
Ido Schimmel | 44 | 2.30% | 1 | 0.96%
Pablo Neira Ayuso | 38 | 1.98% | 3 | 2.88%
Toshiaki Makita | 32 | 1.67% | 5 | 4.81%
John Fastabend | 25 | 1.31% | 2 | 1.92%
Simon Wunderlich | 23 | 1.20% | 1 | 0.96%
Michał Mirosław | 17 | 0.89% | 2 | 1.92%
Linus Lüssing | 12 | 0.63% | 2 | 1.92%
Bart De Schuymer | 12 | 0.63% | 1 | 0.96%
Linus Torvalds | 8 | 0.42% | 2 | 1.92%
David S. Miller | 6 | 0.31% | 1 | 0.96%
Jarod Wilson | 6 | 0.31% | 1 | 0.96%
Jesse Gross | 5 | 0.26% | 1 | 0.96%
Xin Long | 5 | 0.26% | 1 | 0.96%
Vivien Didelot | 4 | 0.21% | 1 | 0.96%
Li RongQing | 4 | 0.21% | 1 | 0.96%
Patrick McHardy | 3 | 0.16% | 2 | 1.92%
Wilfried Klaebe | 3 | 0.16% | 1 | 0.96%
Joe Perches | 3 | 0.16% | 2 | 1.92%
Arnaldo Carvalho de Melo | 3 | 0.16% | 1 | 0.96%
Eric W. Biedermann | 3 | 0.16% | 2 | 1.92%
Alexey Dobriyan | 2 | 0.10% | 1 | 0.96%
Danny Kukawka | 2 | 0.10% | 2 | 1.92%
Phil Sutter | 2 | 0.10% | 1 | 0.96%
Neil Horman | 1 | 0.05% | 1 | 0.96%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.05% | 1 | 0.96%
Scott Feldman | 1 | 0.05% | 1 | 0.96%
Ben Hutchings | 1 | 0.05% | 1 | 0.96%
Adrian Bunk | 1 | 0.05% | 1 | 0.96%
Pavel Emelyanov | 1 | 0.05% | 1 | 0.96%
Total | 1915 | 100.00% | 104 | 100.00%
Created with cregit.