Linux kernel release 4.14 — net/bridge/br_forward.c
/*
* Forwarding decision
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group_rcu(p);

	/* Never echo a frame back out its ingress port unless the port is
	 * in hairpin mode.
	 */
	if (!(p->flags & BR_HAIRPIN_MODE) && skb->dev == p->dev)
		return 0;

	/* The port must also pass egress VLAN filtering, be in the
	 * forwarding STP state, and be permitted by switchdev policy.
	 */
	return br_allowed_egress(vg, skb) &&
	       p->state == BR_STATE_FORWARDING &&
	       nbp_switchdev_allowed_egress(p, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 27 | 38.57% | 1 | 11.11% |
Nikolay Aleksandrov | 13 | 18.57% | 2 | 22.22% |
Fischer, Anna | 9 | 12.86% | 1 | 11.11% |
Stephen Hemminger | 7 | 10.00% | 2 | 22.22% |
Ido Schimmel | 7 | 10.00% | 1 | 11.11% |
Vlad Yasevich | 6 | 8.57% | 1 | 11.11% |
Linus Torvalds | 1 | 1.43% | 1 | 11.11% |
Total | 70 | 100.00% | 9 | 100.00% |
/* Final transmit step: restore the Ethernet header and hand the frame to
 * the egress device's qdisc.  Consumes the skb on every path and always
 * returns 0 (as required for an NF_HOOK okfn).
 */
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (!is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
		return 0;
	}

	skb_push(skb, ETH_HLEN);
	br_drop_fake_rtable(skb);

	/* For VLAN-tagged frames with a pending partial checksum, point the
	 * network header past the VLAN tag(s) so checksum completion finds
	 * the right offset.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth)) {
			kfree_skb(skb);
			return 0;
		}

		skb_set_network_header(skb, depth);
	}

	dev_queue_xmit(skb);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Toshiaki Makita | 71 | 57.72% | 1 | 10.00% |
Linus Torvalds | 23 | 18.70% | 1 | 10.00% |
Stephen Hemminger | 8 | 6.50% | 2 | 20.00% |
David S. Miller | 5 | 4.07% | 1 | 10.00% |
Peter Huang (Peng) | 5 | 4.07% | 1 | 10.00% |
Eric W. Biedermann | 5 | 4.07% | 1 | 10.00% |
Vlad Yasevich | 3 | 2.44% | 1 | 10.00% |
Vlad Drukker | 2 | 1.63% | 1 | 10.00% |
Bart De Schuymer | 1 | 0.81% | 1 | 10.00% |
Total | 123 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
/* Run the NF_BR_POST_ROUTING netfilter hook on the outgoing frame and, if
 * it is accepted, continue to br_dev_queue_push_xmit() for transmission
 * on skb->dev.
 */
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 24 | 57.14% | 1 | 14.29% |
David S. Miller | 7 | 16.67% | 1 | 14.29% |
Eric W. Biedermann | 7 | 16.67% | 2 | 28.57% |
Bart De Schuymer | 2 | 4.76% | 1 | 14.29% |
Jan Engelhardt | 1 | 2.38% | 1 | 14.29% |
Stephen Hemminger | 1 | 2.38% | 1 | 14.29% |
Total | 42 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(br_forward_finish);
/* Deliver @skb out of port @to, running the appropriate netfilter hook
 * (NF_BR_FORWARD for forwarded traffic, NF_BR_LOCAL_OUT for locally
 * originated traffic).  Consumes the skb on every path.
 */
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	vg = nbp_vlan_group_rcu(to);
	/* Egress VLAN handling may retag the frame or drop it (NULL). */
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		/* Forwarded traffic must not be LRO-aggregated. */
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}

		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		/* Locally originated frame under netpoll: transmit
		 * directly, bypassing the netfilter hooks entirely.
		 */
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			if (!is_skb_forwardable(skb->dev, skb)) {
				kfree_skb(skb);
			} else {
				skb_push(skb, ETH_HLEN);
				br_netpoll_send_skb(to, skb);
			}
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		/* No ingress device for locally originated traffic. */
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 97 | 45.75% | 3 | 21.43% |
Herbert Xu | 35 | 16.51% | 1 | 7.14% |
Linus Torvalds | 27 | 12.74% | 1 | 7.14% |
Vlad Yasevich | 21 | 9.91% | 2 | 14.29% |
Américo Wang | 19 | 8.96% | 2 | 14.29% |
Eric W. Biedermann | 7 | 3.30% | 1 | 7.14% |
David S. Miller | 2 | 0.94% | 1 | 7.14% |
Roopa Prabhu | 2 | 0.94% | 1 | 7.14% |
Stephen Hemminger | 1 | 0.47% | 1 | 7.14% |
Bart De Schuymer | 1 | 0.47% | 1 | 7.14% |
Total | 212 | 100.00% | 14 | 100.00% |
/* Forward a clone of @skb out of @prev, leaving the original untouched
 * for the caller.  Returns 0 on success, -ENOMEM if cloning failed (the
 * drop is accounted on the bridge device).
 */
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct sk_buff *clone;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__br_forward(prev, clone, local_orig);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 31 | 43.66% | 2 | 28.57% |
Linus Torvalds (pre-git) | 16 | 22.54% | 1 | 14.29% |
Linus Torvalds | 10 | 14.08% | 1 | 14.29% |
Vlad Yasevich | 9 | 12.68% | 1 | 14.29% |
Herbert Xu | 4 | 5.63% | 1 | 14.29% |
Stephen Hemminger | 1 | 1.41% | 1 | 14.29% |
Total | 71 | 100.00% | 7 | 100.00% |
/**
* br_forward - forward a packet to a specific port
* @to: destination port
* @skb: packet being forwarded
* @local_rcv: packet will be received locally after forwarding
* @local_orig: packet is locally originated
*
* Should be called with rcu_read_lock.
*/
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	/* Not deliverable: free the skb unless the caller still needs it
	 * for local receive.
	 */
	if (!to || !should_deliver(to, skb)) {
		if (!local_rcv)
			kfree_skb(skb);
		return;
	}

	/* Clone when the skb must also be received locally; otherwise the
	 * forward path consumes it.
	 */
	if (local_rcv)
		deliver_clone(to, skb, local_orig);
	else
		__br_forward(to, skb, local_orig);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 30 | 43.48% | 2 | 22.22% |
Herbert Xu | 17 | 24.64% | 2 | 22.22% |
Linus Torvalds (pre-git) | 17 | 24.64% | 1 | 11.11% |
Roopa Prabhu | 2 | 2.90% | 1 | 11.11% |
Stephen Hemminger | 1 | 1.45% | 1 | 11.11% |
Michael Braun | 1 | 1.45% | 1 | 11.11% |
Linus Torvalds | 1 | 1.45% | 1 | 11.11% |
Total | 69 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(br_forward);
/* Deferred-clone helper for the flood loops: when a new eligible port @p
 * is found, deliver a clone to the previously found port @prev and return
 * @p as the new "pending" port.  Returns @prev unchanged if @p should not
 * receive the frame, or ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, local_orig);

		if (err)
			return ERR_PTR(err);
	}

	return p;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 73 | 96.05% | 1 | 50.00% |
Nikolay Aleksandrov | 3 | 3.95% | 1 | 50.00% |
Total | 76 | 100.00% | 2 | 100.00% |
/* called under rcu_read_lock */
/* Flood @skb out of every bridge port that accepts it.  @pkt_type selects
 * which per-port flood flag (BR_FLOOD / BR_MCAST_FLOOD / BR_BCAST_FLOOD)
 * gates delivery.  Consumes the skb unless @local_rcv is set.
 */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off, nor
		 * other traffic if flood off, except for traffic we originate
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & BR_PROXYARP_WIFI) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		/* Deferred clone: the previous eligible port only gets its
		 * clone once the next eligible port is found, so the final
		 * port can consume the original skb below.
		 */
		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
		/* prev == p means p passed should_deliver(); count TX. */
		if (prev == p)
			br_multicast_count(p->br, p, skb, igmp_type,
					   BR_MCAST_DIR_TX);
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 61 | 25.21% | 6 | 37.50% |
Linus Torvalds (pre-git) | 51 | 21.07% | 1 | 6.25% |
Mike Manning | 46 | 19.01% | 2 | 12.50% |
Herbert Xu | 35 | 14.46% | 2 | 12.50% |
Jouni Malinen | 18 | 7.44% | 1 | 6.25% |
Vlad Yasevich | 13 | 5.37% | 1 | 6.25% |
Kyeyoon Park | 10 | 4.13% | 1 | 6.25% |
Stephen Hemminger | 8 | 3.31% | 2 | 12.50% |
Total | 242 | 100.00% | 16 | 100.00% |
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* Deliver a private copy of @skb to port @p with its destination MAC
 * rewritten to @addr (multicast-to-unicast).  The caller keeps ownership
 * of the original skb.
 */
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;
	struct sk_buff *cskb;

	if (!should_deliver(p, skb))
		return;

	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

	/* skb_copy (not clone): the header bytes are modified below. */
	cskb = skb_copy(skb, GFP_ATOMIC);
	if (!cskb) {
		dev->stats.tx_dropped++;
		return;
	}

	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(cskb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, cskb, local_orig);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnd Bergmann | 135 | 100.00% | 1 | 100.00% |
Total | 135 | 100.00% | 1 | 100.00% |
/* called with rcu_read_lock */
/* Flood @skb to the ports of multicast group @mdst plus all detected
 * multicast-router ports.  Consumes the skb unless @local_rcv is set.
 */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			bool local_rcv, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	u8 igmp_type = br_multicast_igmp_type(skb);
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	/* Merge-walk the group port list and the router port list by
	 * comparing net_bridge_port addresses, so a port present on both
	 * lists is visited only once.  NOTE(review): this assumes both
	 * lists are kept ordered by port pointer -- confirm against the
	 * insertion code in br_multicast.c.
	 */
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;
			/* Send a per-host unicast copy (rewritten dest MAC)
			 * instead of one flooded frame.
			 */
			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
delivered:
		if (IS_ERR(prev))
			goto out;
		if (prev == port)
			br_multicast_count(port->br, port, skb, igmp_type,
					   BR_MCAST_DIR_TX);

		/* Advance whichever list(s) supplied the visited port; both
		 * advance when the same port was at the head of each.
		 */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 215 | 65.35% | 1 | 10.00% |
Nikolay Aleksandrov | 41 | 12.46% | 4 | 40.00% |
Felix Fietkau | 41 | 12.46% | 1 | 10.00% |
Stephen Hemminger | 25 | 7.60% | 3 | 30.00% |
Eric Dumazet | 7 | 2.13% | 1 | 10.00% |
Total | 329 | 100.00% | 10 | 100.00% |
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 388 | 27.30% | 6 | 10.91% |
Nikolay Aleksandrov | 283 | 19.92% | 8 | 14.55% |
Arnd Bergmann | 135 | 9.50% | 1 | 1.82% |
Linus Torvalds (pre-git) | 123 | 8.66% | 1 | 1.82% |
Linus Torvalds | 89 | 6.26% | 2 | 3.64% |
Toshiaki Makita | 71 | 5.00% | 1 | 1.82% |
Stephen Hemminger | 55 | 3.87% | 8 | 14.55% |
Vlad Yasevich | 52 | 3.66% | 4 | 7.27% |
Mike Manning | 46 | 3.24% | 2 | 3.64% |
Felix Fietkau | 41 | 2.89% | 1 | 1.82% |
Américo Wang | 22 | 1.55% | 2 | 3.64% |
Eric W. Biedermann | 19 | 1.34% | 2 | 3.64% |
Jouni Malinen | 18 | 1.27% | 1 | 1.82% |
David S. Miller | 14 | 0.99% | 1 | 1.82% |
Pablo Neira Ayuso | 10 | 0.70% | 1 | 1.82% |
Kyeyoon Park | 10 | 0.70% | 1 | 1.82% |
Fischer, Anna | 9 | 0.63% | 1 | 1.82% |
Eric Dumazet | 7 | 0.49% | 1 | 1.82% |
Ido Schimmel | 7 | 0.49% | 1 | 1.82% |
Peter Huang (Peng) | 5 | 0.35% | 1 | 1.82% |
Roopa Prabhu | 4 | 0.28% | 2 | 3.64% |
Bart De Schuymer | 4 | 0.28% | 1 | 1.82% |
Tejun Heo | 3 | 0.21% | 1 | 1.82% |
Vlad Drukker | 2 | 0.14% | 1 | 1.82% |
Jan Engelhardt | 1 | 0.07% | 1 | 1.82% |
Adrian Bunk | 1 | 0.07% | 1 | 1.82% |
Tan Xiaojun | 1 | 0.07% | 1 | 1.82% |
Michael Braun | 1 | 0.07% | 1 | 1.82% |
Total | 1421 | 100.00% | 55 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.