Release 4.7 net/bridge/br_forward.c
/*
 * Forwarding decision
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek <buytenh@gnu.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb,
                         void (*__packet_hook)(const struct net_bridge_port *p,
                                               struct sk_buff *skb));
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        struct net_bridge_vlan_group *vg;

        vg = nbp_vlan_group_rcu(p);
        return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
                br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 27 | 42.86% | 1 | 12.50% |
| nikolay aleksandrov | 13 | 20.63% | 2 | 25.00% |
| anna fischer | 9 | 14.29% | 1 | 12.50% |
| stephen hemminger | 7 | 11.11% | 2 | 25.00% |
| vlad yasevich | 6 | 9.52% | 1 | 12.50% |
| linus torvalds | 1 | 1.59% | 1 | 12.50% |
| Total | 63 | 100.00% | 8 | 100.00% |
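should_deliver() refuses to send a frame back out the port it arrived on unless that port has BR_HAIRPIN_MODE set, and it additionally requires the port to be in the forwarding state and the VLAN egress check to pass. As a rough illustration of how the hairpin flag is driven from userspace (the interface name is a placeholder; exact tooling varies), typical iproute2/sysfs usage looks like this:

# Illustrative only: enable hairpin ("reflective relay") on a bridge port so
# should_deliver() will allow skb->dev == p->dev for frames on that port.
bridge link set dev eth0 hairpin on
cat /sys/class/net/eth0/brport/hairpin_mode    # reads back 1 when enabled

Hairpin forwarding is mainly useful for VEPA-style setups where traffic must be reflected back toward the host it came from.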
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        if (!is_skb_forwardable(skb->dev, skb))
                goto drop;

        skb_push(skb, ETH_HLEN);
        br_drop_fake_rtable(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (skb->protocol == htons(ETH_P_8021Q) ||
             skb->protocol == htons(ETH_P_8021AD))) {
                int depth;

                if (!__vlan_get_protocol(skb, skb->protocol, &depth))
                        goto drop;

                skb_set_network_header(skb, depth);
        }

        dev_queue_xmit(skb);

        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| toshiaki makita | 71 | 57.72% | 1 | 10.00% |
| linus torvalds | 23 | 18.70% | 1 | 10.00% |
| stephen hemminger | 8 | 6.50% | 2 | 20.00% |
| peter huang | 5 | 4.07% | 1 | 10.00% |
| david s. miller | 5 | 4.07% | 1 | 10.00% |
| eric w. biederman | 5 | 4.07% | 1 | 10.00% |
| vlad yasevich | 3 | 2.44% | 1 | 10.00% |
| vlad drukker | 2 | 1.63% | 1 | 10.00% |
| bart de schuymer | 1 | 0.81% | 1 | 10.00% |
| Total | 123 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
                       net, sk, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 24 | 57.14% | 1 | 14.29% |
| eric w. biederman | 7 | 16.67% | 2 | 28.57% |
| david s. miller | 7 | 16.67% | 1 | 14.29% |
| bart de schuymer | 2 | 4.76% | 1 | 14.29% |
| jan engelhardt | 1 | 2.38% | 1 | 14.29% |
| stephen hemminger | 1 | 2.38% | 1 | 14.29% |
| Total | 42 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(br_forward_finish);
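br_forward_finish() does not transmit directly; it re-enters netfilter at the NF_BR_POST_ROUTING hook and only reaches br_dev_queue_push_xmit() if that hook accepts the frame. As a hedged illustration (the interface name is a placeholder), that hook corresponds to the ebtables nat/POSTROUTING chain, so a rule such as:

# Illustrative only: matches every frame the bridge is about to queue for
# transmission on eth1, i.e. frames that just passed br_forward_finish().
ebtables -t nat -A POSTROUTING -o eth1 -j ACCEPT

would see all bridged traffic leaving that port.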
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        struct net_bridge_vlan_group *vg;

        vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, vg, skb);
        if (!skb)
                return;

        skb->dev = to->dev;

        if (unlikely(netpoll_tx_running(to->br->dev))) {
                if (!is_skb_forwardable(skb->dev, skb))
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
                        br_netpoll_send_skb(to, skb);
                }
                return;
        }

        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                dev_net(skb->dev), NULL, skb, NULL, skb->dev,
                br_forward_finish);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 35 | 26.32% | 1 | 7.14% |
| linus torvalds | 32 | 24.06% | 1 | 7.14% |
| vlad yasevich | 21 | 15.79% | 2 | 14.29% |
| americo wang | 19 | 14.29% | 2 | 14.29% |
| nikolay aleksandrov | 13 | 9.77% | 2 | 14.29% |
| eric w. biederman | 7 | 5.26% | 1 | 7.14% |
| david s. miller | 2 | 1.50% | 1 | 7.14% |
| stephen hemminger | 1 | 0.75% | 1 | 7.14% |
| lennert buytenhek | 1 | 0.75% | 1 | 7.14% |
| jan engelhardt | 1 | 0.75% | 1 | 7.14% |
| bart de schuymer | 1 | 0.75% | 1 | 7.14% |
| Total | 133 | 100.00% | 14 | 100.00% |
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
        struct net_bridge_vlan_group *vg;
        struct net_device *indev;

        if (skb_warn_if_lro(skb)) {
                kfree_skb(skb);
                return;
        }

        vg = nbp_vlan_group_rcu(to);
        skb = br_handle_vlan(to->br, vg, skb);
        if (!skb)
                return;

        indev = skb->dev;
        skb->dev = to->dev;
        skb_forward_csum(skb);

        NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
                dev_net(indev), NULL, skb, indev, skb->dev,
                br_forward_finish);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 27 | 24.32% | 1 | 7.69% |
| pre-git | 23 | 20.72% | 1 | 7.69% |
| herbert xu | 18 | 16.22% | 2 | 15.38% |
| vlad yasevich | 18 | 16.22% | 1 | 7.69% |
| nikolay aleksandrov | 13 | 11.71% | 2 | 15.38% |
| eric w. biederman | 5 | 4.50% | 1 | 7.69% |
| stephen hemminger | 3 | 2.70% | 2 | 15.38% |
| david s. miller | 2 | 1.80% | 1 | 7.69% |
| bart de schuymer | 1 | 0.90% | 1 | 7.69% |
| jan engelhardt | 1 | 0.90% | 1 | 7.69% |
| Total | 111 | 100.00% | 13 | 100.00% |
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
        if (to && should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }

        kfree_skb(skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 34 | 80.95% | 1 | 25.00% |
| pre-git | 5 | 11.90% | 1 | 25.00% |
| stephen hemminger | 3 | 7.14% | 2 | 50.00% |
| Total | 42 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
        if (to && should_deliver(to, skb)) {
                if (skb0)
                        deliver_clone(to, skb, __br_forward);
                else
                        __br_forward(to, skb);
                return;
        }

        if (!skb0)
                kfree_skb(skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 38 | 57.58% | 1 | 20.00% |
| michael braun | 24 | 36.36% | 1 | 20.00% |
| roopa prabhu | 2 | 3.03% | 1 | 20.00% |
| stephen hemminger | 1 | 1.52% | 1 | 20.00% |
| linus torvalds | 1 | 1.52% | 1 | 20.00% |
| Total | 66 | 100.00% | 5 | 100.00% |
static int deliver_clone(const struct net_bridge_port *prev,
                         struct sk_buff *skb,
                         void (*__packet_hook)(const struct net_bridge_port *p,
                                               struct sk_buff *skb))
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.tx_dropped++;
                return -ENOMEM;
        }

        __packet_hook(prev, skb);
        return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 83 | 98.81% | 2 | 66.67% |
| david s. miller | 1 | 1.19% | 1 | 33.33% |
| Total | 84 | 100.00% | 3 | 100.00% |
static struct net_bridge_port *maybe_deliver(
        struct net_bridge_port *prev, struct net_bridge_port *p,
        struct sk_buff *skb,
        void (*__packet_hook)(const struct net_bridge_port *p,
                              struct sk_buff *skb))
{
        int err;

        if (!should_deliver(p, skb))
                return prev;

        if (!prev)
                goto out;

        err = deliver_clone(prev, skb, __packet_hook);
        if (err)
                return ERR_PTR(err);

out:
        return p;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 91 | 100.00% | 1 | 100.00% |
| Total | 91 | 100.00% | 1 | 100.00% |
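deliver_clone() and maybe_deliver() together implement a deferred-clone pattern used by the flooding paths below: while iterating over candidate ports, the previously accepted port is only remembered, and it is cloned-and-delivered once the next eligible port turns up, so the last port can receive the original skb without an extra copy. The following is a minimal, self-contained sketch of that pattern; all names and types are illustrative, not kernel code.

/* Toy sketch of the deferred-clone flooding pattern: every eligible port
 * except the last gets a copy, the last port consumes the original. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

static struct buf *clone_buf(const struct buf *b)       /* like skb_clone() */
{
        struct buf *c = malloc(sizeof(*c));

        if (c)
                memcpy(c, b, sizeof(*c));
        return c;
}

static void send_to(int port, struct buf *b)             /* like __packet_hook */
{
        printf("port %d <- %s\n", port, b->data);
        free(b);
}

static int should_deliver_toy(int port)                  /* like should_deliver() */
{
        return port != 3;        /* pretend port 3 is the ingress port */
}

int main(void)
{
        struct buf *skb = malloc(sizeof(*skb));
        int ports[] = { 1, 2, 3, 4 };
        int prev = -1;
        size_t i;

        if (!skb)
                return 1;
        strcpy(skb->data, "frame");

        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
                if (!should_deliver_toy(ports[i]))
                        continue;
                if (prev >= 0) {                          /* like deliver_clone() */
                        struct buf *copy = clone_buf(skb);

                        if (copy)
                                send_to(prev, copy);
                }
                prev = ports[i];                          /* like maybe_deliver() */
        }

        if (prev >= 0)
                send_to(prev, skb);   /* final port gets the original buffer */
        else
                free(skb);            /* no eligible port: drop */
        return 0;
}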
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                     struct sk_buff *skb0,
                     void (*__packet_hook)(const struct net_bridge_port *p,
                                           struct sk_buff *skb),
                     bool unicast)
{
        struct net_bridge_port *p;
        struct net_bridge_port *prev;

        prev = NULL;

        list_for_each_entry_rcu(p, &br->port_list, list) {
                /* Do not flood unicast traffic to ports that turn it off */
                if (unicast && !(p->flags & BR_FLOOD))
                        continue;

                /* Do not flood to ports that enable proxy ARP */
                if (p->flags & BR_PROXYARP)
                        continue;
                if ((p->flags & BR_PROXYARP_WIFI) &&
                    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
                        continue;

                prev = maybe_deliver(prev, p, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;
        }

        if (!prev)
                goto out;

        if (skb0)
                deliver_clone(prev, skb, __packet_hook);
        else
                __packet_hook(prev, skb);
        return;

out:
        if (!skb0)
                kfree_skb(skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 57 | 32.76% | 1 | 11.11% |
| herbert xu | 44 | 25.29% | 2 | 22.22% |
| jouni malinen | 18 | 10.34% | 1 | 11.11% |
| linus torvalds | 18 | 10.34% | 1 | 11.11% |
| vlad yasevich | 18 | 10.34% | 1 | 11.11% |
| kyeyoon park | 10 | 5.75% | 1 | 11.11% |
| stephen hemminger | 9 | 5.17% | 2 | 22.22% |
| Total | 174 | 100.00% | 9 | 100.00% |
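br_flood() walks the bridge's port list and skips ports that have opted out: a cleared BR_FLOOD flag suppresses unknown-unicast flooding on that port, and the proxy-ARP flags suppress flooding of requests the bridge has already answered. As a hedged example (interface names are placeholders), these per-port flags can be toggled from userspace with iproute2:

# Illustrative only: per-port flags consulted by br_flood().
bridge link set dev eth0 flood off       # clear BR_FLOOD: no unknown-unicast flooding
bridge link set dev eth0 proxy_arp on    # set BR_PROXYARP: port is skipped when flooding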
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
        br_flood(br, skb, NULL, __br_deliver, unicast);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 24 | 77.42% | 1 | 33.33% |
| vlad yasevich | 5 | 16.13% | 1 | 33.33% |
| herbert xu | 2 | 6.45% | 1 | 33.33% |
| Total | 31 | 100.00% | 3 | 100.00% |
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
                      struct sk_buff *skb2, bool unicast)
{
        br_flood(br, skb, skb2, __br_forward, unicast);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 24 | 66.67% | 1 | 33.33% |
| herbert xu | 7 | 19.44% | 1 | 33.33% |
| vlad yasevich | 5 | 13.89% | 1 | 33.33% |
| Total | 36 | 100.00% | 3 | 100.00% |
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                               struct sk_buff *skb, struct sk_buff *skb0,
                               void (*__packet_hook)(
                                        const struct net_bridge_port *p,
                                        struct sk_buff *skb))
{
        struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *prev = NULL;
        struct net_bridge_port_group *p;
        struct hlist_node *rp;

        rp = rcu_dereference(hlist_first_rcu(&br->router_list));
        p = mdst ? rcu_dereference(mdst->ports) : NULL;
        while (p || rp) {
                struct net_bridge_port *port, *lport, *rport;

                lport = p ? p->port : NULL;
                rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
                             NULL;

                port = (unsigned long)lport > (unsigned long)rport ?
                       lport : rport;

                prev = maybe_deliver(prev, port, skb, __packet_hook);
                if (IS_ERR(prev))
                        goto out;

                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
                        rp = rcu_dereference(hlist_next_rcu(rp));
        }

        if (!prev)
                goto out;

        if (skb0)
                deliver_clone(prev, skb, __packet_hook);
        else
                __packet_hook(prev, skb);
        return;

out:
        if (!skb0)
                kfree_skb(skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 247 | 88.53% | 1 | 20.00% |
| stephen hemminger | 25 | 8.96% | 3 | 60.00% |
| eric dumazet | 7 | 2.51% | 1 | 20.00% |
| Total | 279 | 100.00% | 5 | 100.00% |
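br_multicast_flood() merges two lists in a single pass: the multicast group's port list and the bridge's router-port list. The lists are kept in a matching order keyed on the port pointer, so the loop always delivers to the larger of the two current heads and then advances whichever cursor(s) point at that port, visiting a port that appears on both lists only once. A toy sketch of the same duplicate-skipping merge over plain integers (illustrative only, not kernel code):

/* Toy sketch of the two-cursor walk in br_multicast_flood(): two lists in
 * matching (descending) order are merged so a shared entry is seen once. */
#include <stdio.h>

static void merge_deliver(const int *grp, int ngrp, const int *rtr, int nrtr)
{
        int i = 0, j = 0;

        while (i < ngrp || j < nrtr) {
                int g = i < ngrp ? grp[i] : -1;   /* -1 plays the role of NULL */
                int r = j < nrtr ? rtr[j] : -1;
                int port = g > r ? g : r;         /* pick the larger head */

                printf("deliver to port %d\n", port);

                if (g >= port)
                        i++;    /* advance the group list past this port */
                if (r >= port)
                        j++;    /* advance the router list past this port */
        }
}

int main(void)
{
        int group_ports[]  = { 9, 7, 3 };  /* multicast group members */
        int router_ports[] = { 7, 5 };     /* router ports; 7 is on both lists */

        merge_deliver(group_ports, 3, router_ports, 2);  /* prints 9, 7, 5, 3 */
        return 0;
}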
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb)
{
        br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 26 | 100.00% | 1 | 100.00% |
| Total | 26 | 100.00% | 1 | 100.00% |
/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb, struct sk_buff *skb2)
{
        br_multicast_flood(mdst, skb, skb2, __br_forward);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 31 | 100.00% | 1 | 100.00% |
| Total | 31 | 100.00% | 1 | 100.00% |
#endif
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 595 | 41.81% | 7 | 14.29% |
| linus torvalds | 212 | 14.90% | 2 | 4.08% |
| pre-git | 163 | 11.45% | 1 | 2.04% |
| vlad yasevich | 76 | 5.34% | 4 | 8.16% |
| toshiaki makita | 71 | 4.99% | 1 | 2.04% |
| stephen hemminger | 64 | 4.50% | 10 | 20.41% |
| michael braun | 57 | 4.01% | 1 | 2.04% |
| nikolay aleksandrov | 39 | 2.74% | 2 | 4.08% |
| eric w. biederman | 24 | 1.69% | 2 | 4.08% |
| americo wang | 22 | 1.55% | 2 | 4.08% |
| david s. miller | 18 | 1.26% | 2 | 4.08% |
| jouni malinen | 18 | 1.26% | 1 | 2.04% |
| pablo neira ayuso | 15 | 1.05% | 2 | 4.08% |
| kyeyoon park | 10 | 0.70% | 1 | 2.04% |
| anna fischer | 9 | 0.63% | 1 | 2.04% |
| eric dumazet | 7 | 0.49% | 1 | 2.04% |
| peter huang | 5 | 0.35% | 1 | 2.04% |
| bart de schuymer | 5 | 0.35% | 1 | 2.04% |
| tejun heo | 3 | 0.21% | 1 | 2.04% |
| jan engelhardt | 3 | 0.21% | 1 | 2.04% |
| vlad drukker | 2 | 0.14% | 1 | 2.04% |
| roopa prabhu | 2 | 0.14% | 1 | 2.04% |
| adrian bunk | 1 | 0.07% | 1 | 2.04% |
| tan xiaojun | 1 | 0.07% | 1 | 2.04% |
| lennert buytenhek | 1 | 0.07% | 1 | 2.04% |
| Total | 1423 | 100.00% | 49 | 100.00% |