Contributors: 35

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Nikolay Aleksandrov | 373 | 24.80% | 13 | 18.57% |
| Herbert Xu | 341 | 22.67% | 6 | 8.57% |
| Arnd Bergmann | 135 | 8.98% | 1 | 1.43% |
| Linus Torvalds (pre-git) | 119 | 7.91% | 1 | 1.43% |
| Linus Torvalds | 82 | 5.45% | 2 | 2.86% |
| Toshiaki Makita | 57 | 3.79% | 1 | 1.43% |
| Stephen Hemminger | 52 | 3.46% | 8 | 11.43% |
| Vlad Yasevich | 52 | 3.46% | 4 | 5.71% |
| Mike Manning | 46 | 3.06% | 2 | 2.86% |
| Felix Fietkau | 39 | 2.59% | 1 | 1.43% |
| Pablo Neira Ayuso | 35 | 2.33% | 2 | 2.86% |
| Américo Wang | 21 | 1.40% | 2 | 2.86% |
| Eric W. Biederman | 19 | 1.26% | 2 | 2.86% |
| Tobias Waldekranz | 19 | 1.26% | 1 | 1.43% |
| Jouni Malinen | 18 | 1.20% | 1 | 1.43% |
| wangyunjian | 14 | 0.93% | 1 | 1.43% |
| David S. Miller | 14 | 0.93% | 1 | 1.43% |
| Kyeyoon Park | 10 | 0.66% | 1 | 1.43% |
| Fischer, Anna | 9 | 0.60% | 1 | 1.43% |
| Roopa Prabhu | 8 | 0.53% | 3 | 4.29% |
| Eric Dumazet | 6 | 0.40% | 2 | 2.86% |
| Ido Schimmel | 6 | 0.40% | 1 | 1.43% |
| Linus Lüssing | 5 | 0.33% | 1 | 1.43% |
| Bart De Schuymer | 4 | 0.27% | 1 | 1.43% |
| Peter Huang (Peng) | 3 | 0.20% | 1 | 1.43% |
| Martin KaFai Lau | 3 | 0.20% | 1 | 1.43% |
| Tejun Heo | 3 | 0.20% | 1 | 1.43% |
| Paolo Abeni | 2 | 0.13% | 1 | 1.43% |
| Thomas Gleixner | 2 | 0.13% | 1 | 1.43% |
| Vlad Drukker | 2 | 0.13% | 1 | 1.43% |
| Jan Engelhardt | 1 | 0.07% | 1 | 1.43% |
| Vladimir Oltean | 1 | 0.07% | 1 | 1.43% |
| Michael Braun | 1 | 0.07% | 1 | 1.43% |
| Menglong Dong | 1 | 0.07% | 1 | 1.43% |
| Tan Xiaojun | 1 | 0.07% | 1 | 1.43% |
| Total | 1504 | | 70 | |

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}

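/* Push the Ethernet header back on and transmit the frame on its egress
 * device, dropping it if the device cannot forward it (e.g. device down or
 * frame too large).
 */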
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;
	br_drop_fake_rtable(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;
		skb_set_network_header(skb, depth);
	}
	br_switchdev_frame_set_offload_fwd_mark(skb);
	dev_queue_xmit(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

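/* Clear any timestamp and run the NF_BR_POST_ROUTING hook before handing
 * the frame to br_dev_queue_push_xmit() for transmission.
 */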
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

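/* Send the frame out through @to: apply egress VLAN handling, then pass it
 * through the NF_BR_FORWARD hook (forwarded traffic) or the NF_BR_LOCAL_OUT
 * hook (locally originated traffic) on its way to br_forward_finish().
 */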
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it.
	 */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);
	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;
	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}
	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}

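/* Forward a clone of the frame so that the caller keeps ownership of the
 * original skb, e.g. for local delivery or for flooding further ports.
 */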
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}
	__br_forward(prev, skb, local_orig);
	return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;
	/* redirect to backup link if the destination port is down */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;
		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		to = backup_port;
	}
	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}
out:
	if (!local_rcv)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

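/* Flooding helper: remember the previous eligible port and deliver a clone
 * to it, so that the last eligible port can consume the original skb.
 */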
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	int err;

	if (!should_deliver(p, skb))
		return prev;
	nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);
	if (!prev)
		goto out;
	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);
out:
	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
	return p;
}

/* Flood the frame to every port that permits flooding of @pkt_type;
 * called under rcu_read_lock
 */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off, nor
		 * other traffic if flood off, except for traffic we originate
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}
		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;
		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}
	if (!prev)
		goto out;
	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;
out:
	if (!local_rcv)
		kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
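/* Deliver a private copy of the frame to @p with the destination MAC
 * rewritten to @addr (multicast-to-unicast forwarding).
 */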
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;
	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return;
	}
	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
	__br_forward(p, skb, local_orig);
}

/* Flood the frame to the group's member ports and to all current multicast
 * router ports; called with rcu_read_lock
 */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);
	if (mdst) {
		p = rcu_dereference(mdst->ports);
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
	}
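
	/* Walk the group member list and the router port list in parallel;
	 * both lists are ordered by port pointer value, so a port present on
	 * both lists is visited (and delivered to) only once.
	 */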
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;
		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);
		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;
			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}
		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
delivered:
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}
	if (!prev)
		goto out;
	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;
out:
	if (!local_rcv)
		kfree_skb(skb);
}
#endif