Release 4.11 net/bridge/br_netfilter_hooks.c
/*
* Handle firewalling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/rculist.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static unsigned int brnf_net_id __read_mostly;

struct brnf_net {
	bool enabled;
};
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly;
static int brnf_filter_pppoe_tagged __read_mostly;
static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif
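The statics above back the net.bridge.bridge-nf-call-* sysctls that this file registers when CONFIG_SYSCTL is enabled; without sysctl support the knobs compile down to fixed values. A minimal userspace sketch, assuming the br_netfilter sysctls are registered (i.e. the module is loaded), that reads one knob via procfs and then disables it — illustration only, not part of the kernel source:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *knob = "/proc/sys/net/bridge/bridge-nf-call-iptables";
	FILE *f = fopen(knob, "r+");
	int val;

	if (!f) {
		perror(knob);	/* br_netfilter not loaded? */
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("%s = %d\n", knob, val);
	rewind(f);
	fprintf(f, "0\n");	/* stop calling iptables for bridged IPv4 (needs root) */
	fclose(f);
	return EXIT_SUCCESS;
}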
#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 27 | 55.10% | 1 | 25.00% |
Stephen Hemminger | 20 | 40.82% | 1 | 25.00% |
Dave Jones | 1 | 2.04% | 1 | 25.00% |
Jiri Pirko | 1 | 2.04% | 1 | 25.00% |
Total | 49 | 100.00% | 4 | 100.00% |
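A standalone restatement of vlan_proto()'s three cases, which the IS_VLAN_* macros below rely on: with a hardware-accelerated tag the protocol field already holds the inner ethertype; with an in-band 802.1Q header the encapsulated type must be read out of the frame; otherwise there is no VLAN. Types and names here are simplified stand-ins for the skb, not kernel API:

#include <stdint.h>

#define ETH_P_8021Q_DEMO 0x8100

struct demo_skb {
	int vlan_tag_present;		/* skb_vlan_tag_present() */
	uint16_t protocol;		/* outer ethertype */
	uint16_t encapsulated_proto;	/* vlan_eth_hdr()->h_vlan_encapsulated_proto */
};

static uint16_t demo_vlan_proto(const struct demo_skb *skb)
{
	if (skb->vlan_tag_present)
		return skb->protocol;	/* tag stripped: protocol is the inner type */
	else if (skb->protocol == ETH_P_8021Q_DEMO)
		return skb->encapsulated_proto;
	else
		return 0;
}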
#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michael Milner | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
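pppoe_proto() reads the two-byte PPP protocol field that sits immediately after the 14-byte Ethernet header and the 6-byte PPPoE session header; the IS_PPPOE_* macros below compare it against PPP_IP and PPP_IPV6. A standalone sketch of the same offset arithmetic (the _DEMO constants mirror ETH_HLEN and sizeof(struct pppoe_hdr); everything here is illustrative):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN_DEMO	14	/* dst MAC + src MAC + ethertype */
#define PPPOE_HDR_DEMO	 6	/* ver/type, code, session id, length */

static uint16_t demo_pppoe_proto(const uint8_t *mac_header)
{
	uint16_t proto;

	/* the PPP protocol field starts at byte 20 of the frame */
	memcpy(&proto, mac_header + ETH_HLEN_DEMO + PPPOE_HDR_DEMO,
	       sizeof(proto));
	return proto;	/* still big-endian, like the kernel's __be16 */
}

int main(void)
{
	uint8_t frame[64] = {0};

	frame[20] = 0x00;	/* PPP_IP is 0x0021, stored big-endian */
	frame[21] = 0x21;
	printf("encapsulated proto: 0x%04x\n", ntohs(demo_pppoe_proto(frame)));
	return 0;
}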
#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
	u16 vlan_tci;
	__be16 vlan_proto;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
static void nf_bridge_info_free(struct sk_buff *skb)
{
	if (skb->nf_bridge) {
		nf_bridge_put(skb->nf_bridge);
		skb->nf_bridge = NULL;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 34 | 89.47% | 2 | 66.67% |
Jiri Pirko | 4 | 10.53% | 1 | 33.33% |
Total | 38 | 100.00% | 3 | 100.00% |
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}

	return nf_bridge;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 87 | 98.86% | 1 | 50.00% |
Changli Gao | 1 | 1.14% | 1 | 50.00% |
Total | 88 | 100.00% | 2 | 100.00% |
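nf_bridge_unshare() is a copy-on-write step: if the refcounted nf_bridge_info is shared with another skb, clone it so this path owns a private copy before mutating it (the forward hook needs this because physoutdev may differ per copy). A generic userspace sketch of the same pattern, assuming a hypothetical refcounted struct with a plain int count where the kernel uses atomic_t:

#include <stdlib.h>
#include <string.h>

struct cow_info {
	int use;	/* reference count (atomic_t in the kernel) */
	int payload;
};

static struct cow_info *cow_unshare(struct cow_info *info)
{
	if (info->use > 1) {
		struct cow_info *tmp = malloc(sizeof(*tmp));

		if (tmp) {
			memcpy(tmp, info, sizeof(*tmp));
			tmp->use = 1;	/* the clone starts with one owner */
		}
		info->use--;		/* drop our reference on the original */
		info = tmp;		/* NULL on allocation failure, as in the kernel */
	}
	return info;
}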
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 29 | 85.29% | 1 | 33.33% |
Stephen Hemminger | 3 | 8.82% | 1 | 33.33% |
Michael Milner | 2 | 5.88% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 30 | 88.24% | 1 | 50.00% |
Michael Milner | 4 | 11.76% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
/* When handing a packet over to the IP layer
* check whether we have a skb that is in the
* expected format
*/
static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bandan Das | 181 | 84.98% | 1 | 12.50% |
Sarveshwar Bandi | 17 | 7.98% | 1 | 12.50% |
Eric W. Biedermann | 8 | 3.76% | 1 | 12.50% |
Eric Dumazet | 4 | 1.88% | 2 | 25.00% |
Bernhard Thaler | 1 | 0.47% | 1 | 12.50% |
Herbert Xu | 1 | 0.47% | 1 | 12.50% |
Stephen Hemminger | 1 | 0.47% | 1 | 12.50% |
Total | 213 | 100.00% | 8 | 100.00% |
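A userspace sketch of the same sanity checks br_validate_ipv4() performs on a raw buffer: minimum IHL, version, header checksum, and total-length consistency. check_csum() is a plain one's-complement sum standing in for the kernel's optimized ip_fast_csum(); names and the flat-buffer interface are illustrative only:

#include <stddef.h>
#include <stdint.h>

static uint16_t check_csum(const uint8_t *hdr, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)		/* 16-bit words, big-endian */
		sum += (uint32_t)hdr[2 * i] << 8 | hdr[2 * i + 1];
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* 0 means the checksum verifies */
}

static int demo_validate_ipv4(const uint8_t *pkt, size_t caplen)
{
	unsigned int ihl, version, tot_len;

	if (caplen < 20)			/* sizeof(struct iphdr) */
		return -1;
	version = pkt[0] >> 4;
	ihl = pkt[0] & 0x0f;
	if (ihl < 5 || version != 4)		/* basic sanity checks */
		return -1;
	if (caplen < ihl * 4)			/* full header present? */
		return -1;
	if (check_csum(pkt, ihl * 2) != 0)	/* ihl*4 bytes = ihl*2 words */
		return -1;
	tot_len = (unsigned int)pkt[2] << 8 | pkt[3];
	if (caplen < tot_len || tot_len < ihl * 4)
		return -1;			/* truncated or inconsistent */
	return 0;
}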
void nf_bridge_update_protocol(struct sk_buff *skb)
{
	switch (skb->nf_bridge->orig_proto) {
	case BRNF_PROTO_8021Q:
		skb->protocol = htons(ETH_P_8021Q);
		break;
	case BRNF_PROTO_PPPOE:
		skb->protocol = htons(ETH_P_PPP_SES);
		break;
	case BRNF_PROTO_UNCHANGED:
		break;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 21 | 42.00% | 1 | 20.00% |
Florian Westphal | 21 | 42.00% | 3 | 60.00% |
Bernhard Thaler | 8 | 16.00% | 1 | 20.00% |
Total | 50 | 100.00% | 5 | 100.00% |
/* Obtain the correct destination MAC address, while preserving the original
* source MAC address. If we already know this address, we just copy it. If we
* don't, we use the neighbour framework to find out. In both cases, we make
* sure that br_handle_frame_finish() is called afterwards.
*/
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if (neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bernhard Thaler | 89 | 47.85% | 3 | 50.00% |
Pablo Neira Ayuso | 87 | 46.77% | 1 | 16.67% |
Eric W. Biedermann | 7 | 3.76% | 1 | 16.67% |
Florian Westphal | 3 | 1.61% | 1 | 16.67% |
Total | 186 | 100.00% | 6 | 100.00% |
static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
			     const struct nf_bridge_info *nf_bridge)
{
	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
/* This requires some explaining. If DNAT has taken place,
* we will need to fix up the destination Ethernet address.
* This is also true when SNAT takes place (for the reply direction).
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
* port group as it was received on. We can still bridge
* the packet.
* 2. The packet was DNAT'ed to a different device, either
* a non-bridged device or another bridge port group.
* The packet will need to be routed.
*
* The correct way of distinguishing between these two cases is to
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
* Let's first consider the case that ip_route_input() succeeds:
*
* If the output device equals the logical bridge device the packet
* came in on, we can consider this bridging. The corresponding MAC
* address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
* Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
* If IP forwarding is disabled, ip_route_input() will fail, while
* ip_route_output_key() can return success. The source
* address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
* if IP forwarding is enabled. If the output device equals the logical bridge
* device, we proceed as if ip_route_input() succeeded. If it differs from the
* logical bridge port or if ip_route_output_key() fails we drop the packet.
*/
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 258 | 66.15% | 6 | 18.75% |
David S. Miller | 40 | 10.26% | 3 | 9.38% |
Simon Wunderlich | 22 | 5.64% | 1 | 3.12% |
Eric Dumazet | 21 | 5.38% | 4 | 12.50% |
Florian Westphal | 14 | 3.59% | 5 | 15.62% |
Eric W. Biedermann | 10 | 2.56% | 3 | 9.38% |
Herbert Xu | 9 | 2.31% | 1 | 3.12% |
Arnaldo Carvalho de Melo | 6 | 1.54% | 2 | 6.25% |
Michael Milner | 2 | 0.51% | 1 | 3.12% |
Bernhard Thaler | 2 | 0.51% | 1 | 3.12% |
Patrick McHardy | 2 | 0.51% | 1 | 3.12% |
Joe Perches | 1 | 0.26% | 1 | 3.12% |
Artur Molchanov | 1 | 0.26% | 1 | 3.12% |
Pablo Neira Ayuso | 1 | 0.26% | 1 | 3.12% |
Denis V. Lunev | 1 | 0.26% | 1 | 3.12% |
Total | 390 | 100.00% | 32 | 100.00% |
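The long comment above the function describes a decision tree; here it is condensed into a standalone model, with all names hypothetical and only the logic restated: after DNAT, bridge if the route (or the fallback output route when forwarding is disabled) points back at the bridge device, route if it points elsewhere, and drop the martian or unroutable cases. This is a sketch of the control flow, not kernel code:

enum brnf_verdict { BRNF_BRIDGE, BRNF_ROUTE, BRNF_DROP };

struct brnf_model {
	int daddr_changed;		/* br_nf_ipv4_daddr_was_changed() */
	int input_route_ok;		/* ip_route_input() succeeded */
	int output_route_ok;		/* fallback ip_route_output() succeeded */
	int route_dev_is_bridge;	/* route device == bridge device */
};

static enum brnf_verdict brnf_decide(const struct brnf_model *m)
{
	if (!m->daddr_changed)
		return BRNF_BRIDGE;	/* no DNAT: keep bridging */
	if (m->input_route_ok)
		return m->route_dev_is_bridge ? BRNF_BRIDGE : BRNF_ROUTE;
	if (m->output_route_ok && m->route_dev_is_bridge)
		return BRNF_BRIDGE;	/* bridged-and-DNAT'ed: no ip_forward needed */
	return BRNF_DROP;		/* martian or unroutable destination */
}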
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
					skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 67 | 90.54% | 1 | 20.00% |
Patrick McHardy | 4 | 5.41% | 2 | 40.00% |
Jiri Pirko | 2 | 2.70% | 1 | 20.00% |
Ding Tianhong | 1 | 1.35% | 1 | 20.00% |
Total | 74 | 100.00% | 5 | 100.00% |
/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev);

	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 85 | 72.65% | 2 | 22.22% |
Florian Westphal | 21 | 17.95% | 5 | 55.56% |
Stephen Hemminger | 8 | 6.84% | 1 | 11.11% |
Pablo Neira Ayuso | 3 | 2.56% | 1 | 11.11% |
Total | 117 | 100.00% | 9 | 100.00% |
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
* Replicate the checks that IPv4 does on packet reception.
* Set skb->dev to the bridge device (i.e. parent of the
* receiving device) to make netfilter happy, the REDIRECT
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 108 | 39.85% | 4 | 15.38% |
Patrick McHardy | 41 | 15.13% | 4 | 15.38% |
Florian Westphal | 28 | 10.33% | 2 | 7.69% |
Evgeniy Polyakov | 21 | 7.75% | 1 | 3.85% |
Michael Milner | 19 | 7.01% | 1 | 3.85% |
Stephen Hemminger | 14 | 5.17% | 3 | 11.54% |
Eric W. Biedermann | 11 | 4.06% | 3 | 11.54% |
David S. Miller | 10 | 3.69% | 3 | 11.54% |
Herbert Xu | 10 | 3.69% | 2 | 7.69% |
David Kimdon | 7 | 2.58% | 1 | 3.85% |
Jan Engelhardt | 1 | 0.37% | 1 | 3.85% |
Bernhard Thaler | 1 | 0.37% | 1 | 3.85% |
Total | 271 | 100.00% | 26 | 100.00% |
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {

		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge->physindev;
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 97 | 55.75% | 4 | 23.53% |
Florian Westphal | 30 | 17.24% | 4 | 23.53% |
Bernhard Thaler | 24 | 13.79% | 2 | 11.76% |
David S. Miller | 7 | 4.02% | 1 | 5.88% |
Eric W. Biedermann | 7 | 4.02% | 2 | 11.76% |
Michael Milner | 4 | 2.30% | 1 | 5.88% |
Stephen Hemminger | 3 | 1.72% | 1 | 5.88% |
Patrick McHardy | 1 | 0.57% | 1 | 5.88% |
Pablo Neira Ayuso | 1 | 0.57% | 1 | 5.88% |
Total | 174 | 100.00% | 17 | 100.00% |
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
* because of the physdev module. For ARP, indev and outdev are the
* bridge ports. */
static unsigned int br_nf_forward_ip(void *priv,
				     struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP;
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP;
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in),
		parent, br_nf_forward_finish);

	return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 116 | 39.59% | 7 | 21.21% |
Florian Westphal | 35 | 11.95% | 4 | 12.12% |
Bernhard Thaler | 33 | 11.26% | 3 | 9.09% |
Herbert Xu | 31 | 10.58% | 3 | 9.09% |
Stephen Hemminger | 25 | 8.53% | 3 | 9.09% |
Patrick McHardy | 15 | 5.12% | 4 | 12.12% |
Eric W. Biedermann | 14 | 4.78% | 3 | 9.09% |
Michael Milner | 8 | 2.73% | 1 | 3.03% |
David S. Miller | 8 | 2.73% | 2 | 6.06% |
Alban Crequy | 4 | 1.37% | 1 | 3.03% |
Pablo Neira Ayuso | 3 | 1.02% | 1 | 3.03% |
Jan Engelhardt | 1 | 0.34% | 1 | 3.03% |
Total | 293 | 100.00% | 33 | 100.00% |
static unsigned int br_nf_forward_arp(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bart De Schuymer | 102 | 57.30% | 4 | 23.53% |
Patrick McHardy | 39 | 21.91% | 3 | 17.65% |
David S. Miller | 15 | 8.43% | 3 | 17.65% |
Eric W. Biedermann | 6 | 3.37% | 2 | 11.76% |
Stephen Hemminger | 6 | 3.37% | 1 | 5.88% |
Arnaldo Carvalho de Melo | 3 | 1.69% | 1 | 5.88% |
Florian Westphal | 3 | 1.69% | 1 | 5.88% |
Herbert Xu | 3 | 1.69% | 1 | 5.88% |
Jan Engelhardt | 1 | 0.56% | 1 | 5.88% |
Total | 178 | 100.00% | 17 | 100.00% |
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_tci) {
		skb->vlan_tci = data->vlan_tci;
		skb->vlan_proto = data->vlan_proto;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 114 | 89.06% | 4 | 50.00% |
Eric W. Biedermann | 7 | 5.47% | 2 | 25.00% |
Pablo Neira Ayuso | 5 | 3.91% | 1 | 12.50% |
David S. Miller | 2 | 1.56% | 1 | 12.50% |
Total | 128 | 100.00% | 8 | 100.00% |
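br_nf_push_frag_xmit() restores the L2 header saved in the per-CPU brnf_frag_data buffer: the saved bytes are copied into the headroom just in front of the current data pointer (a negative offset, as skb_copy_to_linear_data_offset() does), then the data pointer is pushed back over the encapsulation. A sketch of that arithmetic with plain pointers standing in for the skb; names are illustrative, and the caller must guarantee the headroom the kernel obtains via skb_cow_head():

#include <string.h>

struct frag_demo {
	unsigned char mac[22];		/* NF_BRIDGE_MAX_MAC_HEADER_LENGTH = 8 + 14 */
	unsigned char encap_size;	/* VLAN/PPPoE bytes to re-push */
	unsigned char size;		/* total saved header length */
};

static unsigned char *restore_header(unsigned char *data,
				     const struct frag_demo *d)
{
	/* skb_copy_to_linear_data_offset(skb, -d->size, d->mac, d->size) */
	memcpy(data - d->size, d->mac, d->size);
	/* __skb_push(skb, d->encap_size): expose the encap header again */
	return data - d->encap_size;
}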
static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct