Release 4.11 net/bridge/br_fdb.c
/*
* Forwarding database
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include "br_private.h"
static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int);
static u32 fdb_salt __read_mostly;
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
sizeof(struct net_bridge_fdb_entry),
0,
SLAB_HWCACHE_ALIGN, NULL);
if (!br_fdb_cache)
return -ENOMEM;
get_random_bytes(&fdb_salt, sizeof(fdb_salt));
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 37 | 74.00% | 2 | 66.67% |
Akinobu Mita | 13 | 26.00% | 1 | 33.33% |
Total | 50 | 100.00% | 3 | 100.00% |
void br_fdb_fini(void)
{
kmem_cache_destroy(br_fdb_cache);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* If a topology change is in progress, use forward_delay (default 15 sec);
 * otherwise keep entries for the ageing time (default 5 minutes)
*/
static inline unsigned long hold_time(const struct net_bridge *br)
{
return br->topology_change ? br->forward_delay : br->ageing_time;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 18 | 66.67% | 1 | 33.33% |
Stephen Hemminger | 9 | 33.33% | 2 | 66.67% |
Total | 27 | 100.00% | 3 | 100.00% |
static inline int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
return !fdb->is_static && !fdb->added_by_external_learn &&
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 30 | 68.18% | 1 | 20.00% |
Stephen Hemminger | 9 | 20.45% | 3 | 60.00% |
Roopa Prabhu | 5 | 11.36% | 1 | 20.00% |
Total | 44 | 100.00% | 5 | 100.00% |
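The ageing test above reads: a dynamic entry is stale once its last-update time plus the hold time is at or before the current jiffies value. Below is a minimal user-space model of that check, with times in seconds instead of jiffies and the default bridge timers (15 s forward_delay, 300 s ageing_time) assumed; toy_fdb, toy_hold_time and toy_has_expired are illustrative names, not kernel code.

/* User-space model of the ageing test in has_expired(): a dynamic entry is
 * stale once updated + hold_time is at or before "now".  Times are seconds
 * here instead of jiffies; 15 s / 300 s are the default bridge timers.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_fdb {
        unsigned long updated;                  /* stand-in for jiffies */
        bool is_static;
        bool added_by_external_learn;
};

static unsigned long toy_hold_time(bool topology_change)
{
        return topology_change ? 15 : 300;      /* forward_delay : ageing_time */
}

static bool toy_has_expired(const struct toy_fdb *f, bool topology_change,
                            unsigned long now)
{
        return !f->is_static && !f->added_by_external_learn &&
               f->updated + toy_hold_time(topology_change) <= now;
}

int main(void)
{
        struct toy_fdb f = { .updated = 1000 };

        printf("%d\n", toy_has_expired(&f, false, 1200)); /* 0: still fresh */
        printf("%d\n", toy_has_expired(&f, true, 1200));  /* 1: aged out */
        return 0;
}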
static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(mac + 2));
return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 23 | 46.94% | 3 | 60.00% |
Linus Torvalds (pre-git) | 19 | 38.78% | 1 | 20.00% |
Vlad Yasevich | 7 | 14.29% | 1 | 20.00% |
Total | 49 | 100.00% | 5 | 100.00% |
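For illustration, the bucket selection can be modelled in user space: the four bytes starting at mac[2] (one OUI byte plus the three NIC bytes) are mixed with the VLAN id and the per-boot salt, and the result is masked down to a power-of-two table size. The sketch below assumes BR_HASH_SIZE is 256 (BR_HASH_BITS 8, as in br_private.h) and substitutes a trivial mixer for the kernel's jhash_2words(); toy_mix and toy_mac_hash are illustrative names.

/* Simplified user-space model of the bucket selection in br_mac_hash().
 * toy_mix() is NOT jhash_2words(), just a stand-in mixer; the table size is
 * assumed to be 256 (BR_HASH_BITS 8) as defined in br_private.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_HASH_SIZE 256

static uint32_t toy_mix(uint32_t key, uint32_t vid, uint32_t salt)
{
        uint32_t h = key ^ salt;

        h ^= vid * 0x9e3779b9u;
        h ^= h >> 16;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
        return h;
}

static unsigned int toy_mac_hash(const uint8_t *mac, uint16_t vid,
                                 uint32_t salt)
{
        uint32_t key;

        /* same bytes the kernel hashes: 1 byte of OUI and 3 bytes of NIC */
        memcpy(&key, mac + 2, sizeof(key));
        return toy_mix(key, vid, salt) & (TOY_HASH_SIZE - 1);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

        printf("bucket %u\n", toy_mac_hash(mac, 10, 0x12345678u));
        return 0;
}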
static void fdb_rcu_free(struct rcu_head *head)
{
struct net_bridge_fdb_entry *ent
= container_of(head, struct net_bridge_fdb_entry, rcu);
kmem_cache_free(br_fdb_cache, ent);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michał Mirosław | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *f;
WARN_ON_ONCE(!rcu_read_lock_held());
hlist_for_each_entry_rcu(f, head, hlist)
if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
break;
return f;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
lockdep_assert_held_once(&br->hash_lock);
rcu_read_lock();
fdb = fdb_find_rcu(head, addr, vid);
rcu_read_unlock();
return fdb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 69 | 94.52% | 2 | 66.67% |
Américo Wang | 4 | 5.48% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
return fdb_find_rcu(head, addr, vid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
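br_fdb_find_rcu() requires the caller to already be inside an RCU read-side critical section, and the returned entry must not be dereferenced after rcu_read_unlock(). A hypothetical caller, if it lived in this file, might look like the sketch below; example_is_known() and its arguments are assumptions, not part of the bridge code.

/* Hypothetical caller sketch (not part of br_fdb.c): look up a learned
 * address and use the result only inside the RCU read-side section.
 */
static bool example_is_known(struct net_bridge *br,
                             const unsigned char *addr, u16 vid)
{
        struct net_bridge_fdb_entry *f;
        bool known;

        rcu_read_lock();
        f = br_fdb_find_rcu(br, addr, vid);
        known = f && !has_expired(br, f);
        rcu_read_unlock();
        /* f must not be dereferenced past this point */

        return known;
}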
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
* are then updated with the new information.
* Called under RTNL.
*/
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
int err;
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p)) {
err = dev_uc_add(p->dev, addr);
if (err)
goto undo;
}
}
return;
undo:
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 91 | 94.79% | 1 | 33.33% |
Li RongQing | 4 | 4.17% | 1 | 33.33% |
Jiri Pirko | 1 | 1.04% | 1 | 33.33% |
Total | 96 | 100.00% | 3 | 100.00% |
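fdb_add_hw_addr() uses a common apply-then-unwind shape: if dev_uc_add() fails partway through the port list, list_for_each_entry_continue_reverse() walks back over only the ports that were already updated and undoes them. A self-contained user-space model of the same control flow, using a plain array and hypothetical apply()/undo() stand-ins for dev_uc_add()/dev_uc_del(), is sketched below.

/* Generic shape of the apply-then-unwind pattern used above, modelled with a
 * plain array.  apply() and undo() are hypothetical stand-ins for
 * dev_uc_add() and dev_uc_del().
 */
#include <stdio.h>

static int apply(int i)
{
        return i == 3 ? -1 : 0;         /* simulate a failure on the 4th item */
}

static void undo(int i)
{
        printf("undo %d\n", i);
}

static int apply_all(int n)
{
        int i, err = 0;

        for (i = 0; i < n; i++) {
                err = apply(i);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        /* walk back over the items that already succeeded, newest first */
        while (--i >= 0)
                undo(i);
        return err;
}

int main(void)
{
        printf("apply_all: %d\n", apply_all(5));
        return 0;
}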
/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and all the
 * ports are updated with the needed information.
* Called under RTNL.
*/
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 51 | 98.08% | 1 | 50.00% |
Jiri Pirko | 1 | 1.92% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
{
struct switchdev_obj_port_fdb fdb = {
.obj = {
.orig_dev = f->dst->dev,
.id = SWITCHDEV_OBJ_ID_PORT_FDB,
.flags = SWITCHDEV_F_DEFER,
},
.vid = f->vlan_id,
};
ether_addr_copy(fdb.addr, f->addr.addr);
switchdev_port_obj_del(f->dst->dev, &fdb.obj);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Feldman | 33 | 44.00% | 1 | 12.50% |
Jiri Pirko | 31 | 41.33% | 5 | 62.50% |
Ido Schimmel | 9 | 12.00% | 1 | 12.50% |
Vivien Didelot | 2 | 2.67% | 1 | 12.50% |
Total | 75 | 100.00% | 8 | 100.00% |
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
if (f->is_static)
fdb_del_hw_addr(br, f->addr.addr);
if (f->added_by_external_learn)
fdb_del_external_learn(f);
hlist_del_init_rcu(&f->hlist);
fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 36 | 50.70% | 2 | 28.57% |
Vlad Yasevich | 16 | 22.54% | 1 | 14.29% |
Scott Feldman | 11 | 15.49% | 1 | 14.29% |
Michał Mirosław | 6 | 8.45% | 1 | 14.29% |
Jiri Pirko | 1 | 1.41% | 1 | 14.29% |
Nikolay Aleksandrov | 1 | 1.41% | 1 | 14.29% |
Total | 71 | 100.00% | 7 | 100.00% |
/* Delete a local entry if no other port had the same address. */
static void fdb_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_fdb_entry *f)
{
const unsigned char *addr = f->addr.addr;
struct net_bridge_vlan_group *vg;
const struct net_bridge_vlan *v;
struct net_bridge_port *op;
u16 vid = f->vlan_id;
/* Maybe another port has same hw addr? */
list_for_each_entry(op, &br->port_list, list) {
vg = nbp_vlan_group(op);
if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
(!vid || br_vlan_find(vg, vid))) {
f->dst = op;
f->added_by_user = 0;
return;
}
}
vg = br_vlan_group(br);
v = br_vlan_find(vg, vid);
/* Maybe bridge device has same hw addr? */
if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
(!vid || (v && br_vlan_should_use(v)))) {
f->dst = NULL;
f->added_by_user = 0;
return;
}
fdb_delete(br, f);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Toshiaki Makita | 140 | 74.87% | 2 | 66.67% |
Nikolay Aleksandrov | 47 | 25.13% | 1 | 33.33% |
Total | 187 | 100.00% | 3 | 100.00% |
void br_fdb_find_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
struct net_bridge_fdb_entry *f;
spin_lock_bh(&br->hash_lock);
f = br_fdb_find(br, addr, vid);
if (f && f->is_local && !f->added_by_user && f->dst == p)
fdb_delete_local(br, p, f);
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Toshiaki Makita | 83 | 97.65% | 1 | 50.00% |
Nikolay Aleksandrov | 2 | 2.35% | 1 | 50.00% |
Total | 85 | 100.00% | 2 | 100.00% |
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge_vlan_group *vg;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
int i;
spin_lock_bh(&br->hash_lock);
vg = nbp_vlan_group(p);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h;
hlist_for_each(h, &br->hash[i]) {
struct net_bridge_fdb_entry *f;
f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
if (f->dst == p && f->is_local && !f->added_by_user) {
/* delete old one */
fdb_delete_local(br, p, f);
/* if this port has no vlan information
* configured, we can safely be done at
* this point.
*/
if (!vg || !vg->num_vlans)
goto insert;
}
}
}
insert:
/* insert new address, may fail if invalid address or dup. */
fdb_insert(br, p, newaddr, 0);
if (!vg || !vg->num_vlans)
goto done;
/* Now add entries for every VLAN configured on the port.
* This function runs under RTNL so the bitmap will not change
* from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist)
fdb_insert(br, p, newaddr, v->vid);
done:
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 71 | 35.32% | 1 | 8.33% |
Stephen Hemminger | 48 | 23.88% | 6 | 50.00% |
Toshiaki Makita | 38 | 18.91% | 3 | 25.00% |
Nikolay Aleksandrov | 34 | 16.92% | 1 | 8.33% |
Vlad Yasevich | 10 | 4.98% | 1 | 8.33% |
Total | 201 | 100.00% | 12 | 100.00% |
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
struct net_bridge_vlan *v;
spin_lock_bh(&br->hash_lock);
/* If old entry was unassociated with any port, then delete it. */
f = br_fdb_find(br, br->dev->dev_addr, 0);
if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
vg = br_vlan_group(br);
if (!vg || !vg->num_vlans)
goto out;
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
f = br_fdb_find(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst && !f->added_by_user)
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, v->vid);
}
out:
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 66 | 33.17% | 2 | 22.22% |
Stephen Hemminger | 61 | 30.65% | 1 | 11.11% |
Toshiaki Makita | 46 | 23.12% | 4 | 44.44% |
Nikolay Aleksandrov | 26 | 13.07% | 2 | 22.22% |
Total | 199 | 100.00% | 9 | 100.00% |
void br_fdb_cleanup(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
gc_work.work);
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
int i;
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
struct hlist_node *n;
if (!br->hash[i].first)
continue;
spin_lock_bh(&br->hash_lock);
hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
unsigned long this_timer;
if (f->is_static)
continue;
if (f->added_by_external_learn)
continue;
this_timer = f->updated + delay;
if (time_after(this_timer, now))
work_delay = min(work_delay, this_timer - now);
else
fdb_delete(br, f);
}
spin_unlock_bh(&br->hash_lock);
cond_resched();
}
/* Cleanup minimum 10 milliseconds apart */
work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 78 | 39.00% | 1 | 9.09% |
Stephen Hemminger | 55 | 27.50% | 5 | 45.45% |
Linus Torvalds (pre-git) | 36 | 18.00% | 1 | 9.09% |
Baruch Even | 19 | 9.50% | 1 | 9.09% |
Siva Mannem | 7 | 3.50% | 1 | 9.09% |
Fabio Checconi | 3 | 1.50% | 1 | 9.09% |
Andrew Morton | 2 | 1.00% | 1 | 9.09% |
Total | 200 | 100.00% | 11 | 100.00% |
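The garbage-collection work above reschedules itself for the soonest pending expiry rather than polling on a fixed interval, clamped so it never runs more often than every 10 ms. The following user-space sketch models that delay computation with times in seconds; next_gc_delay() is an illustrative name, not a kernel function.

/* User-space model of how br_fdb_cleanup() picks its next run: start from the
 * full hold time, shrink to the soonest pending expiry, and never wake up
 * more often than every 10 ms.  The kernel does the same arithmetic in
 * jiffies.
 */
#include <stdio.h>

static double next_gc_delay(const double *expires_in, int n, double hold_time)
{
        double delay = hold_time;
        int i;

        for (i = 0; i < n; i++)
                if (expires_in[i] > 0 && expires_in[i] < delay)
                        delay = expires_in[i];  /* entry due sooner: run earlier */

        return delay < 0.01 ? 0.01 : delay;     /* clamp: at least 10 ms apart */
}

int main(void)
{
        double pending[] = { 42.0, 7.5, 180.0 };

        printf("next run in %.2f s\n", next_gc_delay(pending, 3, 300.0));
        return 0;
}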
/* Completely flush all dynamic entries in forwarding database.*/
void br_fdb_flush(struct net_bridge *br)
{
int i;
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
struct hlist_node *n;
hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
if (!f->is_static)
fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 80 | 100.00% | 2 | 100.00% |
Total | 80 | 100.00% | 2 | 100.00% |
/* Flush all entries referring to a specific port.
 * If do_all is set, also flush static entries.
 * If vid is set, only delete entries that match that vlan_id.
*/
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p,
u16 vid,
int do_all)
{
int i;
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
hlist_for_each_safe(h, g, &br->hash[i]) {
struct net_bridge_fdb_entry *f
= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
if (f->dst != p)
continue;
if (!do_all)
if (f->is_static || (vid && f->vlan_id != vid))
continue;
if (f->is_local)
fdb_delete_local(br, p, f);
else
fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 67 | 46.85% | 1 | 12.50% |
Stephen Hemminger | 56 | 39.16% | 5 | 62.50% |
Nikolay Aleksandrov | 17 | 11.89% | 1 | 12.50% |
Toshiaki Makita | 3 | 2.10% | 1 | 12.50% |
Total | 143 | 100.00% | 8 | 100.00% |
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
* if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
struct net_bridge_fdb_entry *fdb;
struct net_bridge_port *port;
int ret;
rcu_read_lock();
port = br_port_get_rcu(dev);
if (!port)
ret = 0;
else {
fdb = br_fdb_find_rcu(port->br, addr, 0);
ret = fdb && fdb->dst && fdb->dst->dev != dev &&
fdb->dst->state == BR_STATE_FORWARDING;
}
rcu_read_unlock();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 49 | 52.69% | 3 | 33.33% |
Michał Mirosław | 26 | 27.96% | 1 | 11.11% |
Linus Torvalds (pre-git) | 11 | 11.83% | 1 | 11.11% |
Vlad Yasevich | 2 | 2.15% | 1 | 11.11% |
Patrick McHardy | 2 | 2.15% | 1 | 11.11% |
Jiri Pirko | 2 | 2.15% | 1 | 11.11% |
Nikolay Aleksandrov | 1 | 1.08% | 1 | 11.11% |
Total | 93 | 100.00% | 9 | 100.00% |
#endif /* CONFIG_ATM_LANE */
/*
* Fill buffer with forwarding table records in
* the API format.
*/
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long maxnum, unsigned long skip)
{
struct __fdb_entry *fe = buf;
int i, num = 0;
struct net_bridge_fdb_entry *f;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
rcu_read_lock();
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
if (has_expired(br, f))
continue;
/* ignore pseudo entry for local MAC address */
if (!f->dst)
continue;
if (skip) {
--skip;
continue;
}
/* convert from internal format to API */
memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);
/* due to ABI compat, the port number needs to be split into hi/lo */
fe->port_no = f->dst->port_no;
fe->port_hi = f->dst->port_no >> 8;
fe->is_local = f->is_local;
if (!f->is_static)
fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
++fe;
++num;
}
}
out:
rcu_read_unlock();
return num;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 119 | 58.62% | 7 | 70.00% |
Linus Torvalds (pre-git) | 83 | 40.89% | 2 | 20.00% |
Eric Dumazet | 1 | 0.49% | 1 | 10.00% |
Total | 203 | 100.00% | 10 | 100.00% |
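Because struct __fdb_entry originally carried only an 8-bit port number, br_fdb_fillbuf() splits the 16-bit value across port_no and port_hi for ABI compatibility, and a user-space consumer has to reassemble it. The sketch below shows that reconstruction against a simplified stand-in for struct __fdb_entry (the real layout lives in include/uapi/linux/if_bridge.h).

/* Sketch of how a user-space consumer reassembles the 16-bit port number that
 * br_fdb_fillbuf() split into port_no/port_hi.  struct toy_fdb_entry is a
 * simplified stand-in for struct __fdb_entry.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_fdb_entry {
        uint8_t  mac_addr[6];
        uint8_t  port_no;               /* low 8 bits of the port number */
        uint8_t  is_local;
        uint32_t ageing_timer_value;
        uint8_t  port_hi;               /* high 8 bits, added for >255 ports */
};

int main(void)
{
        struct toy_fdb_entry fe = { .port_no = 0x2a, .port_hi = 0x01 };
        unsigned int port = fe.port_no | ((unsigned int)fe.port_hi << 8);

        printf("port %u\n", port);      /* prints 298 */
        return 0;
}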
static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
struct net_bridge_port *source,
const unsigned char *addr,
__u16 vid,
unsigned char is_local,
unsigned char is_static)
{
struct net_bridge_fdb_entry *fdb;
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->addr.addr, addr, ETH_ALEN);
fdb->dst = source;
fdb->vlan_id = vid;
fdb->is_local = is_local;
fdb->is_static = is_static;
fdb->added_by_user = 0;
fdb->added_by_external_learn = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
return fdb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 46 | 36.22% | 1 | 10.00% |
Stephen Hemminger | 40 | 31.50% | 4 | 40.00% |
Pavel Emelyanov | 10 | 7.87% | 1 | 10.00% |
Roopa Prabhu | 10 | 7.87% | 1 | 10.00% |
Vlad Yasevich | 9 | 7.09% | 1 | 10.00% |
Scott Feldman | 6 | 4.72% | 1 | 10.00% |
Toshiaki Makita | 6 | 4.72% | 1 | 10.00% |
Total | 127 | 100.00% | 10 | 100.00% |
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
if (!is_valid_ether_addr(addr))
return -EINVAL;
fdb = br_fdb_find(br, addr, vid);
if (fdb) {
/* it is okay to have multiple ports with the same
 * address; just use the first one.
*/
if (fdb->is_local)
return 0;
br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
source ? source->dev->name : br->dev->name, addr, vid);
fdb_delete(br, fdb);
}
fdb = fdb_create(head, source, addr, vid, 1, 1);
if (!fdb)
return -ENOMEM;
fdb_add_hw_addr(br, addr);
fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 128 | 78.05% | 6 | 40.00% |
Vlad Yasevich | 15 | 9.15% | 3 | 20.00% |
Roopa Prabhu | 9 | 5.49% | 2 | 13.33% |
Hong Zhi Guo | 8 | 4.88% | 1 | 6.67% |
Nikolay Aleksandrov | 2 | 1.22% | 1 | 6.67% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.61% | 1 | 6.67% |
Jiri Pirko | 1 | 0.61% | 1 | 6.67% |
Total | 164 | 100.00% | 15 | 100.00% |
/* Add entry for local address of interface */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
int ret;
spin_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, vid);
spin_unlock_bh(&br->hash_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 46 | 77.97% | 3 | 60.00% |
Linus Torvalds (pre-git) | 8 | 13.56% | 1 | 20.00% |
Vlad Yasevich | 5 | 8.47% | 1 | 20.00% |
Total | 59 | 100.00% | 5 | 100.00% |
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, bool added_by_user)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool fdb_modified = false;
/* some users want to always flood. */
if (hold_time(br) == 0)
return;
/* ignore packets unless we are using this port */
if (!(source->state == BR_STATE_LEARNING ||
source->state == BR_STATE_FORWARDING))
return;
fdb = fdb_find_rcu(head, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
if (net_ratelimit())
br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
source->dev->name, addr, vid);
} else {
unsigned long now = jiffies;
/* fastpath: update of existing entry */
if (unlikely(source != fdb->dst)) {
fdb->dst = source;
fdb_modified = true;
}
if (now != fdb->updated)
fdb->updated = now;
if (unlikely(added_by_user))
fdb->added_by_user = 1;
if (unlikely(fdb_modified))
fdb_notify(br, fdb, RTM_NEWNEIGH);
}
} else {
spin_lock(&br->hash_lock);
if (likely(!fdb_find_rcu(head, addr, vid))) {
fdb = fdb_create(head, source, addr, vid, 0, 0);
if (fdb) {
if (unlikely(added_by_user))
fdb->added_by_user = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH);
}
}
/* else we lost the race and someone else inserted
 * it first; don't bother updating
*/
spin_unlock(&br->hash_lock);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 200 | 67.34% | 11 | 61.11% |
Jon Maxwell | 38 | 12.79% | 1 | 5.56% |
Toshiaki Makita | 31 | 10.44% | 1 | 5.56% |
Vlad Yasevich | 11 | 3.70% | 1 | 5.56% |
Roopa Prabhu | 9 | 3.03% | 2 | 11.11% |
Nikolay Aleksandrov | 8 | 2.69% | 2 | 11.11% |
Total | 297 | 100.00% | 18 | 100.00% |
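The slow path of br_fdb_update() follows the classic check, lock, re-check shape: a lockless lookup first, and if it misses, the hash lock is taken and the lookup repeated before creating the entry, so a concurrent learner that won the race is reused instead of duplicated. Below is a deliberately simplified, single-threaded user-space model of that control flow; lookup(), insert() and learn() are hypothetical stand-ins and the one-slot "table" only illustrates the shape, not a real concurrent structure.

/* Simplified, single-threaded model of the check, lock, re-check flow in
 * br_fdb_update()'s slow path.  lookup(), insert() and learn() are
 * hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;                               /* the "table": one entry or NULL */

static int *lookup(void)   { return slot; }
static int *insert(int *e) { slot = e; return e; }

static int *learn(int *candidate)
{
        int *e = lookup();                      /* optimistic, lockless fast path */

        if (e)
                return e;                       /* already known: just update it */

        pthread_mutex_lock(&table_lock);
        e = lookup();                           /* re-check under the lock */
        if (!e)
                e = insert(candidate);          /* we won the race: create it */
        pthread_mutex_unlock(&table_lock);

        return e;
}

int main(void)
{
        static int entry = 42;

        printf("%s\n", learn(&entry) == &entry ? "inserted" : "reused");
        return 0;
}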
static int fdb_to_nud(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
if (fdb->is_local)
return NUD_PERMANENT;
else if (fdb->is_static)
return NUD_NOARP;
else if (has_expired(br, fdb))
return NUD_STALE;
else
return NUD_REACHABLE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 48 | 88.89% | 1 | 50.00% |
Roopa Prabhu | 6 | 11.11% | 1 | 50.00% |
Total | 54 | 100.00% | 2 | 100.00% |
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry