Release 4.11 net/bridge/br_fdb.c
/*
* Forwarding database
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include "br_private.h"
/* Slab cache from which all net_bridge_fdb_entry objects are allocated. */
static struct kmem_cache *br_fdb_cache __read_mostly;

/* Forward declarations for helpers defined later in this file. */
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *, int);

/* Random salt mixed into br_mac_hash() so that hash bucket placement is
 * not predictable to remote senders (hash-collision DoS hardening).
 */
static u32 fdb_salt __read_mostly;
/* Module init: create the slab cache for FDB entries and seed the hash
 * salt.  Returns 0 on success or -ENOMEM if the cache cannot be created.
 */
int __init br_fdb_init(void)
{
	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
					 sizeof(struct net_bridge_fdb_entry),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (br_fdb_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&fdb_salt, sizeof(fdb_salt));

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 37 | 74.00% | 2 | 66.67% |
Akinobu Mita | 13 | 26.00% | 1 | 33.33% |
Total | 50 | 100.00% | 3 | 100.00% |
/* Module teardown: destroy the FDB entry slab cache. */
void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
/* How long a dynamic entry may stay unrefreshed before it expires:
 * while a topology change is in progress use forward_delay (default
 * 15 sec), otherwise the configured ageing time (default 5 minutes).
 */
static inline unsigned long hold_time(const struct net_bridge *br)
{
	if (br->topology_change)
		return br->forward_delay;

	return br->ageing_time;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 18 | 66.67% | 1 | 33.33% |
Stephen Hemminger | 9 | 33.33% | 2 | 66.67% |
Total | 27 | 100.00% | 3 | 100.00% |
/* Has this dynamic entry outlived its hold time?  Static and externally
 * learned entries never expire.
 */
static inline int has_expired(const struct net_bridge *br,
			      const struct net_bridge_fdb_entry *fdb)
{
	if (fdb->is_static || fdb->added_by_external_learn)
		return 0;

	return time_before_eq(fdb->updated + hold_time(br), jiffies);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 30 | 68.18% | 1 | 20.00% |
Stephen Hemminger | 9 | 20.45% | 3 | 60.00% |
Roopa Prabhu | 5 | 11.36% | 1 | 20.00% |
Total | 44 | 100.00% | 5 | 100.00% |
/* Map (MAC, VLAN id) to a bucket index in br->hash[].  Only the low four
 * bytes of the address feed the hash; fdb_salt randomizes placement.
 */
static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
{
	/* use 1 byte of OUI and 3 bytes of NIC */
	u32 key = get_unaligned((u32 *)(mac + 2));
	return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 23 | 46.94% | 3 | 60.00% |
Linus Torvalds (pre-git) | 19 | 38.78% | 1 | 20.00% |
Vlad Yasevich | 7 | 14.29% | 1 | 20.00% |
Total | 49 | 100.00% | 5 | 100.00% |
/* RCU callback: actually free an entry once all readers are done with it.
 * Queued by fdb_delete() via call_rcu().
 */
static void fdb_rcu_free(struct rcu_head *head)
{
	struct net_bridge_fdb_entry *ent
		= container_of(head, struct net_bridge_fdb_entry, rcu);
	kmem_cache_free(br_fdb_cache, ent);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michał Mirosław | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
/* Look up an entry matching (addr, vid) in a single hash chain.
 * Must run inside an RCU read-side critical section (asserted below).
 * Returns the matching entry, or NULL: when the loop exhausts the chain
 * without a break, the cursor of hlist_for_each_entry_rcu() is NULL.
 */
static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_entry *f;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hlist_for_each_entry_rcu(f, head, hlist)
		if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
			break;

	return f;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	/* hash_lock already keeps the entry alive; the rcu_read_lock()
	 * here only satisfies fdb_find_rcu()'s rcu_read_lock_held()
	 * assertion.
	 */
	rcu_read_lock();
	fdb = fdb_find_rcu(head, addr, vid);
	rcu_read_unlock();

	return fdb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 69 | 94.52% | 2 | 66.67% |
Américo Wang | 4 | 5.48% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
/* RCU-protected lookup of (addr, vid) across the whole table; callers
 * must hold rcu_read_lock().  Returns the entry or NULL.
 */
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->hash[br_mac_hash(addr, vid)], addr, vid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	/* Non-promiscuous ports filter unicast in HW, so the address must
	 * be programmed into each of them explicitly.
	 */
	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;

undo:
	/* roll back only the ports already programmed before the failure */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 91 | 94.79% | 1 | 33.33% |
Li RongQing | 4 | 4.17% | 1 | 33.33% |
Jiri Pirko | 1 | 1.04% | 1 | 33.33% |
Total | 96 | 100.00% | 3 | 100.00% |
/* When a static FDB entry is deleted, the HW address from that entry is
* also removed from the bridge private HW address list and updates all
* the ports with needed information.
* Called under RTNL.
*/
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 51 | 98.08% | 1 | 50.00% |
Jiri Pirko | 1 | 1.92% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
/* Tell the switchdev layer (deferred) to remove an externally learned
 * entry from the offloading device's FDB.
 * NOTE(review): dereferences f->dst unconditionally — callers appear to
 * guarantee externally learned entries have a port; confirm.
 */
static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj = {
			.orig_dev = f->dst->dev,
			.id = SWITCHDEV_OBJ_ID_PORT_FDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = f->vlan_id,
	};

	ether_addr_copy(fdb.addr, f->addr.addr);
	switchdev_port_obj_del(f->dst->dev, &fdb.obj);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Feldman | 33 | 44.00% | 1 | 12.50% |
Jiri Pirko | 31 | 41.33% | 5 | 62.50% |
Ido Schimmel | 9 | 12.00% | 1 | 12.50% |
Vivien Didelot | 2 | 2.67% | 1 | 12.50% |
Total | 75 | 100.00% | 8 | 100.00% |
/* Unlink @f from its hash chain and free it after an RCU grace period.
 * Undoes HW address programming for static entries, notifies switchdev
 * for externally learned ones, and sends an RTM_DELNEIGH notification.
 */
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
	if (f->is_static)
		fdb_del_hw_addr(br, f->addr.addr);

	if (f->added_by_external_learn)
		fdb_del_external_learn(f);

	hlist_del_init_rcu(&f->hlist);
	fdb_notify(br, f, RTM_DELNEIGH);
	call_rcu(&f->rcu, fdb_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 36 | 50.70% | 2 | 28.57% |
Vlad Yasevich | 16 | 22.54% | 1 | 14.29% |
Scott Feldman | 11 | 15.49% | 1 | 14.29% |
Michał Mirosław | 6 | 8.45% | 1 | 14.29% |
Jiri Pirko | 1 | 1.41% | 1 | 14.29% |
Nikolay Aleksandrov | 1 | 1.41% | 1 | 14.29% |
Total | 71 | 100.00% | 7 | 100.00% |
/* Delete a local entry if no other port had the same address. */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			/* another port shares the address: re-home the
			 * entry instead of deleting it
			 */
			f->dst = op;
			f->added_by_user = 0;
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		/* keep it as the bridge device's own (portless) entry */
		f->dst = NULL;
		f->added_by_user = 0;
		return;
	}

	/* nobody else owns this address: really delete */
	fdb_delete(br, f);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Toshiaki Makita | 140 | 74.87% | 2 | 66.67% |
Nikolay Aleksandrov | 47 | 25.13% | 1 | 33.33% |
Total | 187 | 100.00% | 3 | 100.00% |
/* Locate the local, kernel-added entry for (addr, vid) on port @p and
 * retire it via fdb_delete_local().  Takes hash_lock internally.
 */
void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	spin_lock_bh(&br->hash_lock);
	fdb = br_fdb_find(br, addr, vid);
	if (fdb && fdb->is_local && !fdb->added_by_user && fdb->dst == p)
		fdb_delete_local(br, p, fdb);
	spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Toshiaki Makita | 83 | 97.65% | 1 | 50.00% |
Nikolay Aleksandrov | 2 | 2.35% | 1 | 50.00% |
Total | 85 | 100.00% | 2 | 100.00% |
/* A port's own MAC address changed: retire the kernel-added local
 * entries that referenced the old address, then insert entries for
 * @newaddr (VLAN 0 plus every VLAN configured on the port).
 * The "runs under RTNL" comment below covers the vlan_list walk.
 */
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;
	int i;

	spin_lock_bh(&br->hash_lock);

	vg = nbp_vlan_group(p);
	/* Search all chains since old address/hash is unknown */
	for (i = 0; i < BR_HASH_SIZE; i++) {
		struct hlist_node *h;

		hlist_for_each(h, &br->hash[i]) {
			struct net_bridge_fdb_entry *f;

			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
			if (f->dst == p && f->is_local && !f->added_by_user) {
				/* delete old one */
				fdb_delete_local(br, p, f);

				/* if this port has no vlan information
				 * configured, we can safely be done at
				 * this point.
				 */
				if (!vg || !vg->num_vlans)
					goto insert;
			}
		}
	}

insert:
	/* insert new address,  may fail if invalid address or dup. */
	fdb_insert(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;

	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_insert(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 71 | 35.32% | 1 | 8.33% |
Stephen Hemminger | 48 | 23.88% | 6 | 50.00% |
Toshiaki Makita | 38 | 18.91% | 3 | 25.00% |
Nikolay Aleksandrov | 34 | 16.92% | 1 | 8.33% |
Vlad Yasevich | 10 | 4.98% | 1 | 8.33% |
Total | 201 | 100.00% | 12 | 100.00% |
/* The bridge device's own MAC address changed: drop the stale portless
 * self entries (VLAN 0 and each in-use VLAN) and insert entries for
 * @newaddr.  The vlan_list walk relies on RTNL per the comment below.
 */
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && f->is_local && !f->dst && !f->added_by_user)
		fdb_delete_local(br, NULL, f);

	fdb_insert(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && f->is_local && !f->dst && !f->added_by_user)
			fdb_delete_local(br, NULL, f);
		fdb_insert(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 66 | 33.17% | 2 | 22.22% |
Stephen Hemminger | 61 | 30.65% | 1 | 11.11% |
Toshiaki Makita | 46 | 23.12% | 4 | 44.44% |
Nikolay Aleksandrov | 26 | 13.07% | 2 | 22.22% |
Total | 199 | 100.00% | 9 | 100.00% |
/* Periodic garbage collection (delayed work).  Deletes dynamic entries
 * whose hold time has elapsed and re-arms itself to fire when the next
 * surviving entry is due to expire.
 */
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;
	int i;

	for (i = 0; i < BR_HASH_SIZE; i++) {
		struct net_bridge_fdb_entry *f;
		struct hlist_node *n;

		/* unlocked peek: skip empty chains without taking the lock */
		if (!br->hash[i].first)
			continue;

		spin_lock_bh(&br->hash_lock);
		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
			unsigned long this_timer;

			/* static and externally learned entries never age */
			if (f->is_static)
				continue;
			if (f->added_by_external_learn)
				continue;
			this_timer = f->updated + delay;
			if (time_after(this_timer, now))
				/* not expired yet: track earliest deadline */
				work_delay = min(work_delay, this_timer - now);
			else
				fdb_delete(br, f);
		}
		spin_unlock_bh(&br->hash_lock);
		cond_resched();
	}

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 78 | 39.00% | 1 | 9.09% |
Stephen Hemminger | 55 | 27.50% | 5 | 45.45% |
Linus Torvalds (pre-git) | 36 | 18.00% | 1 | 9.09% |
Baruch Even | 19 | 9.50% | 1 | 9.09% |
Siva Mannem | 7 | 3.50% | 1 | 9.09% |
Fabio Checconi | 3 | 1.50% | 1 | 9.09% |
Andrew Morton | 2 | 1.00% | 1 | 9.09% |
Total | 200 | 100.00% | 11 | 100.00% |
/* Completely flush all dynamic entries in forwarding database.*/
void br_fdb_flush(struct net_bridge *br)
{
int i;
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
struct hlist_node *n;
hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
if (!f->is_static)
fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 80 | 100.00% | 2 | 100.00% |
Total | 80 | 100.00% | 2 | 100.00% |
/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	int i;

	spin_lock_bh(&br->hash_lock);
	for (i = 0; i < BR_HASH_SIZE; i++) {
		struct net_bridge_fdb_entry *f;
		struct hlist_node *n;

		/* use the deletion-safe entry iterator, matching
		 * br_fdb_flush(), instead of open-coding hlist_entry()
		 */
		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
			if (f->dst != p)
				continue;
			/* unless flushing everything, keep static entries
			 * and entries outside the requested VLAN
			 */
			if (!do_all &&
			    (f->is_static || (vid && f->vlan_id != vid)))
				continue;

			if (f->is_local)
				fdb_delete_local(br, p, f);
			else
				fdb_delete(br, f);
		}
	}
	spin_unlock_bh(&br->hash_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 67 | 46.85% | 1 | 12.50% |
Stephen Hemminger | 56 | 39.16% | 5 | 62.50% |
Nikolay Aleksandrov | 17 | 11.89% | 1 | 12.50% |
Toshiaki Makita | 3 | 2.10% | 1 | 12.50% |
Total | 143 | 100.00% | 8 | 100.00% |
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
* if an addr is on some other bridge port */
/* Interface used by ATM LANE hook: returns nonzero when @addr is known
 * to live on some bridge port other than @dev and that port is
 * forwarding.
 */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret = 0;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (port) {
		fdb = br_fdb_find_rcu(port->br, addr, 0);
		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
		      fdb->dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 49 | 52.69% | 3 | 33.33% |
Michał Mirosław | 26 | 27.96% | 1 | 11.11% |
Linus Torvalds (pre-git) | 11 | 11.83% | 1 | 11.11% |
Vlad Yasevich | 2 | 2.15% | 1 | 11.11% |
Patrick McHardy | 2 | 2.15% | 1 | 11.11% |
Jiri Pirko | 2 | 2.15% | 1 | 11.11% |
Nikolay Aleksandrov | 1 | 1.08% | 1 | 11.11% |
Total | 93 | 100.00% | 9 | 100.00% |
#endif /* CONFIG_ATM_LANE */
/*
 * Fill buffer with forwarding table records in
 * the API format.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct __fdb_entry *fe = buf;
	int i, num = 0;
	struct net_bridge_fdb_entry *f;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	for (i = 0; i < BR_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
			if (num >= maxnum)
				goto out;

			if (has_expired(br, f))
				continue;

			/* ignore pseudo entry for local MAC address */
			if (!f->dst)
				continue;

			/* skip counts only entries that would be reported */
			if (skip) {
				--skip;
				continue;
			}

			/* convert from internal format to API */
			memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);

			/* due to ABI compat need to split into hi/lo */
			fe->port_no = f->dst->port_no;
			fe->port_hi = f->dst->port_no >> 8;

			fe->is_local = f->is_local;
			if (!f->is_static)
				fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
			++fe;
			++num;
		}
	}

out:
	rcu_read_unlock();

	return num;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 119 | 58.62% | 7 | 70.00% |
Linus Torvalds (pre-git) | 83 | 40.89% | 2 | 20.00% |
Eric Dumazet | 1 | 0.49% | 1 | 10.00% |
Total | 203 | 100.00% | 10 | 100.00% |
/* Allocate a new FDB entry, initialise it and link it into @head.
 * Returns the new entry, or NULL if the atomic allocation fails.
 * All callers in this file hold br->hash_lock while creating.
 */
static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned char is_local,
					       unsigned char is_static)
{
	struct net_bridge_fdb_entry *fdb;

	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (!fdb)
		return NULL;

	memcpy(fdb->addr.addr, addr, ETH_ALEN);
	fdb->dst = source;
	fdb->vlan_id = vid;
	fdb->is_local = is_local;
	fdb->is_static = is_static;
	fdb->added_by_user = 0;
	fdb->added_by_external_learn = 0;
	fdb->updated = fdb->used = jiffies;
	hlist_add_head_rcu(&fdb->hlist, head);

	return fdb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 46 | 36.22% | 1 | 10.00% |
Stephen Hemminger | 40 | 31.50% | 4 | 40.00% |
Pavel Emelyanov | 10 | 7.87% | 1 | 10.00% |
Roopa Prabhu | 10 | 7.87% | 1 | 10.00% |
Vlad Yasevich | 9 | 7.09% | 1 | 10.00% |
Scott Feldman | 6 | 4.72% | 1 | 10.00% |
Toshiaki Makita | 6 | 4.72% | 1 | 10.00% |
Total | 127 | 100.00% | 10 | 100.00% |
/* Insert a local, static entry for @addr.  Caller holds br->hash_lock.
 * An existing non-local entry with the same (addr, vid) is replaced;
 * an existing local entry is left alone (first port wins).
 */
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid)
{
	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (fdb->is_local)
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb);
	}

	fdb = fdb_create(head, source, addr, vid, 1, 1);
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 128 | 78.05% | 6 | 40.00% |
Vlad Yasevich | 15 | 9.15% | 3 | 20.00% |
Roopa Prabhu | 9 | 5.49% | 2 | 13.33% |
Hong Zhi Guo | 8 | 4.88% | 1 | 6.67% |
Nikolay Aleksandrov | 2 | 1.22% | 1 | 6.67% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.61% | 1 | 6.67% |
Jiri Pirko | 1 | 0.61% | 1 | 6.67% |
Total | 164 | 100.00% | 15 | 100.00% |
/* Add entry for local address of interface.
 * Locked wrapper around fdb_insert(); returns its result.
 */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		  const unsigned char *addr, u16 vid)
{
	int err;

	spin_lock_bh(&br->hash_lock);
	err = fdb_insert(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 46 | 77.97% | 3 | 60.00% |
Linus Torvalds (pre-git) | 8 | 13.56% | 1 | 20.00% |
Vlad Yasevich | 5 | 8.47% | 1 | 20.00% |
Total | 59 | 100.00% | 5 | 100.00% |
/* Learn or refresh a dynamic entry for a frame received on @source.
 * The lookup runs lockless under RCU (enforced by fdb_find_rcu());
 * hash_lock is only taken on the slow path that creates a new entry.
 */
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, bool added_by_user)
{
	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
	struct net_bridge_fdb_entry *fdb;
	bool fdb_modified = false;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	/* ignore packets unless we are using this port */
	if (!(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return;

	fdb = fdb_find_rcu(head, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(fdb->is_local)) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;

			/* fastpath: update of existing entry */
			if (unlikely(source != fdb->dst)) {
				/* the station moved to another port */
				fdb->dst = source;
				fdb_modified = true;
			}
			/* avoid dirtying the cache line if unchanged */
			if (now != fdb->updated)
				fdb->updated = now;
			if (unlikely(added_by_user))
				fdb->added_by_user = 1;
			if (unlikely(fdb_modified))
				fdb_notify(br, fdb, RTM_NEWNEIGH);
		}
	} else {
		spin_lock(&br->hash_lock);
		/* re-check under the lock before creating */
		if (likely(!fdb_find_rcu(head, addr, vid))) {
			fdb = fdb_create(head, source, addr, vid, 0, 0);
			if (fdb) {
				if (unlikely(added_by_user))
					fdb->added_by_user = 1;
				fdb_notify(br, fdb, RTM_NEWNEIGH);
			}
		}
		/* else we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 200 | 67.34% | 11 | 61.11% |
Jon Maxwell | 38 | 12.79% | 1 | 5.56% |
Toshiaki Makita | 31 | 10.44% | 1 | 5.56% |
Vlad Yasevich | 11 | 3.70% | 1 | 5.56% |
Roopa Prabhu | 9 | 3.03% | 2 | 11.11% |
Nikolay Aleksandrov | 8 | 2.69% | 2 | 11.11% |
Total | 297 | 100.00% | 18 | 100.00% |
/* Translate an FDB entry's flags/age into the NUD_* neighbour state
 * reported over rtnetlink.
 */
static int fdb_to_nud(const struct net_bridge *br,
		      const struct net_bridge_fdb_entry *fdb)
{
	if (fdb->is_local)
		return NUD_PERMANENT;
	if (fdb->is_static)
		return NUD_NOARP;
	if (has_expired(br, fdb))
		return NUD_STALE;

	return NUD_REACHABLE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 48 | 88.89% | 1 | 50.00% |
Roopa Prabhu | 6 | 11.11% | 1 | 50.00% |
Total | 54 | 100.00% | 2 | 100.00% |
/* Fill one RTM_NEWNEIGH/RTM_DELNEIGH netlink message describing @fdb.
 * Returns 0 on success or -EMSGSIZE when @skb has no room left.
 */
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
	ndm->ndm_type	 = 0;
	/* a portless (bridge-own) entry reports the bridge's ifindex */
	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state   = fdb_to_nud(br, fdb);

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* NDA_VLAN is only emitted for VLAN-tagged entries */
	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 236 | 76.87% | 3 | 27.27% |
Vlad Yasevich | 22 | 7.17% | 1 | 9.09% |
Roopa Prabhu | 20 | 6.51% | 2 | 18.18% |
David S. Miller | 14 | 4.56% | 1 | 9.09% |
Scott Feldman | 6 | 1.95% | 1 | 9.09% |
Toshiaki Makita | 4 | 1.30% | 1 | 9.09% |
Johannes Berg | 3 | 0.98% | 1 | 9.09% |
Eric W. Biedermann | 2 | 0.65% | 1 | 9.09% |
Total | 307 | 100.00% | 11 | 100.00% |
/* Upper bound on the size of one FDB netlink notification; must cover
 * everything fdb_fill_info() can emit (see WARN_ON in fdb_notify()).
 */
static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 34 | 65.38% | 1 | 33.33% |
Roopa Prabhu | 9 | 17.31% | 1 | 33.33% |
Vlad Yasevich | 9 | 17.31% | 1 | 33.33% |
Total | 52 | 100.00% | 3 | 100.00% |
/* Broadcast an RTM_NEWNEIGH/RTM_DELNEIGH notification for @fdb to the
 * RTNLGRP_NEIGH multicast group; on failure record the error on the
 * group instead.
 */
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 133 | 100.00% | 2 | 100.00% |
Total | 133 | 100.00% | 2 | 100.00% |
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	int err = 0;
	int i;

	/* only actual bridge devices are handled here */
	if (!(dev->priv_flags & IFF_EBRIDGE))
		goto out;

	if (!filter_dev) {
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			goto out;
	}

	for (i = 0; i < BR_HASH_SIZE; i++) {
		struct net_bridge_fdb_entry *f;

		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
			/* cb->args[2] presumably carries the resume point
			 * of an interrupted dump — verify against caller
			 */
			if (*idx < cb->args[2])
				goto skip;

			if (filter_dev &&
			    (!f->dst || f->dst->dev != filter_dev)) {
				if (filter_dev != dev)
					goto skip;
				/* !f->dst is a special case for bridge
				 * It means the MAC belongs to the bridge
				 * Therefore need a little more filtering
				 * we only want to dump the !f->dst case
				 */
				if (f->dst)
					goto skip;
			}
			if (!filter_dev && f->dst)
				goto skip;

			err = fdb_fill_info(skb, br, f,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI);
			if (err < 0)
				goto out;
skip:
			*idx += 1;
		}
	}

out:
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 124 | 51.67% | 2 | 22.22% |
Jamal Hadi Salim | 44 | 18.33% | 2 | 22.22% |
Hubert Sokolowski | 30 | 12.50% | 1 | 11.11% |
Roopa Prabhu | 27 | 11.25% | 1 | 11.11% |
John Fastabend | 8 | 3.33% | 1 | 11.11% |
MINOURA Makoto / 箕浦 真 | 6 | 2.50% | 1 | 11.11% |
Eric W. Biedermann | 1 | 0.42% | 1 | 11.11% |
Total | 240 | 100.00% | 9 | 100.00% |
/* Update (create or replace) forwarding database entry.
 * Caller holds br->hash_lock.  @state is the requested NUD_* state,
 * @flags the NLM_F_* request flags.  Returns 0 or a negative errno.
 */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
{
	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	/* entries on the bridge device itself must be permanent */
	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(head, source, addr, vid, 0, 0);
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (fdb->dst != source) {
			fdb->dst = source;
			modified = true;
		}
	}

	if (fdb_to_nud(br, fdb) != state) {
		/* NUD_PERMANENT and NUD_NOARP both make the entry static
		 * (pinned in HW via fdb_add_hw_addr()); NUD_PERMANENT
		 * additionally marks it local.  The two former branches
		 * were identical apart from is_local, so they are merged.
		 */
		if (state & (NUD_PERMANENT | NUD_NOARP)) {
			fdb->is_local = !!(state & NUD_PERMANENT);
			if (!fdb->is_static) {
				fdb->is_static = 1;
				fdb_add_hw_addr(br, addr);
			}
		} else {
			fdb->is_local = 0;
			if (fdb->is_static) {
				fdb->is_static = 0;
				fdb_del_hw_addr(br, addr);
			}
		}

		modified = true;
	}
	fdb->added_by_user = 1;

	fdb->used = jiffies;
	if (modified) {
		fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH);
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 184 | 48.94% | 5 | 33.33% |
Vlad Yasevich | 61 | 16.22% | 2 | 13.33% |
Roopa Prabhu | 48 | 12.77% | 3 | 20.00% |
Toshiaki Makita | 42 | 11.17% | 2 | 13.33% |
Wilson Kok | 36 | 9.57% | 1 | 6.67% |
Jiri Pirko | 3 | 0.80% | 1 | 6.67% |
Nikolay Aleksandrov | 2 | 0.53% | 1 | 6.67% |
Total | 376 | 100.00% | 15 | 100.00% |
/* Handle one VLAN instance of an RTM_NEWNEIGH request.  With NTF_USE the
 * entry goes through the normal learning path (dynamic); otherwise a
 * static entry is added/updated under hash_lock via fdb_add_entry().
 */
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		/* br_fdb_update() takes hash_lock without disabling BHs,
		 * so disable them here to avoid lock recursion from softirq
		 */
		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, true);
		rcu_read_unlock();
		local_bh_enable();
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm->ndm_state,
				    nlh_flags, vid);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 77 | 56.62% | 4 | 40.00% |
Toshiaki Makita | 31 | 22.79% | 2 | 20.00% |
Vlad Yasevich | 11 | 8.09% | 2 | 20.00% |
John Fastabend | 11 | 8.09% | 1 | 10.00% |
Nikolay Aleksandrov | 6 | 4.41% | 1 | 10.00% |
Total | 136 | 100.00% | 10 | 100.00% |
/* Add new permanent fdb entry with RTM_NEWNEIGH.
 * @dev may be the bridge itself (portless entry) or one of its ports.
 * Without an explicit @vid the entry is applied to VLAN 0 and, when the
 * port/bridge has VLANs configured, to each usable VLAN as well.
 */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	int err = 0;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN.  To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
			if (err)
				goto out;
		}
	}

out:
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 160 | 49.08% | 1 | 9.09% |
Roopa Prabhu | 53 | 16.26% | 1 | 9.09% |
Stephen Hemminger | 50 | 15.34% | 3 | 27.27% |
Nikolay Aleksandrov | 34 | 10.43% | 1 | 9.09% |
Toshiaki Makita | 18 | 5.52% | 2 | 18.18% |
John Fastabend | 7 | 2.15% | 1 | 9.09% |
Jiri Pirko | 3 | 0.92% | 1 | 9.09% |
Wei Yongjun | 1 | 0.31% | 1 | 9.09% |
Total | 326 | 100.00% | 11 | 100.00% |
/* Delete the entry for (addr, vlan) only if it belongs to port @p.
 * Caller holds br->hash_lock.  Returns 0 or -ENOENT.
 */
static int fdb_delete_by_addr_and_port(struct net_bridge *br,
				       const struct net_bridge_port *p,
				       const u8 *addr, u16 vlan)
{
	struct net_bridge_fdb_entry *entry = br_fdb_find(br, addr, vlan);

	if (!entry || entry->dst != p)
		return -ENOENT;

	fdb_delete(br, entry);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 46 | 69.70% | 1 | 20.00% |
Nikolay Aleksandrov | 11 | 16.67% | 3 | 60.00% |
Roopa Prabhu | 9 | 13.64% | 1 | 20.00% |
Total | 66 | 100.00% | 5 | 100.00% |
/* Locked wrapper: take hash_lock and delete (addr, vid) from port @p. */
static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid)
{
	int ret;

	spin_lock_bh(&br->hash_lock);
	ret = fdb_delete_by_addr_and_port(br, p, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 26 | 42.62% | 1 | 25.00% |
Stephen Hemminger | 26 | 42.62% | 1 | 25.00% |
Nikolay Aleksandrov | 9 | 14.75% | 2 | 50.00% |
Total | 61 | 100.00% | 4 | 100.00% |
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	/* @dev may be the bridge itself or one of its ports */
	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		/* No VID given: try VLAN 0 plus every usable VLAN.
		 * err starts as -ENOENT and is bitwise-ANDed with each
		 * per-VLAN result, so it becomes 0 (success) as soon as
		 * any single delete succeeds.
		 */
		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 130 | 53.50% | 1 | 14.29% |
Roopa Prabhu | 52 | 21.40% | 1 | 14.29% |
Nikolay Aleksandrov | 48 | 19.75% | 2 | 28.57% |
Toshiaki Makita | 9 | 3.70% | 1 | 14.29% |
Jiri Pirko | 3 | 1.23% | 1 | 14.29% |
Wei Yongjun | 1 | 0.41% | 1 | 14.29% |
Total | 243 | 100.00% | 7 | 100.00% |
/* Add every static FDB entry of @br to port @p's device unicast
 * address list so the underlying device accepts frames for those
 * addresses.
 *
 * On failure, all addresses added so far are removed again and the
 * error from dev_uc_add() is returned; on success returns 0.
 * Caller must hold RTNL.
 */
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *fdb, *tmp;
	int i;
	int err;

	ASSERT_RTNL();

	for (i = 0; i < BR_HASH_SIZE; i++) {
		hlist_for_each_entry(fdb, &br->hash[i], hlist) {
			/* We only care for static entries */
			if (!fdb->is_static)
				continue;
			err = dev_uc_add(p->dev, fdb->addr.addr);
			if (err)
				goto rollback;
		}
	}
	return 0;

rollback:
	/* Undo only the addresses that were actually added: walk the
	 * buckets in the same order and stop completely once the entry
	 * that failed is reached.  A plain "break" would only leave the
	 * current bucket, and the outer loop would then wrongly call
	 * dev_uc_del() on static entries in later buckets that were
	 * never added above.
	 */
	for (i = 0; i < BR_HASH_SIZE; i++) {
		hlist_for_each_entry(tmp, &br->hash[i], hlist) {
			/* If we reached the fdb that failed, we can stop */
			if (tmp == fdb)
				goto done;
			/* We only care for static entries */
			if (!tmp->is_static)
				continue;
			dev_uc_del(p->dev, tmp->addr.addr);
		}
	}
done:
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 151 | 100.00% | 1 | 100.00% |
Total | 151 | 100.00% | 1 | 100.00% |
/* Remove all of @br's static FDB addresses from port @p's device
 * unicast address list (the reverse of br_fdb_sync_static()).
 * Caller must hold RTNL.
 */
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < BR_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
			/* Only static entries were synced to the device */
			if (f->is_static)
				dev_uc_del(p->dev, f->addr.addr);
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 73 | 100.00% | 1 | 100.00% |
Total | 73 | 100.00% | 1 | 100.00% |
/* Install or refresh an FDB entry for @addr/@vid on port @p that was
 * learned externally (outside the bridge's own software learning).
 *
 * A brand-new entry is created and announced via netlink; an existing
 * externally-learned entry is only refreshed; a software-learned entry
 * (unless added by the user) is taken over and re-announced.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 * Caller must hold RTNL.
 */
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;
	int ret = 0;

	ASSERT_RTNL();
	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		struct hlist_head *head;

		head = &br->hash[br_mac_hash(addr, vid)];
		fdb = fdb_create(head, p, addr, vid, 0, 0);
		if (!fdb) {
			ret = -ENOMEM;
			goto unlock;
		}
		fdb->added_by_external_learn = 1;
		fdb_notify(br, fdb, RTM_NEWNEIGH);
	} else if (fdb->added_by_external_learn) {
		/* Refresh entry */
		fdb->updated = fdb->used = jiffies;
	} else if (!fdb->added_by_user) {
		/* Take over SW learned entry */
		fdb->added_by_external_learn = 1;
		fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH);
	}

unlock:
	spin_unlock_bh(&br->hash_lock);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Feldman | 181 | 92.82% | 1 | 25.00% |
Jiri Pirko | 8 | 4.10% | 1 | 25.00% |
Roopa Prabhu | 4 | 2.05% | 1 | 25.00% |
Nikolay Aleksandrov | 2 | 1.03% | 1 | 25.00% |
Total | 195 | 100.00% | 4 | 100.00% |
/* Delete the externally-learned FDB entry for @addr/@vid.  Entries that
 * were not added via external learning are left untouched.
 *
 * Returns 0 if an entry was deleted, -ENOENT otherwise.
 * Caller must hold RTNL.
 */
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;
	int ret;

	ASSERT_RTNL();
	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && fdb->added_by_external_learn) {
		fdb_delete(br, fdb);
		ret = 0;
	} else {
		ret = -ENOENT;
	}

	spin_unlock_bh(&br->hash_lock);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Scott Feldman | 78 | 88.64% | 1 | 33.33% |
Jiri Pirko | 8 | 9.09% | 1 | 33.33% |
Nikolay Aleksandrov | 2 | 2.27% | 1 | 33.33% |
Total | 88 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 1988 | 37.11% | 35 | 30.70% |
Vlad Yasevich | 982 | 18.33% | 5 | 4.39% |
Nikolay Aleksandrov | 515 | 9.61% | 9 | 7.89% |
Toshiaki Makita | 452 | 8.44% | 10 | 8.77% |
Linus Torvalds (pre-git) | 402 | 7.50% | 3 | 2.63% |
Scott Feldman | 318 | 5.94% | 2 | 1.75% |
Roopa Prabhu | 261 | 4.87% | 7 | 6.14% |
Michał Mirosław | 74 | 1.38% | 1 | 0.88% |
Jiri Pirko | 62 | 1.16% | 9 | 7.89% |
Jamal Hadi Salim | 44 | 0.82% | 2 | 1.75% |
Jon Maxwell | 38 | 0.71% | 1 | 0.88% |
Wilson Kok | 36 | 0.67% | 1 | 0.88% |
Hubert Sokolowski | 30 | 0.56% | 1 | 0.88% |
John Fastabend | 26 | 0.49% | 1 | 0.88% |
Baruch Even | 19 | 0.35% | 1 | 0.88% |
David S. Miller | 17 | 0.32% | 2 | 1.75% |
Akinobu Mita | 13 | 0.24% | 1 | 0.88% |
Pavel Emelyanov | 10 | 0.19% | 1 | 0.88% |
Ido Schimmel | 9 | 0.17% | 1 | 0.88% |
Hong Zhi Guo | 8 | 0.15% | 1 | 0.88% |
Siva Mannem | 7 | 0.13% | 1 | 0.88% |
MINOURA Makoto / 箕浦 真 | 6 | 0.11% | 1 | 0.88% |
Américo Wang | 4 | 0.07% | 1 | 0.88% |
Li RongQing | 4 | 0.07% | 1 | 0.88% |
Johannes Berg | 3 | 0.06% | 1 | 0.88% |
Tejun Heo | 3 | 0.06% | 1 | 0.88% |
Eric W. Biedermann | 3 | 0.06% | 1 | 0.88% |
Fabio Checconi | 3 | 0.06% | 1 | 0.88% |
Franck Bui-Huu | 3 | 0.06% | 1 | 0.88% |
Vivien Didelot | 2 | 0.04% | 1 | 0.88% |
Eric Dumazet | 2 | 0.04% | 2 | 1.75% |
Hideaki Yoshifuji / 吉藤英明 | 2 | 0.04% | 1 | 0.88% |
Wei Yongjun | 2 | 0.04% | 1 | 0.88% |
Patrick McHardy | 2 | 0.04% | 1 | 0.88% |
Christoph Lameter | 2 | 0.04% | 1 | 0.88% |
Andrew Morton | 2 | 0.04% | 1 | 0.88% |
Arun Sharma | 1 | 0.02% | 1 | 0.88% |
Adrian Bunk | 1 | 0.02% | 1 | 0.88% |
Igor Maravić | 1 | 0.02% | 1 | 0.88% |
Total | 5357 | 100.00% | 114 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.