Release 4.8 net/bridge/br_device.c
/*
* Device handling code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>
#include <asm/uaccess.h>
#include "br_private.h"
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);
static struct lock_class_key bridge_netdev_addr_lock_key;
/* net device transmit always called with BH disabled */
/* Bridge netdev transmit path: forward a frame originated on the bridge
 * device itself out through the bridge.  Net device transmit is always
 * called with BH disabled.  The skb is always consumed; NETDEV_TX_OK is
 * returned in all cases.
 */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	/* skb->data still points at the Ethernet header here, so "dest" is
	 * the destination MAC.  It remains valid after the skb_pull() below
	 * because only the data pointer moves, not the underlying buffer.
	 */
	const unsigned char *dest = skb->data;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	u16 vid = 0;

	rcu_read_lock();
	/* Give br_netfilter (if loaded) first shot at the packet; a non-zero
	 * return from the hook means it consumed the skb.
	 */
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	/* Per-cpu TX accounting; syncp makes the 64-bit counters readable
	 * consistently on 32-bit hosts (see br_get_stats64()).
	 */
	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	BR_INPUT_SKB_CB(skb)->brdev = dev;
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	/* VLAN ingress filtering; on rejection the skb has been freed. */
	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
		goto out;

	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, false, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		/* Under netpoll, skip the multicast snooping machinery and
		 * simply flood.
		 */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, false, false, true);
			goto out;
		}
		/* IGMP/MLD snooping; non-zero means a parse problem, drop. */
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			/* Known group (or routers-only traffic) with an active
			 * querier: deliver only to subscribed ports/routers.
			 */
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, false, false, true);
	} else if ((dst = __br_fdb_get(br, dest, vid)) != NULL) {
		/* Known unicast destination: forward to its port only. */
		br_forward(dst->dst, skb, false, true);
	} else {
		/* Unknown unicast: flood to all ports. */
		br_flood(br, skb, true, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
herbert xu | herbert xu | 90 | 24.66% | 5 | 14.71% |
pre-git | pre-git | 81 | 22.19% | 1 | 2.94% |
stephen hemminger | stephen hemminger | 47 | 12.88% | 7 | 20.59% |
nikolay aleksandrov | nikolay aleksandrov | 41 | 11.23% | 3 | 8.82% |
pablo neira ayuso | pablo neira ayuso | 20 | 5.48% | 2 | 5.88% |
vlad yasevich | vlad yasevich | 17 | 4.66% | 4 | 11.76% |
toshiaki makita | toshiaki makita | 16 | 4.38% | 1 | 2.94% |
eric dumazet | eric dumazet | 16 | 4.38% | 1 | 2.94% |
linus lussing | linus lussing | 12 | 3.29% | 2 | 5.88% |
bart de schuymer | bart de schuymer | 9 | 2.47% | 1 | 2.94% |
linus torvalds | linus torvalds | 7 | 1.92% | 1 | 2.94% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 3 | 0.82% | 1 | 2.94% |
americo wang | americo wang | 2 | 0.55% | 1 | 2.94% |
li rongqing | li rongqing | 1 | 0.27% | 1 | 2.94% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.27% | 1 | 2.94% |
pavel emelianov | pavel emelianov | 1 | 0.27% | 1 | 2.94% |
patrick mchardy | patrick mchardy | 1 | 0.27% | 1 | 2.94% |
| Total | 365 | 100.00% | 34 | 100.00% |
/* Put the bridge device's addr_list_lock into its own lockdep class so
 * that nesting it with a port device's addr_list_lock does not raise a
 * false-positive lockdep report.
 */
static void br_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
nikolay aleksandrov | nikolay aleksandrov | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
/* ndo_init: allocate the per-cpu stats and initialise the VLAN and
 * multicast-stats subsystems for a new bridge device.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up here has been torn down again (goto-based unwind).  Note that the
 * original code also called br_set_lockdep_class() on the multicast
 * failure path, which is pointless since registration then aborts; the
 * class is now only set on success.
 */
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!br->stats)
		return -ENOMEM;

	err = br_vlan_init(br);
	if (err)
		goto err_free_stats;

	err = br_multicast_init_stats(br);
	if (err)
		goto err_vlan_flush;

	br_set_lockdep_class(dev);
	return 0;

err_vlan_flush:
	br_vlan_flush(br);
err_free_stats:
	free_percpu(br->stats);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 42 | 41.58% | 1 | 16.67% |
nikolay aleksandrov | nikolay aleksandrov | 35 | 34.65% | 2 | 33.33% |
vlad yasevich | vlad yasevich | 22 | 21.78% | 1 | 16.67% |
li rongqing | li rongqing | 1 | 0.99% | 1 | 16.67% |
americo wang | americo wang | 1 | 0.99% | 1 | 16.67% |
| Total | 101 | 100.00% | 6 | 100.00% |
/* ndo_open: bring the bridge device up.
 * Features are recomputed first (they depend on the current port set),
 * then the TX queue is started and STP and multicast are enabled.
 * Always succeeds.
 */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 23 | 52.27% | 1 | 25.00% |
stephen hemminger | stephen hemminger | 14 | 31.82% | 1 | 25.00% |
herbert xu | herbert xu | 5 | 11.36% | 1 | 25.00% |
michal miroslaw | michal miroslaw | 2 | 4.55% | 1 | 25.00% |
| Total | 44 | 100.00% | 4 | 100.00% |
/* ndo_set_rx_mode: intentionally empty.  The bridge device has no
 * hardware filter of its own; promiscuity changes are propagated to the
 * ports through ndo_change_rx_flags (br_dev_change_rx_flags) instead.
 */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 10 | 100.00% | 1 | 100.00% |
| Total | 10 | 100.00% | 1 | 100.00% |
/* ndo_change_rx_flags: when the bridge device's promiscuous state
 * changes, re-evaluate promiscuous mode on the member ports.
 */
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (!(change & IFF_PROMISC))
		return;

	br_manage_promisc(netdev_priv(dev));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
vlad yasevich | vlad yasevich | 28 | 100.00% | 1 | 100.00% |
| Total | 28 | 100.00% | 1 | 100.00% |
/* ndo_stop: bring the bridge device down, mirroring br_dev_open().
 * STP and multicast are disabled before the TX queue is stopped.
 * Always succeeds.
 */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 21 | 53.85% | 1 | 25.00% |
herbert xu | herbert xu | 14 | 35.90% | 1 | 25.00% |
stephen hemminger | stephen hemminger | 4 | 10.26% | 2 | 50.00% |
| Total | 39 | 100.00% | 4 | 100.00% |
static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct net_bridge *br = netdev_priv(dev);
struct pcpu_sw_netstats tmp, sum = { 0 };
unsigned int cpu;
for_each_possible_cpu(cpu) {
unsigned int start;
const struct pcpu_sw_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&bstats->syncp);
memcpy(&tmp, bstats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
sum.tx_bytes += tmp.tx_bytes;
sum.tx_packets += tmp.tx_packets;
sum.rx_bytes += tmp.rx_bytes;
sum.rx_packets += tmp.rx_packets;
}
stats->tx_bytes = sum.tx_bytes;
stats->tx_packets = sum.tx_packets;
stats->rx_bytes = sum.rx_bytes;
stats->rx_packets = sum.rx_packets;
return stats;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 111 | 64.16% | 1 | 20.00% |
eric dumazet | eric dumazet | 58 | 33.53% | 2 | 40.00% |
eric w. biederman | eric w. biederman | 2 | 1.16% | 1 | 20.00% |
li rongqing | li rongqing | 2 | 1.16% | 1 | 20.00% |
| Total | 173 | 100.00% | 5 | 100.00% |
/* ndo_change_mtu: the bridge MTU must be at least 68 (the minimum IPv4
 * MTU per RFC 791) and may not exceed the smallest MTU among the
 * bridge's ports.
 */
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	if (new_mtu < 68)
		return -EINVAL;
	if (new_mtu > br_min_mtu(br))
		return -EINVAL;

	dev->mtu = new_mtu;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 39 | 53.42% | 1 | 25.00% |
simon wunderlich | simon wunderlich | 23 | 31.51% | 1 | 25.00% |
david s. miller | david s. miller | 6 | 8.22% | 1 | 25.00% |
pablo neira ayuso | pablo neira ayuso | 5 | 6.85% | 1 | 25.00% |
| Total | 73 | 100.00% | 4 | 100.00% |
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
struct net_bridge *br = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
/* Mac address will be changed in br_stp_change_bridge_id(). */
br_stp_change_bridge_id(br, addr->sa_data);
}
spin_unlock_bh(&br->lock);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 87 | 95.60% | 3 | 50.00% |
joe perches | joe perches | 2 | 2.20% | 1 | 16.67% |
toshiaki makita | toshiaki makita | 1 | 1.10% | 1 | 16.67% |
danny kukawka | danny kukawka | 1 | 1.10% | 1 | 16.67% |
| Total | 91 | 100.00% | 6 | 100.00% |
/* ethtool get_drvinfo: identify the software bridge "driver".
 * There is no firmware or bus behind a bridge, hence the "N/A" fields.
 */
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 48 | 60.00% | 1 | 50.00% |
jiri pirko | jiri pirko | 32 | 40.00% | 1 | 50.00% |
| Total | 80 | 100.00% | 2 | 100.00% |
/* ndo_fix_features: constrain the requested feature set to what the
 * bridge (i.e. all of its ports together) can actually support.
 */
static netdev_features_t br_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	return br_features_recompute(netdev_priv(dev), features);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 23 | 71.88% | 1 | 25.00% |
michal miroslaw | michal miroslaw | 5 | 15.62% | 2 | 50.00% |
jesse gross | jesse gross | 4 | 12.50% | 1 | 25.00% |
| Total | 32 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: intentionally empty for the bridge device;
 * netpoll on the member ports is handled via br_netpoll_setup() and
 * the per-port netpoll instances.
 */
static void br_poll_controller(struct net_device *br_dev)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
herbert xu | herbert xu | 6 | 60.00% | 1 | 33.33% |
americo wang | americo wang | 3 | 30.00% | 1 | 33.33% |
stephen hemminger | stephen hemminger | 1 | 10.00% | 1 | 33.33% |
| Total | 10 | 100.00% | 3 | 100.00% |
/* ndo_netpoll_cleanup: tear down netpoll state on every bridge port. */
static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list)
		br_netpoll_disable(port);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
herbert xu | herbert xu | 21 | 53.85% | 1 | 33.33% |
americo wang | americo wang | 18 | 46.15% | 2 | 66.67% |
| Total | 39 | 100.00% | 3 | 100.00% |
/* Allocate and attach a netpoll instance to port @p.
 * Returns 0 on success or a negative errno; on failure nothing is
 * attached and the allocation has been released.
 */
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	int err;

	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
cong wang | cong wang | 41 | 53.25% | 1 | 25.00% |
americo wang | americo wang | 24 | 31.17% | 1 | 25.00% |
herbert xu | herbert xu | 11 | 14.29% | 1 | 25.00% |
eric w. biederman | eric w. biederman | 1 | 1.30% | 1 | 25.00% |
| Total | 77 | 100.00% | 4 | 100.00% |
/* Enable netpoll on a port, but only if the bridge device itself has
 * netpoll active; otherwise this is a successful no-op.
 */
int br_netpoll_enable(struct net_bridge_port *p)
{
	if (p->br->dev->npinfo)
		return __br_netpoll_enable(p);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 15 | 50.00% | 2 | 40.00% |
herbert xu | herbert xu | 7 | 23.33% | 1 | 20.00% |
cong wang | cong wang | 4 | 13.33% | 1 | 20.00% |
americo wang | americo wang | 4 | 13.33% | 1 | 20.00% |
| Total | 30 | 100.00% | 5 | 100.00% |
/* ndo_netpoll_setup: enable netpoll on every current bridge port.
 * On the first failure, netpoll is torn down again on all ports and
 * that error is returned; otherwise returns 0.
 */
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *port;
	int err = 0;

	list_for_each_entry(port, &br->port_list, list) {
		if (!port->dev)
			continue;
		err = __br_netpoll_enable(port);
		if (err) {
			br_netpoll_cleanup(dev);
			break;
		}
	}

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
cong wang | cong wang | 64 | 77.11% | 1 | 20.00% |
herbert xu | herbert xu | 10 | 12.05% | 1 | 20.00% |
stephen hemminger | stephen hemminger | 6 | 7.23% | 1 | 20.00% |
americo wang | americo wang | 2 | 2.41% | 1 | 20.00% |
jiri pirko | jiri pirko | 1 | 1.20% | 1 | 20.00% |
| Total | 83 | 100.00% | 5 | 100.00% |
/* Detach and asynchronously free a port's netpoll instance, if any. */
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (np) {
		/* Clear the pointer before freeing so the port reads as
		 * netpoll-disabled from here on.
		 */
		p->np = NULL;
		__netpoll_free_async(np);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
herbert xu | herbert xu | 19 | 52.78% | 1 | 33.33% |
stephen hemminger | stephen hemminger | 16 | 44.44% | 1 | 33.33% |
neil horman | neil horman | 1 | 2.78% | 1 | 33.33% |
| Total | 36 | 100.00% | 3 | 100.00% |
#endif
/* ndo_add_slave: attach @slave_dev to the bridge as a new port. */
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
{
	return br_add_if(netdev_priv(dev), slave_dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jiri pirko | jiri pirko | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
/* ndo_del_slave: detach port @slave_dev from the bridge. */
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	return br_del_if(netdev_priv(dev), slave_dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
jiri pirko | jiri pirko | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
/* Minimal ethtool support: driver identification and link state only. */
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo = br_getinfo,
	.get_link = ethtool_op_get_link,
};
/* net_device operations for the bridge device; installed by
 * br_dev_setup().  FDB and bridge link hooks (ndo_fdb_*, ndo_bridge_*)
 * back the "bridge fdb"/"bridge link" netlink interfaces.
 */
static const struct net_device_ops br_netdev_ops = {
	.ndo_open = br_dev_open,
	.ndo_stop = br_dev_stop,
	.ndo_init = br_dev_init,
	.ndo_start_xmit = br_dev_xmit,
	.ndo_get_stats64 = br_get_stats64,
	.ndo_set_mac_address = br_set_mac_address,
	.ndo_set_rx_mode = br_dev_set_multicast_list,
	.ndo_change_rx_flags = br_dev_change_rx_flags,
	.ndo_change_mtu = br_change_mtu,
	.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = br_netpoll_setup,
	.ndo_netpoll_cleanup = br_netpoll_cleanup,
	.ndo_poll_controller = br_poll_controller,
#endif
	.ndo_add_slave = br_add_slave,
	.ndo_del_slave = br_del_slave,
	.ndo_fix_features = br_fix_features,
	.ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
	.ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
	.ndo_fdb_add = br_fdb_add,
	.ndo_fdb_del = br_fdb_delete,
	.ndo_fdb_dump = br_fdb_dump,
	.ndo_bridge_getlink = br_getlink,
	.ndo_bridge_setlink = br_setlink,
	.ndo_bridge_dellink = br_dellink,
	.ndo_features_check = passthru_features_check,
};
/* Device destructor: release the per-cpu stats, then the netdev. */
static void br_dev_free(struct net_device *dev)
{
	struct net_bridge *bridge = netdev_priv(dev);

	free_percpu(bridge->stats);
	free_netdev(dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 33 | 100.00% | 1 | 100.00% |
| Total | 33 | 100.00% | 1 | 100.00% |
/* sysfs device type, shown as DEVTYPE=bridge in uevents. */
static struct device_type br_type = {
	.name = "bridge",
};
/* Initialise a freshly allocated bridge net_device and its private
 * struct net_bridge with default values.  Runs before the device is
 * registered; br_dev_init() (ndo_init) completes the allocation work.
 */
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	/* Start with a random MAC.  NOTE(review): presumably replaced by a
	 * port-derived address via br_stp_change_bridge_id() once ports are
	 * added — confirm in the STP code.
	 */
	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = br_dev_free;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	/* Software device: no TX queue needed. */
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	spin_lock_init(&br->hash_lock);

	/* Default bridge priority 0x8000 — the two priority bytes of the
	 * STP bridge ID.
	 */
	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;
	/* Group address defaults to the reserved link-local base address. */
	ether_addr_copy(br->group_addr, eth_reserved_addr_base);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	/* Until another bridge wins an election, we are our own root. */
	br->designated_root = br->bridge_id;
	/* 802.1D default timer values, converted to jiffies. */
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->ageing_time = BR_DEFAULT_AGEING_TIME;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 180 | 75.00% | 8 | 32.00% |
pre-git | pre-git | 20 | 8.33% | 1 | 4.00% |
toshiaki makita | toshiaki makita | 10 | 4.17% | 2 | 8.00% |
vlad yasevich | vlad yasevich | 8 | 3.33% | 1 | 4.00% |
michal miroslaw | michal miroslaw | 5 | 2.08% | 1 | 4.00% |
herbert xu | herbert xu | 3 | 1.25% | 3 | 12.00% |
wilfried klaebe | wilfried klaebe | 3 | 1.25% | 1 | 4.00% |
phil sutter | phil sutter | 2 | 0.83% | 1 | 4.00% |
patrick mchardy | patrick mchardy | 2 | 0.83% | 1 | 4.00% |
alexey dobriyan | alexey dobriyan | 2 | 0.83% | 1 | 4.00% |
scott feldman | scott feldman | 1 | 0.42% | 1 | 4.00% |
joe perches | joe perches | 1 | 0.42% | 1 | 4.00% |
danny kukawka | danny kukawka | 1 | 0.42% | 1 | 4.00% |
ben hutchings | ben hutchings | 1 | 0.42% | 1 | 4.00% |
jesse gross | jesse gross | 1 | 0.42% | 1 | 4.00% |
| Total | 240 | 100.00% | 25 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
stephen hemminger | stephen hemminger | 753 | 39.65% | 23 | 23.71% |
herbert xu | herbert xu | 191 | 10.06% | 9 | 9.28% |
pre-git | pre-git | 167 | 8.79% | 1 | 1.03% |
jiri pirko | jiri pirko | 122 | 6.42% | 5 | 5.15% |
cong wang | cong wang | 109 | 5.74% | 1 | 1.03% |
nikolay aleksandrov | nikolay aleksandrov | 103 | 5.42% | 5 | 5.15% |
vlad yasevich | vlad yasevich | 89 | 4.69% | 8 | 8.25% |
americo wang | americo wang | 81 | 4.27% | 5 | 5.15% |
eric dumazet | eric dumazet | 76 | 4.00% | 2 | 2.06% |
pablo neira ayuso | pablo neira ayuso | 38 | 2.00% | 3 | 3.09% |
toshiaki makita | toshiaki makita | 32 | 1.69% | 5 | 5.15% |
john fastabend | john fastabend | 25 | 1.32% | 2 | 2.06% |
simon wunderlich | simon wunderlich | 23 | 1.21% | 1 | 1.03% |
michal miroslaw | michal miroslaw | 17 | 0.90% | 2 | 2.06% |
bart de schuymer | bart de schuymer | 12 | 0.63% | 1 | 1.03% |
linus lussing | linus lussing | 12 | 0.63% | 2 | 2.06% |
linus torvalds | linus torvalds | 7 | 0.37% | 1 | 1.03% |
david s. miller | david s. miller | 6 | 0.32% | 1 | 1.03% |
jesse gross | jesse gross | 5 | 0.26% | 1 | 1.03% |
li rongqing | li rongqing | 4 | 0.21% | 1 | 1.03% |
patrick mchardy | patrick mchardy | 3 | 0.16% | 2 | 2.06% |
wilfried klaebe | wilfried klaebe | 3 | 0.16% | 1 | 1.03% |
eric w. biederman | eric w. biederman | 3 | 0.16% | 2 | 2.06% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 3 | 0.16% | 1 | 1.03% |
joe perches | joe perches | 3 | 0.16% | 2 | 2.06% |
danny kukawka | danny kukawka | 2 | 0.11% | 2 | 2.06% |
phil sutter | phil sutter | 2 | 0.11% | 1 | 1.03% |
alexey dobriyan | alexey dobriyan | 2 | 0.11% | 1 | 1.03% |
neil horman | neil horman | 1 | 0.05% | 1 | 1.03% |
ben hutchings | ben hutchings | 1 | 0.05% | 1 | 1.03% |
pavel emelianov | pavel emelianov | 1 | 0.05% | 1 | 1.03% |
scott feldman | scott feldman | 1 | 0.05% | 1 | 1.03% |
adrian bunk | adrian bunk | 1 | 0.05% | 1 | 1.03% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.05% | 1 | 1.03% |
| Total | 1899 | 100.00% | 97 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.