net/bridge/br_device.c (Linux release 4.11)
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>
#include <linux/uaccess.h>
#include "br_private.h"
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);
static struct lock_class_key bridge_netdev_addr_lock_key;
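
The nf_br_ops pointer above is the hook through which the br_netfilter module intercepts locally originated traffic: when the hook consumes an skb, br_dev_xmit() below returns NETDEV_TX_OK without forwarding the frame. A minimal sketch of how a module might publish such a hook, modelled on br_netfilter (my_xmit_hook and my_br_ops are hypothetical names; the assumption is that struct nf_br_ops in include/linux/netfilter_bridge.h carries a single br_dev_xmit_hook member in this release):

static int my_xmit_hook(struct sk_buff *skb)
{
	/* Return non-zero to take ownership of the skb; the bridge
	 * then skips forwarding and just returns NETDEV_TX_OK. */
	return 0;
}

static const struct nf_br_ops my_br_ops = {
	.br_dev_xmit_hook = my_xmit_hook,
};

/* Publish at module init ... */
rcu_assign_pointer(nf_br_ops, &my_br_ops);

/* ... withdraw at module exit, then wait out in-flight readers. */
RCU_INIT_POINTER(nf_br_ops, NULL);
synchronize_net();
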
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	u16 vid = 0;

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	BR_INPUT_SKB_CB(skb)->brdev = dev;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
		goto out;

	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}

out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 90 | 24.66% | 5 | 14.29% |
Linus Torvalds (pre-git) | 81 | 22.19% | 1 | 2.86% |
Stephen Hemminger | 46 | 12.60% | 6 | 17.14% |
Nikolay Aleksandrov | 46 | 12.60% | 5 | 14.29% |
Pablo Neira Ayuso | 20 | 5.48% | 2 | 5.71% |
Toshiaki Makita | 16 | 4.38% | 1 | 2.86% |
Eric Dumazet | 16 | 4.38% | 1 | 2.86% |
Vlad Yasevich | 13 | 3.56% | 4 | 11.43% |
Linus Lüssing | 12 | 3.29% | 2 | 5.71% |
Bart De Schuymer | 9 | 2.47% | 1 | 2.86% |
Linus Torvalds | 7 | 1.92% | 1 | 2.86% |
Arnaldo Carvalho de Melo | 3 | 0.82% | 1 | 2.86% |
Américo Wang | 2 | 0.55% | 1 | 2.86% |
Li RongQing | 1 | 0.27% | 1 | 2.86% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.27% | 1 | 2.86% |
Patrick McHardy | 1 | 0.27% | 1 | 2.86% |
Pavel Emelyanov | 1 | 0.27% | 1 | 2.86% |
Total | 365 | 100.00% | 35 | 100.00% |
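
Two details of the transmit path are easy to miss. First, dest is captured from skb->data before skb_pull(skb, ETH_HLEN) advances the data pointer past the Ethernet header, so it still points at the destination MAC when the forwarding decision is made. Second, BR_INPUT_SKB_CB(skb)->brdev stashes the bridge device in the skb control block so the flood/forward helpers can recover it without an extra argument. A sketch of the accessor as defined in br_private.h (field list abbreviated; the real struct also carries IGMP-snooping and VLAN-filtering state):

struct br_input_skb_cb {
	struct net_device *brdev;
	/* ... IGMP snooping and VLAN filtering state elided ... */
};

#define BR_INPUT_SKB_CB(__skb)	((struct br_input_skb_cb *)(__skb)->cb)
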
static void br_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!br->stats)
		return -ENOMEM;

	err = br_vlan_init(br);
	if (err) {
		free_percpu(br->stats);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(br->stats);
		br_vlan_flush(br);
	}
	br_set_lockdep_class(dev);

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 42 | 41.58% | 1 | 16.67% |
Nikolay Aleksandrov | 35 | 34.65% | 2 | 33.33% |
Vlad Yasevich | 22 | 21.78% | 1 | 16.67% |
Américo Wang | 1 | 0.99% | 1 | 16.67% |
Li RongQing | 1 | 0.99% | 1 | 16.67% |
Total | 101 | 100.00% | 6 | 100.00% |
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	free_percpu(br->stats);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ido Schimmel | 38 | 88.37% | 1 | 50.00% |
Xin Long | 5 | 11.63% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 23 | 52.27% | 1 | 25.00% |
Stephen Hemminger | 14 | 31.82% | 1 | 25.00% |
Herbert Xu | 5 | 11.36% | 1 | 25.00% |
Michał Mirosław | 2 | 4.55% | 1 | 25.00% |
Total | 44 | 100.00% | 4 | 100.00% |
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 21 | 53.85% | 1 | 25.00% |
Herbert Xu | 14 | 35.90% | 1 | 25.00% |
Stephen Hemminger | 4 | 10.26% | 2 | 50.00% |
Total | 39 | 100.00% | 4 | 100.00% |
static void br_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct pcpu_sw_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct pcpu_sw_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 107 | 63.69% | 2 | 33.33% |
Eric Dumazet | 57 | 33.93% | 2 | 33.33% |
Li RongQing | 2 | 1.19% | 1 | 16.67% |
Eric W. Biedermann | 2 | 1.19% | 1 | 16.67% |
Total | 168 | 100.00% | 6 | 100.00% |
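
The fetch/retry loop pairs with the u64_stats_update_begin()/end() writer sections, such as the tx update in br_dev_xmit() above. On 64-bit kernels the seqcount compiles away entirely; on 32-bit kernels it protects the reader from seeing a torn 64-bit counter while a writer is mid-update. For reference, a sketch of the matching rx-side writer as it appears on the receive path (modelled on br_pass_frame_up() in br_input.c, assuming the same struct pcpu_sw_netstats fields):

	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);
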
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	if (new_mtu > br_min_mtu(br))
		return -EINVAL;

	dev->mtu = new_mtu;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 35 | 50.72% | 1 | 25.00% |
Simon Wunderlich | 23 | 33.33% | 1 | 25.00% |
David S. Miller | 6 | 8.70% | 1 | 25.00% |
Pablo Neira Ayuso | 5 | 7.25% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 87 | 95.60% | 3 | 50.00% |
Joe Perches | 2 | 2.20% | 1 | 16.67% |
Danny Kukawka | 1 | 1.10% | 1 | 16.67% |
Toshiaki Makita | 1 | 1.10% | 1 | 16.67% |
Total | 91 | 100.00% | 6 | 100.00% |
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 48 | 60.00% | 1 | 50.00% |
Jiri Pirko | 32 | 40.00% | 1 | 50.00% |
Total | 80 | 100.00% | 2 | 100.00% |
static netdev_features_t br_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 23 | 71.88% | 1 | 25.00% |
Michał Mirosław | 5 | 15.62% | 2 | 50.00% |
Jesse Gross | 4 | 12.50% | 1 | 25.00% |
Total | 32 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 6 | 60.00% | 1 | 33.33% |
Américo Wang | 3 | 30.00% | 1 | 33.33% |
Stephen Hemminger | 1 | 10.00% | 1 | 33.33% |
Total | 10 | 100.00% | 3 | 100.00% |
static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 21 | 53.85% | 1 | 33.33% |
Américo Wang | 18 | 46.15% | 2 | 66.67% |
Total | 39 | 100.00% | 3 | 100.00% |
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Cong Wang | 41 | 53.25% | 1 | 25.00% |
Américo Wang | 24 | 31.17% | 1 | 25.00% |
Herbert Xu | 11 | 14.29% | 1 | 25.00% |
Eric W. Biedermann | 1 | 1.30% | 1 | 25.00% |
Total | 77 | 100.00% | 4 | 100.00% |
int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 15 | 50.00% | 2 | 40.00% |
Herbert Xu | 7 | 23.33% | 1 | 20.00% |
Américo Wang | 4 | 13.33% | 1 | 20.00% |
Cong Wang | 4 | 13.33% | 1 | 20.00% |
Total | 30 | 100.00% | 5 | 100.00% |
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Cong Wang | 64 | 77.11% | 1 | 20.00% |
Herbert Xu | 10 | 12.05% | 1 | 20.00% |
Stephen Hemminger | 6 | 7.23% | 1 | 20.00% |
Américo Wang | 2 | 2.41% | 1 | 20.00% |
Jiri Pirko | 1 | 1.20% | 1 | 20.00% |
Total | 83 | 100.00% | 5 | 100.00% |
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free_async(np);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 19 | 52.78% | 1 | 33.33% |
Stephen Hemminger | 16 | 44.44% | 1 | 33.33% |
Neil Horman | 1 | 2.78% | 1 | 33.33% |
Total | 36 | 100.00% | 3 | 100.00% |
#endif
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo	= br_getinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		= br_dev_open,
	.ndo_stop		= br_dev_stop,
	.ndo_init		= br_dev_init,
	.ndo_uninit		= br_dev_uninit,
	.ndo_start_xmit		= br_dev_xmit,
	.ndo_get_stats64	= br_get_stats64,
	.ndo_set_mac_address	= br_set_mac_address,
	.ndo_set_rx_mode	= br_dev_set_multicast_list,
	.ndo_change_rx_flags	= br_dev_change_rx_flags,
	.ndo_change_mtu		= br_change_mtu,
	.ndo_do_ioctl		= br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= br_netpoll_setup,
	.ndo_netpoll_cleanup	= br_netpoll_cleanup,
	.ndo_poll_controller	= br_poll_controller,
#endif
	.ndo_add_slave		= br_add_slave,
	.ndo_del_slave		= br_del_slave,
	.ndo_fix_features	= br_fix_features,
	.ndo_fdb_add		= br_fdb_add,
	.ndo_fdb_del		= br_fdb_delete,
	.ndo_fdb_dump		= br_fdb_dump,
	.ndo_bridge_getlink	= br_getlink,
	.ndo_bridge_setlink	= br_setlink,
	.ndo_bridge_dellink	= br_dellink,
	.ndo_features_check	= passthru_features_check,
};

static struct device_type br_type = {
	.name	= "bridge",
};
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = free_netdev;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_reserved_addr_base);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 179 | 68.85% | 7 | 25.00% |
Linus Torvalds (pre-git) | 20 | 7.69% | 1 | 3.57% |
Nikolay Aleksandrov | 10 | 3.85% | 1 | 3.57% |
Toshiaki Makita | 10 | 3.85% | 2 | 7.14% |
Vlad Yasevich | 8 | 3.08% | 1 | 3.57% |
Jarod Wilson | 6 | 2.31% | 1 | 3.57% |
Michał Mirosław | 5 | 1.92% | 1 | 3.57% |
Vivien Didelot | 4 | 1.54% | 1 | 3.57% |
Herbert Xu | 3 | 1.15% | 3 | 10.71% |
Wilfried Klaebe | 3 | 1.15% | 1 | 3.57% |
Alexey Dobriyan | 2 | 0.77% | 1 | 3.57% |
Patrick McHardy | 2 | 0.77% | 1 | 3.57% |
Phil Sutter | 2 | 0.77% | 1 | 3.57% |
Scott Feldman | 1 | 0.38% | 1 | 3.57% |
Ben Hutchings | 1 | 0.38% | 1 | 3.57% |
Jesse Gross | 1 | 0.38% | 1 | 3.57% |
Ido Schimmel | 1 | 0.38% | 1 | 3.57% |
Joe Perches | 1 | 0.38% | 1 | 3.57% |
Danny Kukawka | 1 | 0.38% | 1 | 3.57% |
Total | 260 | 100.00% | 28 | 100.00% |
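
br_dev_setup() is never called directly from this file; it is the setup callback handed to alloc_netdev() when a bridge is created, which is also where br_netdev_ops and br_ethtool_ops take effect. A sketch of that creation path, modelled on br_add_bridge() in br_if.c of the same release (error handling abbreviated; br_link_ops is the rtnl_link_ops defined in br_netlink.c):

int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);
	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}
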
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 710 | 37.08% | 23 | 22.12% |
Herbert Xu | 191 | 9.97% | 9 | 8.65% |
Linus Torvalds (pre-git) | 166 | 8.67% | 1 | 0.96% |
Nikolay Aleksandrov | 118 | 6.16% | 8 | 7.69% |
Jiri Pirko | 112 | 5.85% | 4 | 3.85% |
Cong Wang | 109 | 5.69% | 1 | 0.96% |
Vlad Yasevich | 85 | 4.44% | 8 | 7.69% |
Américo Wang | 81 | 4.23% | 5 | 4.81% |
Eric Dumazet | 75 | 3.92% | 2 | 1.92% |
Ido Schimmel | 44 | 2.30% | 1 | 0.96% |
Pablo Neira Ayuso | 38 | 1.98% | 3 | 2.88% |
Toshiaki Makita | 32 | 1.67% | 5 | 4.81% |
John Fastabend | 25 | 1.31% | 2 | 1.92% |
Simon Wunderlich | 23 | 1.20% | 1 | 0.96% |
Michał Mirosław | 17 | 0.89% | 2 | 1.92% |
Linus Lüssing | 12 | 0.63% | 2 | 1.92% |
Bart De Schuymer | 12 | 0.63% | 1 | 0.96% |
Linus Torvalds | 8 | 0.42% | 2 | 1.92% |
David S. Miller | 6 | 0.31% | 1 | 0.96% |
Jarod Wilson | 6 | 0.31% | 1 | 0.96% |
Jesse Gross | 5 | 0.26% | 1 | 0.96% |
Xin Long | 5 | 0.26% | 1 | 0.96% |
Vivien Didelot | 4 | 0.21% | 1 | 0.96% |
Li RongQing | 4 | 0.21% | 1 | 0.96% |
Patrick McHardy | 3 | 0.16% | 2 | 1.92% |
Wilfried Klaebe | 3 | 0.16% | 1 | 0.96% |
Joe Perches | 3 | 0.16% | 2 | 1.92% |
Arnaldo Carvalho de Melo | 3 | 0.16% | 1 | 0.96% |
Eric W. Biedermann | 3 | 0.16% | 2 | 1.92% |
Alexey Dobriyan | 2 | 0.10% | 1 | 0.96% |
Danny Kukawka | 2 | 0.10% | 2 | 1.92% |
Phil Sutter | 2 | 0.10% | 1 | 0.96% |
Neil Horman | 1 | 0.05% | 1 | 0.96% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.05% | 1 | 0.96% |
Scott Feldman | 1 | 0.05% | 1 | 0.96% |
Ben Hutchings | 1 | 0.05% | 1 | 0.96% |
Adrian Bunk | 1 | 0.05% | 1 | 0.96% |
Pavel Emelyanov | 1 | 0.05% | 1 | 0.96% |
Total | 1915 | 100.00% | 104 | 100.00% |