Release 4.11 net/core/link_watch.c
/*
* Linux network device link state notification
*
* Author:
* Stefan Rompf <sux@loplof.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>
/* Bit numbers in linkwatch_flags. */
enum lw_bits {
	LW_URGENT = 0,	/* an urgent event is pending; run the work ASAP */
};
static unsigned long linkwatch_flags;
/* Earliest time (in jiffies) the next rate-limited run may happen. */
static unsigned long linkwatch_nextevent;
static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
/* Devices with a pending link event; protected by lweventlist_lock. */
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
/*
 * Map the carrier/dormant state of @dev to an RFC 2863 operstate.
 * A device with carrier is IF_OPER_UP unless it is dormant; without
 * carrier, a stacked device (ifindex != iflink) reports that the
 * lower layer is down rather than the device itself.
 */
static unsigned char default_operstate(const struct net_device *dev)
{
	if (netif_carrier_ok(dev))
		return netif_dormant(dev) ? IF_OPER_DORMANT : IF_OPER_UP;

	if (dev->ifindex != dev_get_iflink(dev))
		return IF_OPER_LOWERLAYERDOWN;

	return IF_OPER_DOWN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stefan Rompf | 47 | 94.00% | 1 | 50.00% |
Nicolas Dichtel | 3 | 6.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
/*
 * Apply the RFC 2863 link-mode policy to @dev and publish the result
 * in dev->operstate.  In IF_LINK_MODE_DORMANT mode a link that would
 * otherwise be IF_OPER_UP is reported as IF_OPER_DORMANT, letting
 * userspace decide when the link is really usable.
 */
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);
	/* Fast path: state unchanged, nothing to publish. */
	if (operstate == dev->operstate)
		return;
	/* dev->operstate is written under dev_base_lock. */
	write_lock_bh(&dev_base_lock);
	switch(dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;
	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}
	dev->operstate = operstate;
	write_unlock_bh(&dev_base_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stefan Rompf | 74 | 100.00% | 1 | 100.00% |
Total | 74 | 100.00% | 1 | 100.00% |
void linkwatch_init_dev(struct net_device *dev)
{
/* Handle pre-registration link state changes */
if (!netif_carrier_ok(dev) || netif_dormant(dev))
rfc2863_policy(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Hutchings | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/*
 * Decide whether a link event on @dev should bypass the one-per-second
 * rate limit.  Events on stopped devices are never urgent; stacked
 * devices and team ports always are; otherwise an event is urgent only
 * when the carrier is up while the qdisc is changing.
 */
static bool linkwatch_urgent_event(struct net_device *dev)
{
	bool urgent;

	if (!netif_running(dev)) {
		urgent = false;
	} else if (dev->ifindex != dev_get_iflink(dev) ||
		   (dev->priv_flags & IFF_TEAM_PORT)) {
		urgent = true;
	} else {
		urgent = netif_carrier_ok(dev) && qdisc_tx_changing(dev);
	}

	return urgent;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 20 | 34.48% | 1 | 20.00% |
Eric Dumazet | 19 | 32.76% | 1 | 20.00% |
Flavio Leitner | 11 | 18.97% | 1 | 20.00% |
David S. Miller | 5 | 8.62% | 1 | 20.00% |
Nicolas Dichtel | 3 | 5.17% | 1 | 20.00% |
Total | 58 | 100.00% | 5 | 100.00% |
/*
 * Queue @dev on the global link event list if it is not already there.
 * Takes a reference on the device; the matching dev_put() happens in
 * linkwatch_do_dev() once the event has been processed.
 */
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;
	spin_lock_irqsave(&lweventlist_lock, flags);
	/* An empty link_watch_list node means the device is not yet queued. */
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 36 | 61.02% | 1 | 33.33% |
Eric Dumazet | 22 | 37.29% | 1 | 33.33% |
Stefan Rompf | 1 | 1.69% | 1 | 33.33% |
Total | 59 | 100.00% | 3 | 100.00% |
/*
 * Schedule the link watch worker.  A non-urgent request runs no
 * earlier than linkwatch_nextevent (the ~1/sec rate limit); an
 * urgent request pulls the work forward to run immediately.
 */
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;
	/* Already scheduled for immediate execution; nothing can be sooner. */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;
	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		/* Lost the race: someone else already marked it urgent. */
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}
	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;
	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 *
	 * NOTE: LW_URGENT is re-tested (rather than using @urgent) because
	 * another CPU may have set it since the checks above.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 81 | 89.01% | 2 | 50.00% |
Tejun Heo | 9 | 9.89% | 1 | 25.00% |
Stefan Rompf | 1 | 1.10% | 1 | 25.00% |
Total | 91 | 100.00% | 4 | 100.00% |
/*
 * Process one queued link event for @dev: recompute and publish its
 * operstate, (de)activate its qdisc to match the carrier state, and
 * fire the state-change notification.  Drops the device reference
 * taken when the event was queued (see linkwatch_add_event()).
 */
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_atomic();
	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);
		netdev_state_change(dev);
	}
	/* Matches the dev_hold() done when the event was queued. */
	dev_put(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 68 | 98.55% | 1 | 50.00% |
Peter Zijlstra | 1 | 1.45% | 1 | 50.00% |
Total | 69 | 100.00% | 2 | 100.00% |
/*
 * Drain the pending link event list.  When @urgent_only is set, only
 * urgent events are processed and the rest are requeued for the next
 * rate-limited run.  Called with RTNL held; lweventlist_lock is
 * dropped around each linkwatch_do_dev() call.
 */
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);
	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket. This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;
	clear_bit(LW_URGENT, &linkwatch_flags);
	spin_lock_irq(&lweventlist_lock);
	/* Splice the whole list onto a private head so the lock can be dropped. */
	list_splice_init(&lweventlist, &wrk);
	while (!list_empty(&wrk)) {
		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);
		/* Defer non-urgent events back to the global list. */
		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}
	/* Anything deferred (or newly queued meanwhile) needs a later run. */
	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 66 | 40.24% | 3 | 42.86% |
Eric Dumazet | 61 | 37.20% | 1 | 14.29% |
Stefan Rompf | 28 | 17.07% | 2 | 28.57% |
Tommy S. Christensen | 9 | 5.49% | 1 | 14.29% |
Total | 164 | 100.00% | 7 | 100.00% |
void linkwatch_forget_dev(struct net_device *dev)
{
unsigned long flags;
int clean = 0;
spin_lock_irqsave(&lweventlist_lock, flags);
if (!list_empty(&dev->link_watch_list)) {
list_del_init(&dev->link_watch_list);
clean = 1;
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
if (clean)
linkwatch_do_dev(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 51 | 73.91% | 1 | 25.00% |
Stefan Rompf | 8 | 11.59% | 1 | 25.00% |
Herbert Xu | 7 | 10.14% | 1 | 25.00% |
Tommy S. Christensen | 3 | 4.35% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	/* Full drain: process all pending events, not just urgent ones. */
	__linkwatch_run_queue(0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 11 | 91.67% | 1 | 50.00% |
Stefan Rompf | 1 | 8.33% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
/*
 * Delayed-work handler.  If we run before linkwatch_nextevent has
 * passed, this wakeup was urgent, so process only urgent events and
 * leave the rest for the rate-limited run.
 */
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stefan Rompf | 16 | 59.26% | 1 | 25.00% |
Herbert Xu | 8 | 29.63% | 1 | 25.00% |
David Howells | 2 | 7.41% | 1 | 25.00% |
Stephen Hemminger | 1 | 3.70% | 1 | 25.00% |
Total | 27 | 100.00% | 4 | 100.00% |
/*
 * Record a link state change on @dev and schedule its processing.
 * The __LINK_STATE_LINKWATCH_PENDING bit guarantees the device is
 * queued at most once; if it was already pending, only an urgent
 * event needs to (re)schedule the work earlier.
 */
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		/* First event for this device: put it on the list. */
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;
	linkwatch_schedule_work(urgent);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stefan Rompf | 33 | 66.00% | 1 | 20.00% |
Herbert Xu | 16 | 32.00% | 3 | 60.00% |
David S. Miller | 1 | 2.00% | 1 | 20.00% |
Total | 50 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(linkwatch_fire_event);	/* exported for use by driver modules */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stefan Rompf | 266 | 31.67% | 2 | 9.09% |
Herbert Xu | 248 | 29.52% | 3 | 13.64% |
Eric Dumazet | 225 | 26.79% | 2 | 9.09% |
Ben Hutchings | 29 | 3.45% | 1 | 4.55% |
Tommy S. Christensen | 15 | 1.79% | 1 | 4.55% |
Flavio Leitner | 11 | 1.31% | 1 | 4.55% |
Arnaldo Carvalho de Melo | 9 | 1.07% | 1 | 4.55% |
Tejun Heo | 9 | 1.07% | 1 | 4.55% |
Nicolas Dichtel | 6 | 0.71% | 1 | 4.55% |
David S. Miller | 6 | 0.71% | 1 | 4.55% |
David Howells | 5 | 0.60% | 2 | 9.09% |
Thomas Gleixner | 4 | 0.48% | 1 | 4.55% |
Krishna Kumar | 3 | 0.36% | 1 | 4.55% |
Peter Zijlstra | 1 | 0.12% | 1 | 4.55% |
Stephen Hemminger | 1 | 0.12% | 1 | 4.55% |
Fabian Frederick | 1 | 0.12% | 1 | 4.55% |
Adrian Bunk | 1 | 0.12% | 1 | 4.55% |
Total | 840 | 100.00% | 22 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.