cregit-Linux how code gets into the kernel

Release 4.11 net/core/link_watch.c

Directory: net/core
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>



/* Flag bits kept in linkwatch_flags. */
enum lw_bits {
	/* Set while an urgent (immediate) linkwatch run is scheduled;
	 * cleared at the start of each queue run. */
	LW_URGENT = 0,
};


/* Bitmask of lw_bits flags (only LW_URGENT is defined). */
static unsigned long linkwatch_flags;

/* Earliest time, in jiffies, at which the next non-urgent
 * linkwatch pass may run (rate limit: one pass per second). */
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

/* Devices with a pending link event, linked via dev->link_watch_list.
 * Each queued device holds a reference (dev_hold). */
static LIST_HEAD(lweventlist);
/* Protects lweventlist and every dev->link_watch_list membership. */
static DEFINE_SPINLOCK(lweventlist_lock);


static unsigned char default_operstate(const struct net_device *dev) { if (!netif_carrier_ok(dev)) return (dev->ifindex != dev_get_iflink(dev) ? IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); if (netif_dormant(dev)) return IF_OPER_DORMANT; return IF_OPER_UP; }

Contributors

PersonTokensPropCommitsCommitProp
Stefan Rompf4794.00%150.00%
Nicolas Dichtel36.00%150.00%
Total50100.00%2100.00%


static void rfc2863_policy(struct net_device *dev) { unsigned char operstate = default_operstate(dev); if (operstate == dev->operstate) return; write_lock_bh(&dev_base_lock); switch(dev->link_mode) { case IF_LINK_MODE_DORMANT: if (operstate == IF_OPER_UP) operstate = IF_OPER_DORMANT; break; case IF_LINK_MODE_DEFAULT: default: break; } dev->operstate = operstate; write_unlock_bh(&dev_base_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Stefan Rompf74100.00%1100.00%
Total74100.00%1100.00%


void linkwatch_init_dev(struct net_device *dev) { /* Handle pre-registration link state changes */ if (!netif_carrier_ok(dev) || netif_dormant(dev)) rfc2863_policy(dev); }

Contributors

PersonTokensPropCommitsCommitProp
Ben Hutchings29100.00%1100.00%
Total29100.00%1100.00%


static bool linkwatch_urgent_event(struct net_device *dev) { if (!netif_running(dev)) return false; if (dev->ifindex != dev_get_iflink(dev)) return true; if (dev->priv_flags & IFF_TEAM_PORT) return true; return netif_carrier_ok(dev) && qdisc_tx_changing(dev); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu2034.48%120.00%
Eric Dumazet1932.76%120.00%
Flavio Leitner1118.97%120.00%
David S. Miller58.62%120.00%
Nicolas Dichtel35.17%120.00%
Total58100.00%5100.00%


static void linkwatch_add_event(struct net_device *dev) { unsigned long flags; spin_lock_irqsave(&lweventlist_lock, flags); if (list_empty(&dev->link_watch_list)) { list_add_tail(&dev->link_watch_list, &lweventlist); dev_hold(dev); } spin_unlock_irqrestore(&lweventlist_lock, flags); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu3661.02%133.33%
Eric Dumazet2237.29%133.33%
Stefan Rompf11.69%133.33%
Total59100.00%3100.00%


static void linkwatch_schedule_work(int urgent) { unsigned long delay = linkwatch_nextevent - jiffies; if (test_bit(LW_URGENT, &linkwatch_flags)) return; /* Minimise down-time: drop delay for up event. */ if (urgent) { if (test_and_set_bit(LW_URGENT, &linkwatch_flags)) return; delay = 0; } /* If we wrap around we'll delay it by at most HZ. */ if (delay > HZ) delay = 0; /* * If urgent, schedule immediate execution; otherwise, don't * override the existing timer. */ if (test_bit(LW_URGENT, &linkwatch_flags)) mod_delayed_work(system_wq, &linkwatch_work, 0); else schedule_delayed_work(&linkwatch_work, delay); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu8189.01%250.00%
Tejun Heo99.89%125.00%
Stefan Rompf11.10%125.00%
Total91100.00%4100.00%


static void linkwatch_do_dev(struct net_device *dev) { /* * Make sure the above read is complete since it can be * rewritten as soon as we clear the bit below. */ smp_mb__before_atomic(); /* We are about to handle this device, * so new events can be accepted */ clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); rfc2863_policy(dev); if (dev->flags & IFF_UP) { if (netif_carrier_ok(dev)) dev_activate(dev); else dev_deactivate(dev); netdev_state_change(dev); } dev_put(dev); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet6898.55%150.00%
Peter Zijlstra11.45%150.00%
Total69100.00%2100.00%


static void __linkwatch_run_queue(int urgent_only) { struct net_device *dev; LIST_HEAD(wrk); /* * Limit the number of linkwatch events to one * per second so that a runaway driver does not * cause a storm of messages on the netlink * socket. This limit does not apply to up events * while the device qdisc is down. */ if (!urgent_only) linkwatch_nextevent = jiffies + HZ; /* Limit wrap-around effect on delay. */ else if (time_after(linkwatch_nextevent, jiffies + HZ)) linkwatch_nextevent = jiffies; clear_bit(LW_URGENT, &linkwatch_flags); spin_lock_irq(&lweventlist_lock); list_splice_init(&lweventlist, &wrk); while (!list_empty(&wrk)) { dev = list_first_entry(&wrk, struct net_device, link_watch_list); list_del_init(&dev->link_watch_list); if (urgent_only && !linkwatch_urgent_event(dev)) { list_add_tail(&dev->link_watch_list, &lweventlist); continue; } spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); spin_lock_irq(&lweventlist_lock); } if (!list_empty(&lweventlist)) linkwatch_schedule_work(0); spin_unlock_irq(&lweventlist_lock); }

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu6640.24%342.86%
Eric Dumazet6137.20%114.29%
Stefan Rompf2817.07%228.57%
Tommy S. Christensen95.49%114.29%
Total164100.00%7100.00%


void linkwatch_forget_dev(struct net_device *dev) { unsigned long flags; int clean = 0; spin_lock_irqsave(&lweventlist_lock, flags); if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) linkwatch_do_dev(dev); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet5173.91%125.00%
Stefan Rompf811.59%125.00%
Herbert Xu710.14%125.00%
Tommy S. Christensen34.35%125.00%
Total69100.00%4100.00%

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	/* Synchronous full drain: process every pending event, urgent or not. */
	__linkwatch_run_queue(0);
}

Contributors

PersonTokensPropCommitsCommitProp
Herbert Xu1191.67%150.00%
Stefan Rompf18.33%150.00%
Total12100.00%2100.00%


static void linkwatch_event(struct work_struct *dummy) { rtnl_lock(); __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies)); rtnl_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
Stefan Rompf1659.26%125.00%
Herbert Xu829.63%125.00%
David Howells27.41%125.00%
Stephen Hemminger13.70%125.00%
Total27100.00%4100.00%


void linkwatch_fire_event(struct net_device *dev) { bool urgent = linkwatch_urgent_event(dev); if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { linkwatch_add_event(dev); } else if (!urgent) return; linkwatch_schedule_work(urgent); }

Contributors

PersonTokensPropCommitsCommitProp
Stefan Rompf3366.00%120.00%
Herbert Xu1632.00%360.00%
David S. Miller12.00%120.00%
Total50100.00%5100.00%

EXPORT_SYMBOL(linkwatch_fire_event);

Overall Contributors

PersonTokensPropCommitsCommitProp
Stefan Rompf26631.67%29.09%
Herbert Xu24829.52%313.64%
Eric Dumazet22526.79%29.09%
Ben Hutchings293.45%14.55%
Tommy S. Christensen151.79%14.55%
Flavio Leitner111.31%14.55%
Arnaldo Carvalho de Melo91.07%14.55%
Tejun Heo91.07%14.55%
Nicolas Dichtel60.71%14.55%
David S. Miller60.71%14.55%
David Howells50.60%29.09%
Thomas Gleixner40.48%14.55%
Krishna Kumar30.36%14.55%
Peter Zijlstra10.12%14.55%
Stephen Hemminger10.12%14.55%
Fabian Frederick10.12%14.55%
Adrian Bunk10.12%14.55%
Total840100.00%22100.00%
Directory: net/core
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.