Release 4.7 include/linux/netpoll.h
/*
* Common code for low-level network console, dump, and debugger code
*
* Derived from netconsole, kgdb-over-ethernet, and netdump patches
*/
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
/*
 * Address storage shared by the IPv4 and IPv6 netpoll endpoints.
 * all[] gives raw 32-bit word access regardless of family; which
 * member is meaningful is selected by struct netpoll.ipv6.
 */
union inet_addr {
__u32 all[4];   /* raw word view of whichever address is stored */
__be32 ip;      /* IPv4 address, network byte order */
__be32 ip6[4];  /* IPv6 address as four big-endian words */
struct in_addr in;
struct in6_addr in6;
};
/*
 * Per-client netpoll configuration: the bound device and the
 * local/remote UDP endpoint used for emergency transmits
 * (netconsole, kgdb-over-ethernet, netdump).
 */
struct netpoll {
struct net_device *dev;      /* device packets are sent on */
char dev_name[IFNAMSIZ];     /* name used to look the device up */
const char *name;            /* client name, e.g. for logging */
union inet_addr local_ip, remote_ip;
bool ipv6;                   /* selects which inet_addr member is valid */
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];     /* destination MAC for transmits */
struct work_struct cleanup_work; /* deferred teardown (see __netpoll_free_async) */
};
/*
 * Per-device netpoll state, hung off the net_device (dev->npinfo is
 * tested in netpoll_poll_lock below).
 */
struct netpoll_info {
atomic_t refcnt;             /* number of users of this npinfo */
struct semaphore dev_lock;   /* serializes device poll enable/disable */
struct sk_buff_head txq;     /* packets queued when direct tx fails */
struct delayed_work tx_work; /* retries the queued transmits */
struct netpoll *netpoll;     /* back-pointer to the owning client */
struct rcu_head rcu;         /* deferred free of this structure */
};
#ifdef CONFIG_NETPOLL
extern void netpoll_poll_disable(struct net_device *dev);
extern void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| neil horman | 10 | 76.92% | 1 | 33.33% |
| ding tianhong | 2 | 15.38% | 1 | 33.33% |
| eric w. biederman | 1 | 7.69% | 1 | 33.33% |
| Total | 13 | 100.00% | 3 | 100.00% |
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| neil horman | 12 | 92.31% | 1 | 50.00% |
| eric w. biederman | 1 | 7.69% | 1 | 50.00% |
| Total | 13 | 100.00% | 2 | 100.00% |
#endif
/* Transmit a UDP message over the configured netpoll endpoint. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Parsing/printing of the textual option string (netconsole-style). */
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Setup/teardown; the __-prefixed variants presumably expect the caller
 * to hold the appropriate locks -- TODO confirm against the .c file. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Low-level transmit on an explicit device; wrapped by netpoll_send_skb. */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev);
/*
 * Send @skb through netpoll on np->dev with hard IRQs disabled around
 * the transmit (netpoll_tx_running relies on irqs being off here).
 */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(irqflags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| neil horman | 28 | 66.67% | 1 | 50.00% |
| americo wang | 14 | 33.33% | 1 | 50.00% |
| Total | 42 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_NETPOLL
/*
 * Acquire the per-NAPI poll lock if netpoll is attached to the device
 * (dev->npinfo set) and record this CPU as the poll owner.  Returns an
 * opaque cookie to hand to netpoll_poll_unlock(), or NULL when netpoll
 * is not active and no lock was taken.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *ndev = napi->dev;

	if (!ndev || !ndev->npinfo)
		return NULL;

	spin_lock(&napi->poll_lock);
	napi->poll_owner = smp_processor_id();
	return napi;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 35 | 66.04% | 2 | 40.00% |
| stephen hemminger | 16 | 30.19% | 1 | 20.00% |
| david s. miller | 1 | 1.89% | 1 | 20.00% |
| jeff moyer | 1 | 1.89% | 1 | 20.00% |
| Total | 53 | 100.00% | 5 | 100.00% |
/*
 * Release the lock taken by netpoll_poll_lock().  @have is the cookie
 * that call returned; NULL means no lock was taken and this is a no-op.
 */
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (!napi)
		return;

	napi->poll_owner = -1;
	spin_unlock(&napi->poll_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 25 | 64.10% | 2 | 50.00% |
| jeff moyer | 9 | 23.08% | 1 | 25.00% |
| stephen hemminger | 5 | 12.82% | 1 | 25.00% |
| Total | 39 | 100.00% | 4 | 100.00% |
/*
 * Heuristic: netpoll transmits run with hard IRQs disabled (see
 * netpoll_send_skb above), so irqs_disabled() stands in for "we are
 * in a netpoll tx context".  @dev is unused in this configuration.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
return irqs_disabled();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 15 | 93.75% | 1 | 50.00% |
| americo wang | 1 | 6.25% | 1 | 50.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
#else
/* Netpoll compiled out: no lock to take, always return a NULL cookie. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| stephen hemminger | 13 | 81.25% | 1 | 50.00% |
| matt mackall | 3 | 18.75% | 1 | 50.00% |
| Total | 16 | 100.00% | 2 | 100.00% |
/* Netpoll compiled out: matching no-op for the NULL cookie above. */
static inline void netpoll_poll_unlock(void *have)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| stephen hemminger | 7 | 70.00% | 1 | 50.00% |
| matt mackall | 3 | 30.00% | 1 | 50.00% |
| Total | 10 | 100.00% | 2 | 100.00% |
/* Netpoll compiled out: no per-device netpoll state to initialize. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| stephen hemminger | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
/* Netpoll compiled out: there can never be a netpoll transmit running. */
static inline bool netpoll_tx_running(struct net_device *dev)
{
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| herbert xu | 13 | 86.67% | 1 | 50.00% |
| americo wang | 2 | 13.33% | 1 | 50.00% |
| Total | 15 | 100.00% | 2 | 100.00% |
#endif
#endif
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| matt mackall | 158 | 32.05% | 4 | 14.29% |
| neil horman | 85 | 17.24% | 4 | 14.29% |
| americo wang | 75 | 15.21% | 5 | 17.86% |
| stephen hemminger | 66 | 13.39% | 4 | 14.29% |
| herbert xu | 46 | 9.33% | 2 | 7.14% |
| eric w. biederman | 29 | 5.88% | 2 | 7.14% |
| jeff moyer | 15 | 3.04% | 2 | 7.14% |
| satyam sharma | 9 | 1.83% | 1 | 3.57% |
| jiri pirko | 5 | 1.01% | 1 | 3.57% |
| ding tianhong | 3 | 0.61% | 1 | 3.57% |
| david s. miller | 1 | 0.20% | 1 | 3.57% |
| david howells | 1 | 0.20% | 1 | 3.57% |
| Total | 493 | 100.00% | 28 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.