Release 4.11 net/core/netpoll.c
/*
* Common framework for low-level network console, dump, and debugger code
*
* Sep 8 2003 Matt Mackall <mpm@selenic.com>
*
* based on the netconsole code from:
*
* Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2002 Red Hat, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
* We maintain a small pool of fully-sized skbs, to make sure the
* message gets out even in extreme OOM situations.
*/
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
static struct sk_buff_head skb_pool;
DEFINE_STATIC_SRCU(netpoll_srcu);
#define USEC_PER_POLL 50
#define MAX_SKB_SIZE \
(sizeof(struct ethhdr) + \
sizeof(struct iphdr) + \
sizeof(struct udphdr) + \
MAX_UDP_CHUNK)
static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...) \
pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...) \
pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...) \
pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
int status = NETDEV_TX_OK;
netdev_features_t features;
features = netif_skb_features(skb);
if (skb_vlan_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
/* This is actually a packet drop, but we
 * don't want the code that calls this
 * function to try to operate on a NULL skb.
 */
goto out;
}
}
status = netdev_start_xmit(skb, dev, txq, false);
out:
return status;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biederman | 88 | 93.62% | 1 | 16.67% |
David S. Miller | 4 | 4.26% | 3 | 50.00% |
Jiri Pirko | 2 | 2.13% | 2 | 33.33% |
Total | 94 | 100.00% | 6 | 100.00% |
static void queue_process(struct work_struct *work)
{
struct netpoll_info *npinfo =
container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
unsigned long flags;
while ((skb = skb_dequeue(&npinfo->txq))) {
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
unsigned int q_index;
if (!netif_device_present(dev) || !netif_running(dev)) {
kfree_skb(skb);
continue;
}
local_irq_save(flags);
/* check if skb->queue_mapping is still valid */
q_index = skb_get_queue_mapping(skb);
if (unlikely(q_index >= dev->real_num_tx_queues)) {
q_index = q_index % dev->real_num_tx_queues;
skb_set_queue_mapping(skb, q_index);
}
txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
HARD_TX_UNLOCK(dev, txq);
local_irq_restore(flags);
schedule_delayed_work(&npinfo->tx_work, HZ/10);
return;
}
HARD_TX_UNLOCK(dev, txq);
local_irq_restore(flags);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 54 | 24.55% | 2 | 15.38% |
Matt Mackall | 52 | 23.64% | 1 | 7.69% |
Tushar Dave | 45 | 20.45% | 1 | 7.69% |
David S. Miller | 21 | 9.55% | 2 | 15.38% |
Ingo Molnar | 20 | 9.09% | 1 | 7.69% |
David Howells | 14 | 6.36% | 2 | 15.38% |
Eric W. Biederman | 13 | 5.91% | 3 | 23.08% |
Tom Herbert | 1 | 0.45% | 1 | 7.69% |
Total | 220 | 100.00% | 13 | 100.00% |
/*
* Check whether delayed processing was scheduled for our NIC. If so,
* we attempt to grab the poll lock and use ->poll() to pump the card.
* If this fails, either we've recursed in ->poll() or it's already
* running on another CPU.
*
* Note: we don't mask interrupts with this lock because we're using
* trylock here and interrupts are already disabled in the softirq
* case. Further, we test the poll_owner to avoid recursion on UP
* systems where the lock doesn't exist.
*/
static void poll_one_napi(struct napi_struct *napi)
{
int work = 0;
/* net_rx_action's ->poll() invocations and ours are
 * synchronized by this test, which is only made while
 * holding the napi->poll_lock.
 */
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
return;
/* If the NPSVC bit was already set when we tried to set it,
 * napi has been disabled and we need to abort this
 * operation.
 */
if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
return;
/* We explicitly pass the polling call a budget of 0 to
 * indicate that we are clearing the Tx path only.
 */
work = napi->poll(napi, 0);
WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
trace_napi_poll(napi, work, 0);
clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 38 | 43.68% | 1 | 14.29% |
Neil Horman | 29 | 33.33% | 3 | 42.86% |
Eric W. Biederman | 11 | 12.64% | 1 | 14.29% |
Alexander Duyck | 5 | 5.75% | 1 | 14.29% |
Jesper Dangaard Brouer | 4 | 4.60% | 1 | 14.29% |
Total | 87 | 100.00% | 7 | 100.00% |
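The zero budget passed above is a contract with driver ->poll() routines: when called with budget == 0 they may reap Tx completions but must not process Rx packets (hence the WARN_ONCE if any work is reported). A minimal sketch of a hypothetical driver poll routine that honours this convention; the example_* names are invented for illustration:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = 0;

	/* Tx completions are always safe to reap, even with budget == 0;
	 * the netpoll path above relies on exactly this.
	 */
	example_clean_tx_ring(priv);

	if (budget)
		work_done = example_clean_rx_ring(priv, budget);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}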
static void poll_napi(struct net_device *dev)
{
struct napi_struct *napi;
int cpu = smp_processor_id();
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 21 | 30.88% | 1 | 10.00% |
Stephen Hemminger | 18 | 26.47% | 2 | 20.00% |
Matt Mackall | 14 | 20.59% | 2 | 20.00% |
Andrew Morton | 7 | 10.29% | 1 | 10.00% |
Jeff Moyer | 6 | 8.82% | 2 | 20.00% |
Neil Horman | 1 | 1.47% | 1 | 10.00% |
David S. Miller | 1 | 1.47% | 1 | 10.00% |
Total | 68 | 100.00% | 10 | 100.00% |
static void netpoll_poll_dev(struct net_device *dev)
{
const struct net_device_ops *ops;
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
/* Don't do any rx activity if the dev_lock mutex is held;
 * the dev_open/close paths use this to block netpoll activity
 * while changing device state.
 */
if (down_trylock(&ni->dev_lock))
return;
if (!netif_running(dev)) {
up(&ni->dev_lock);
return;
}
ops = dev->netdev_ops;
if (!ops->ndo_poll_controller) {
up(&ni->dev_lock);
return;
}
/* Process pending work on NIC */
ops->ndo_poll_controller(dev);
poll_napi(dev);
up(&ni->dev_lock);
zap_completion_queue();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Horman | 40 | 37.04% | 4 | 30.77% |
Andrew Morton | 24 | 22.22% | 1 | 7.69% |
Américo Wang | 15 | 13.89% | 2 | 15.38% |
Stephen Hemminger | 14 | 12.96% | 2 | 15.38% |
Pavel Emelyanov | 10 | 9.26% | 1 | 7.69% |
David S. Miller | 3 | 2.78% | 1 | 7.69% |
Matt Mackall | 1 | 0.93% | 1 | 7.69% |
Joe Perches | 1 | 0.93% | 1 | 7.69% |
Total | 108 | 100.00% | 13 | 100.00% |
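ndo_poll_controller is the driver hook used just above to pump the NIC with interrupts disabled. A common way drivers implement it is to mask the device's IRQ line and run the regular interrupt handler by hand; a hedged sketch of that pattern, with hypothetical example_* identifiers:

static void example_netpoll(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Service the device manually: invoke the normal interrupt handler
	 * with the line masked, so it works even when the system cannot
	 * take interrupts.
	 */
	disable_irq(priv->irq);
	example_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
}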
void netpoll_poll_disable(struct net_device *dev)
{
struct netpoll_info *ni;
int idx;
might_sleep();
idx = srcu_read_lock(&netpoll_srcu);
ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
if (ni)
down(&ni->dev_lock);
srcu_read_unlock(&netpoll_srcu, idx);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Horman | 59 | 96.72% | 2 | 50.00% |
Ding Tianhong | 1 | 1.64% | 1 | 25.00% |
Eric W. Biederman | 1 | 1.64% | 1 | 25.00% |
Total | 61 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
struct netpoll_info *ni;
rcu_read_lock();
ni = rcu_dereference(dev->npinfo);
if (ni)
up(&ni->dev_lock);
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neil Horman | 41 | 97.62% | 2 | 66.67% |
Eric W. Biederman | 1 | 2.38% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(netpoll_poll_enable);
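These two exports let the core device open/close paths fence off netpoll while device state changes. A rough usage sketch (the real callers live in net/core/dev.c; example_reconfigure is a made-up wrapper):

static void example_reconfigure(struct net_device *dev)
{
	netpoll_poll_disable(dev);	/* blocks until any in-flight netpoll poll finishes */
	/* ... change device state, e.g. via ndo_stop()/ndo_open() ... */
	netpoll_poll_enable(dev);	/* releases dev_lock, polling may resume */
}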
static void refill_skbs(void)
{
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&skb_pool.lock, flags);
while (skb_pool.qlen < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
__skb_queue_tail(&skb_pool, skb);
}
spin_unlock_irqrestore(&skb_pool.lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Mackall | 55 | 78.57% | 1 | 50.00% |
Stephen Hemminger | 15 | 21.43% | 1 | 50.00% |
Total | 70 | 100.00% | 2 | 100.00% |
static void zap_completion_queue(void)
{
unsigned long flags;
struct softnet_data *sd = &get_cpu_var(softnet_data);
if (sd->completion_queue) {
struct sk_buff *clist;
local_irq_save(flags);
clist = sd->completion_queue;
sd->completion_queue = NULL;
local_irq_restore(flags);
while (clist != NULL) {
struct sk_buff *skb = clist;
clist = clist->next;
if (!skb_irq_freeable(skb)) {
atomic_inc(&skb->users);
dev_kfree_skb_any(skb); /* put this one back */
} else {
__kfree_skb(skb);
}
}
}
put_cpu_var(softnet_data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 112 | 96.55% | 1 | 50.00% |
Eric W. Biederman | 4 | 3.45% | 1 | 50.00% |
Total | 116 | 100.00% | 2 | 100.00% |
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
int count = 0;
struct sk_buff *skb;
zap_completion_queue();
refill_skbs();
repeat:
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
skb = skb_dequeue(&skb_pool);
if (!skb) {
if (++count < 10) {
netpoll_poll_dev(np->dev);
goto repeat;
}
return NULL;
}
atomic_set(&skb->users, 1);
skb_reserve(skb, reserve);
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Mackall | 91 | 84.26% | 1 | 25.00% |
Stephen Hemminger | 11 | 10.19% | 1 | 25.00% |
Joe Perches | 3 | 2.78% | 1 | 25.00% |
David S. Miller | 3 | 2.78% | 1 | 25.00% |
Total | 108 | 100.00% | 4 | 100.00% |
static int netpoll_owner_active(struct net_device *dev)
{
struct napi_struct *napi;
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (napi->poll_owner == smp_processor_id())
return 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev)
{
int status = NETDEV_TX_BUSY;
unsigned long tries;
/* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo;
WARN_ON_ONCE(!irqs_disabled());
npinfo = rcu_dereference_bh(np->dev->npinfo);
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
dev_kfree_skb_irq(skb);
return;
}
/* don't get messages out of order, and no recursion */
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
txq = netdev_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
tries > 0; --tries) {
if (HARD_TX_TRYLOCK(dev, txq)) {
if (!netif_xmit_stopped(txq))
status = netpoll_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
if (status == NETDEV_TX_OK)
break;
}
/* tickle the device, maybe there is some cleanup */
netpoll_poll_dev(np->dev);
udelay(USEC_PER_POLL);
}
WARN_ONCE(!irqs_disabled(),
"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
dev->name, dev->netdev_ops->ndo_start_xmit);
}
if (status != NETDEV_TX_OK) {
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Mackall | 74 | 30.83% | 7 | 23.33% |
Stephen Hemminger | 71 | 29.58% | 4 | 13.33% |
David S. Miller | 18 | 7.50% | 2 | 6.67% |
Américo Wang | 16 | 6.67% | 2 | 6.67% |
Dongdong Deng | 15 | 6.25% | 1 | 3.33% |
Eric W. Biederman | 12 | 5.00% | 3 | 10.00% |
Jeff Moyer | 10 | 4.17% | 1 | 3.33% |
Neil Horman | 6 | 2.50% | 1 | 3.33% |
Andrew Morton | 3 | 1.25% | 1 | 3.33% |
Eric Dumazet | 3 | 1.25% | 1 | 3.33% |
Joe Perches | 3 | 1.25% | 1 | 3.33% |
Jeremy Fitzhardinge | 2 | 0.83% | 1 | 3.33% |
David Howells | 2 | 0.83% | 1 | 3.33% |
Jason (Hui) Wang | 2 | 0.83% | 1 | 3.33% |
Herbert Xu | 1 | 0.42% | 1 | 3.33% |
Peter P. Waskiewicz Jr | 1 | 0.42% | 1 | 3.33% |
Tom Herbert | 1 | 0.42% | 1 | 3.33% |
Total | 240 | 100.00% | 30 | 100.00% |
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
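Callers normally reach this function through the netpoll_send_skb() inline helper in include/linux/netpoll.h, which takes care of the interrupts-disabled requirement checked by the WARN_ON_ONCE() above. It looks roughly like this:

static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;

	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}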
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
int total_len, ip_len, udp_len;
struct sk_buff *skb;
struct udphdr *udph;
struct iphdr *iph;
struct ethhdr *eth;
static atomic_t ip_ident;
struct ipv6hdr *ip6h;
WARN_ON_ONCE(!irqs_disabled());
udp_len = len + sizeof(*udph);
if (np->ipv6)
ip_len = udp_len + sizeof(*ip6h);
else
ip_len = udp_len + sizeof(*iph);
total_len = ip_len + LL_RESERVED_SPACE(np->dev);
skb = find_skb(np, total_len + np->dev->needed_tailroom,
total_len - len);
if (!skb)
return;
skb_copy_to_linear_data(skb, msg, len);
skb_put(skb, len);
skb_push(skb, sizeof(*udph));
skb_reset_transport_header(skb);
udph = udp_hdr(skb);
udph->source = htons(np->local_port);
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
if (np->ipv6) {
udph->check = 0;
udph->check = csum_ipv6_magic(&np->local_ip.in6,
&np->remote_ip.in6,
udp_len, IPPROTO_UDP,
csum_partial(udph, udp_len, 0));
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
ip6h = ipv6_hdr(skb);
/* ip6h->version = 6; ip6h->priority = 0; */
put_unaligned(0x60, (unsigned char *)ip6h);
ip6h->flow_lbl[0] = 0;
ip6h->flow_lbl[1] = 0;
ip6h->flow_lbl[2] = 0;
ip6h->payload_len = htons(sizeof(struct udphdr) + len);
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = 32;
ip6h->saddr = np->local_ip.in6;
ip6h->daddr = np->remote_ip.in6;
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
} else {
udph->check = 0;
udph->check = csum_tcpudp_magic(np->local_ip.ip,
np->remote_ip.ip,
udp_len, IPPROTO_UDP,
csum_partial(udph, udp_len, 0));
if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
skb_push(skb, sizeof(*iph));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
/* iph->version = 4; iph->ihl = 5; */
put_unaligned(0x45, (unsigned char *)iph);
iph->tos = 0;
put_unaligned(htons(ip_len), &(iph->tot_len));
iph->id = htons(atomic_inc_return(&ip_ident));
iph->frag_off = 0;
iph->ttl = 64;
iph->protocol = IPPROTO_UDP;
iph->check = 0;
put_unaligned(np->local_ip.ip, &(iph->saddr));
put_unaligned(np->remote_ip.ip, &(iph->daddr));
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb->protocol = eth->h_proto = htons(ETH_P_IP);
}
ether_addr_copy(eth->h_source, np->dev->dev_addr);
ether_addr_copy(eth->h_dest, np->remote_mac);
skb->dev = np->dev;
netpoll_send_skb(np, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Mackall | 323 | 47.64% | 4 | 20.00% |
Américo Wang | 237 | 34.96% | 2 | 10.00% |
Chris Lalancette | 41 | 6.05% | 1 | 5.00% |
Eric Dumazet | 29 | 4.28% | 2 | 10.00% |
Arnaldo Carvalho de Melo | 28 | 4.13% | 5 | 25.00% |
Stephen Hemminger | 9 | 1.33% | 2 | 10.00% |
Nikolay Aleksandrov | 7 | 1.03% | 1 | 5.00% |
Joe Perches | 2 | 0.29% | 1 | 5.00% |
Sven Henkel | 1 | 0.15% | 1 | 5.00% |
Al Viro | 1 | 0.15% | 1 | 5.00% |
Total | 678 | 100.00% | 20 | 100.00% |
EXPORT_SYMBOL(netpoll_send_udp);
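netconsole is the primary user of this function. Since each datagram carries at most MAX_UDP_CHUNK bytes of payload and the call must be made with interrupts disabled, a caller with a long message splits it into chunks; a simplified, hypothetical sketch (example_send_message is not part of this file):

static void example_send_message(struct netpoll *np, const char *msg, int len)
{
	unsigned long flags;
	int chunk;

	local_irq_save(flags);
	while (len > 0) {
		chunk = min(len, MAX_UDP_CHUNK);
		netpoll_send_udp(np, msg, chunk);
		msg += chunk;
		len -= chunk;
	}
	local_irq_restore(flags);
}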
void netpoll_print_options(struct netpoll *np)
{
np_info(np, "local port %d\n", np->local_port);
if (np->ipv6)
np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
else
np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
np_info(np, "interface '%s'\n", np->dev_name);
np_info(np, "remote port %d\n", np->remote_port);
if (np->ipv6)
np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
else
np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Satyam Sharma | 50 | 40.32% | 1 | 11.11% |
Américo Wang | 48 | 38.71% | 2 | 22.22% |
Joe Perches | 17 | 13.71% | 2 | 22.22% |
Eric W. Biederman | 3 | 2.42% | 1 | 11.11% |
Matt Mackall | 3 | 2.42% | 1 | 11.11% |
Harvey Harrison | 2 | 1.61% | 1 | 11.11% |
Jeff Moyer | 1 | 0.81% | 1 | 11.11% |
Total | 124 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
const char *end;
if (!strchr(str, ':') &&
in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
if (!*end)
return 0;
}
if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
if (!*end)
return 1;
#else
return -1;
#endif
}
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 117 | 100.00% | 1 | 100.00% |
Total | 117 | 100.00% | 1 | 100.00% |
int netpoll_parse_options(struct netpoll *np, char *opt)
{
char *cur=opt, *delim;
int ipv6;
bool ipversion_set = false;
if (*cur != '@') {
if ((delim = strchr(cur, '@')) == NULL)
goto parse_failed;
*delim = 0;
if (kstrtou16(cur, 10, &np->local_port))
goto parse_failed;
cur = delim;
}
cur++;
if (*cur != '/') {
ipversion_set = true;
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
if (ipv6 < 0)
goto parse_failed;
else
np->ipv6 = (bool)ipv6;
cur = delim;
}
cur++;
if (*cur != ',') {
/* parse out dev name */
if ((delim = strchr(cur, ',')) == NULL)
goto parse_failed;
*delim = 0;
strlcpy(np->dev_name, cur, sizeof(np->dev_name));
cur = delim;
}
cur++;
if (*cur != '@') {
/* dst port */
if ((delim = strchr(cur, '@')) == NULL)
goto parse_failed;
*delim = 0;
if (*cur == ' ' || *cur == '\t')
np_info(np, "warning: whitespace is not allowed\n");
if (kstrtou16(cur, 10, &np->remote_port))
goto parse_failed;
cur = delim;
}
cur++;
/* dst ip */
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
if (ipv6 < 0)
goto parse_failed;
else if (ipversion_set && np->ipv6 != (bool)ipv6)
goto parse_failed;
else
np->ipv6 = (bool)ipv6;
cur = delim + 1;
if (*cur != 0) {
/* MAC address */
if (!mac_pton(cur, np->remote_mac))
goto parse_failed;
}
netpoll_print_options(np);
return 0;
parse_failed:
np_info(np, "couldn't parse config at '%s'!\n", cur);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Mackall | 284 | 67.78% | 1 | 12.50% |
Américo Wang | 86 | 20.53% | 2 | 25.00% |
Abhijit Pawar | 29 | 6.92% | 1 | 12.50% |
Sabrina Dubroca | 11 | 2.63% | 1 | 12.50% |
Joe Perches | 6 | 1.43% | 1 | 12.50% |
Alexey Dobriyan | 2 | 0.48% | 1 | 12.50% |
Satyam Sharma | 1 | 0.24% | 1 | 12.50% |
Total | 419 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(netpoll_parse_options);
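The option string parsed above has the form <local-port>@<local-ip>/<device>,<remote-port>@<remote-ip>/<remote-mac>, with every field other than the remote IP optional. A small hypothetical caller (names and addresses are made up; note the string must be writable because parsing inserts NUL terminators):

static int example_parse(void)
{
	static struct netpoll example_np = {
		.name = "example",
	};
	static char example_opt[] =
		"6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55";

	return netpoll_parse_options(&example_np, example_opt);
}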
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
struct netpoll_info *npinfo;
const struct net_device_ops *ops;
int err;
np->dev = ndev;
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
np_err(np, "%s doesn't support polling, aborting\n",
np->dev_name);
err = -ENOTSUPP;
goto out;
}
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
goto out;
}
sema_init(&npinfo->dev_lock, 1);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_setup) {
err = ops->ndo_netpoll_setup(ndev, npinfo);
if (err)
goto free_npinfo;
}
} else {
npinfo = rtnl_dereference(ndev->npinfo);
atomic_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
return 0;
free_npinfo:
kfree(npinfo);