Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Anton Ivanov | 7326 | 93.74% | 12 | 20.34% |
Jeff Dike | 308 | 3.94% | 8 | 13.56% |
Johannes Berg | 36 | 0.46% | 5 | 8.47% |
Paolo 'Blaisorblade' Giarrusso | 15 | 0.19% | 2 | 3.39% |
Sjoerd Simons | 10 | 0.13% | 1 | 1.69% |
Mike Rapoport | 10 | 0.13% | 4 | 6.78% |
Kees Cook | 10 | 0.13% | 1 | 1.69% |
Tiwei Bie | 8 | 0.10% | 1 | 1.69% |
Peter Zijlstra | 8 | 0.10% | 1 | 1.69% |
Martin Schwidefsky | 7 | 0.09% | 1 | 1.69% |
Gerd Knorr | 7 | 0.09% | 1 | 1.69% |
Allen Pais | 6 | 0.08% | 1 | 1.69% |
Thomas Gleixner | 6 | 0.08% | 2 | 3.39% |
Jakub Kiciński | 6 | 0.08% | 2 | 3.39% |
Wang Chen | 6 | 0.08% | 1 | 1.69% |
Hao Chen | 5 | 0.06% | 1 | 1.69% |
Paul Chavent | 5 | 0.06% | 1 | 1.69% |
Christoph Hellwig | 5 | 0.06% | 1 | 1.69% |
Xiang Yang | 5 | 0.06% | 1 | 1.69% |
Christophe Jaillet | 5 | 0.06% | 2 | 3.39% |
Al Viro | 4 | 0.05% | 1 | 1.69% |
Michael S. Tsirkin | 4 | 0.05% | 1 | 1.69% |
Stephen Hemminger | 3 | 0.04% | 1 | 1.69% |
Linus Torvalds (pre-git) | 2 | 0.03% | 1 | 1.69% |
Tiezhu Yang | 2 | 0.03% | 1 | 1.69% |
Russell King | 2 | 0.03% | 1 | 1.69% |
Linus Torvalds | 1 | 0.01% | 1 | 1.69% |
Patrick McHardy | 1 | 0.01% | 1 | 1.69% |
Wolfram Sang | 1 | 0.01% | 1 | 1.69% |
Alex Dewar | 1 | 0.01% | 1 | 1.69% |
Total | 7815 | 100.00% | 59 | 100.00%
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 - 2019 Cambridge Greys Limited * Copyright (C) 2011 - 2014 Cisco Systems Inc * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and * James Leu (jleu@mindspring.net). * Copyright (C) 2001 by various other people who didn't put their name here. */ #include <linux/memblock.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/firmware.h> #include <linux/fs.h> #include <asm/atomic.h> #include <uapi/linux/filter.h> #include <init.h> #include <irq_kern.h> #include <irq_user.h> #include <net_kern.h> #include <os.h> #include "mconsole_kern.h" #include "vector_user.h" #include "vector_kern.h" /* * Adapted from network devices with the following major changes: * All transports are static - simplifies the code significantly * Multiple FDs/IRQs per device * Vector IO optionally used for read/write, falling back to legacy * based on configuration and/or availability * Configuration is no longer positional - L2TPv3 and GRE require up to * 10 parameters, passing this as positional is not fit for purpose. * Only socket transports are supported */ #define DRIVER_NAME "uml-vector" struct vector_cmd_line_arg { struct list_head list; int unit; char *arguments; }; struct vector_device { struct list_head list; struct net_device *dev; struct platform_device pdev; int unit; int opened; }; static LIST_HEAD(vec_cmd_line); static DEFINE_SPINLOCK(vector_devices_lock); static LIST_HEAD(vector_devices); static int driver_registered; static void vector_eth_configure(int n, struct arglist *def); static int vector_mmsg_rx(struct vector_private *vp, int budget); /* Argument accessors to set variables (and/or set default values) * mtu, buffer sizing, default headroom, etc */ #define DEFAULT_HEADROOM 2 #define SAFETY_MARGIN 32 #define DEFAULT_VECTOR_SIZE 64 #define TX_SMALL_PACKET 128 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1) static const struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { { "rx_queue_max" }, { "rx_queue_running_average" }, { "tx_queue_max" }, { "tx_queue_running_average" }, { "rx_encaps_errors" }, { "tx_timeout_count" }, { "tx_restart_queue" }, { "tx_kicks" }, { "tx_flow_control_xon" }, { "tx_flow_control_xoff" }, { "rx_csum_offload_good" }, { "rx_csum_offload_errors"}, { "sg_ok"}, { "sg_linearized"}, }; #define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) static void vector_reset_stats(struct vector_private *vp) { /* We reuse the existing queue locks for stats */ /* RX stats are modified with RX head_lock held * in vector_poll. */ spin_lock(&vp->rx_queue->head_lock); vp->estats.rx_queue_max = 0; vp->estats.rx_queue_running_average = 0; vp->estats.rx_encaps_errors = 0; vp->estats.sg_ok = 0; vp->estats.sg_linearized = 0; spin_unlock(&vp->rx_queue->head_lock); /* TX stats are modified with TX head_lock held * in vector_send. 
*/ spin_lock(&vp->tx_queue->head_lock); vp->estats.tx_timeout_count = 0; vp->estats.tx_restart_queue = 0; vp->estats.tx_kicks = 0; vp->estats.tx_flow_control_xon = 0; vp->estats.tx_flow_control_xoff = 0; vp->estats.tx_queue_max = 0; vp->estats.tx_queue_running_average = 0; spin_unlock(&vp->tx_queue->head_lock); } static int get_mtu(struct arglist *def) { char *mtu = uml_vector_fetch_arg(def, "mtu"); long result; if (mtu != NULL) { if (kstrtoul(mtu, 10, &result) == 0) if ((result < (1 << 16) - 1) && (result >= 576)) return result; } return ETH_MAX_PACKET; } static char *get_bpf_file(struct arglist *def) { return uml_vector_fetch_arg(def, "bpffile"); } static bool get_bpf_flash(struct arglist *def) { char *allow = uml_vector_fetch_arg(def, "bpfflash"); long result; if (allow != NULL) { if (kstrtoul(allow, 10, &result) == 0) return result > 0; } return false; } static int get_depth(struct arglist *def) { char *mtu = uml_vector_fetch_arg(def, "depth"); long result; if (mtu != NULL) { if (kstrtoul(mtu, 10, &result) == 0) return result; } return DEFAULT_VECTOR_SIZE; } static int get_headroom(struct arglist *def) { char *mtu = uml_vector_fetch_arg(def, "headroom"); long result; if (mtu != NULL) { if (kstrtoul(mtu, 10, &result) == 0) return result; } return DEFAULT_HEADROOM; } static int get_req_size(struct arglist *def) { char *gro = uml_vector_fetch_arg(def, "gro"); long result; if (gro != NULL) { if (kstrtoul(gro, 10, &result) == 0) { if (result > 0) return 65536; } } return get_mtu(def) + ETH_HEADER_OTHER + get_headroom(def) + SAFETY_MARGIN; } static int get_transport_options(struct arglist *def) { char *transport = uml_vector_fetch_arg(def, "transport"); char *vector = uml_vector_fetch_arg(def, "vec"); int vec_rx = VECTOR_RX; int vec_tx = VECTOR_TX; long parsed; int result = 0; if (transport == NULL) return -EINVAL; if (vector != NULL) { if (kstrtoul(vector, 10, &parsed) == 0) { if (parsed == 0) { vec_rx = 0; vec_tx = 0; } } } if (get_bpf_flash(def)) result = VECTOR_BPF_FLASH; if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) return result; if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0) return (result | vec_rx | VECTOR_BPF); if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS); return (result | vec_rx | vec_tx); } /* A mini-buffer for packet drop read * All of our supported transports are datagram oriented and we always * read using recvmsg or recvmmsg. If we pass a buffer which is smaller * than the packet size it still counts as full packet read and will * clean the incoming stream to keep sigio/epoll happy */ #define DROP_BUFFER_SIZE 32 static char *drop_buffer; /* * Advance the mmsg queue head by n = advance. Resets the queue to * maximum enqueue/dequeue-at-once capacity if possible. Called by * dequeuers. Caller must hold the head_lock! */ static int vector_advancehead(struct vector_queue *qi, int advance) { qi->head = (qi->head + advance) % qi->max_depth; atomic_sub(advance, &qi->queue_depth); return atomic_read(&qi->queue_depth); } /* Advance the queue tail by n = advance. 
* This is called by enqueuers which should hold the * head lock already */ static int vector_advancetail(struct vector_queue *qi, int advance) { qi->tail = (qi->tail + advance) % qi->max_depth; atomic_add(advance, &qi->queue_depth); return atomic_read(&qi->queue_depth); } static int prep_msg(struct vector_private *vp, struct sk_buff *skb, struct iovec *iov) { int iov_index = 0; int nr_frags, frag; skb_frag_t *skb_frag; nr_frags = skb_shinfo(skb)->nr_frags; if (nr_frags > MAX_IOV_SIZE) { if (skb_linearize(skb) != 0) goto drop; } if (vp->header_size > 0) { iov[iov_index].iov_len = vp->header_size; vp->form_header(iov[iov_index].iov_base, skb, vp); iov_index++; } iov[iov_index].iov_base = skb->data; if (nr_frags > 0) { iov[iov_index].iov_len = skb->len - skb->data_len; vp->estats.sg_ok++; } else iov[iov_index].iov_len = skb->len; iov_index++; for (frag = 0; frag < nr_frags; frag++) { skb_frag = &skb_shinfo(skb)->frags[frag]; iov[iov_index].iov_base = skb_frag_address_safe(skb_frag); iov[iov_index].iov_len = skb_frag_size(skb_frag); iov_index++; } return iov_index; drop: return -1; } /* * Generic vector enqueue with support for forming headers using transport * specific callback. Allows GRE, L2TPv3, RAW and other transports * to use a common enqueue procedure in vector mode */ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb) { struct vector_private *vp = netdev_priv(qi->dev); int queue_depth; int packet_len; struct mmsghdr *mmsg_vector = qi->mmsg_vector; int iov_count; spin_lock(&qi->tail_lock); queue_depth = atomic_read(&qi->queue_depth); if (skb) packet_len = skb->len; if (queue_depth < qi->max_depth) { *(qi->skbuff_vector + qi->tail) = skb; mmsg_vector += qi->tail; iov_count = prep_msg( vp, skb, mmsg_vector->msg_hdr.msg_iov ); if (iov_count < 1) goto drop; mmsg_vector->msg_hdr.msg_iovlen = iov_count; mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; wmb(); /* Make the packet visible to the NAPI poll thread */ queue_depth = vector_advancetail(qi, 1); } else goto drop; spin_unlock(&qi->tail_lock); return queue_depth; drop: qi->dev->stats.tx_dropped++; if (skb != NULL) { packet_len = skb->len; dev_consume_skb_any(skb); netdev_completed_queue(qi->dev, 1, packet_len); } spin_unlock(&qi->tail_lock); return queue_depth; } static int consume_vector_skbs(struct vector_queue *qi, int count) { struct sk_buff *skb; int skb_index; int bytes_compl = 0; for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) { skb = *(qi->skbuff_vector + skb_index); /* mark as empty to ensure correct destruction if * needed */ bytes_compl += skb->len; *(qi->skbuff_vector + skb_index) = NULL; dev_consume_skb_any(skb); } qi->dev->stats.tx_bytes += bytes_compl; qi->dev->stats.tx_packets += count; netdev_completed_queue(qi->dev, count, bytes_compl); return vector_advancehead(qi, count); } /* * Generic vector dequeue via sendmmsg with support for forming headers * using transport specific callback. 
Allows GRE, L2TPv3, RAW and * other transports to use a common dequeue procedure in vector mode */ static int vector_send(struct vector_queue *qi) { struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *send_from; int result = 0, send_len; if (spin_trylock(&qi->head_lock)) { /* update queue_depth to current value */ while (atomic_read(&qi->queue_depth) > 0) { /* Calculate the start of the vector */ send_len = atomic_read(&qi->queue_depth); send_from = qi->mmsg_vector; send_from += qi->head; /* Adjust vector size if wraparound */ if (send_len + qi->head > qi->max_depth) send_len = qi->max_depth - qi->head; /* Try to TX as many packets as possible */ if (send_len > 0) { result = uml_vector_sendmmsg( vp->fds->tx_fd, send_from, send_len, 0 ); vp->in_write_poll = (result != send_len); } /* For some of the sendmmsg error scenarios * we may end being unsure in the TX success * for all packets. It is safer to declare * them all TX-ed and blame the network. */ if (result < 0) { if (net_ratelimit()) netdev_err(vp->dev, "sendmmsg err=%i\n", result); vp->in_error = true; result = send_len; } if (result > 0) { consume_vector_skbs(qi, result); /* This is equivalent to an TX IRQ. * Restart the upper layers to feed us * more packets. */ if (result > vp->estats.tx_queue_max) vp->estats.tx_queue_max = result; vp->estats.tx_queue_running_average = (vp->estats.tx_queue_running_average + result) >> 1; } netif_wake_queue(qi->dev); /* if TX is busy, break out of the send loop, * poll write IRQ will reschedule xmit for us. */ if (result != send_len) { vp->estats.tx_restart_queue++; break; } } spin_unlock(&qi->head_lock); } return atomic_read(&qi->queue_depth); } /* Queue destructor. Deliberately stateless so we can use * it in queue cleanup if initialization fails. */ static void destroy_queue(struct vector_queue *qi) { int i; struct iovec *iov; struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *mmsg_vector; if (qi == NULL) return; /* deallocate any skbuffs - we rely on any unused to be * set to NULL. */ if (qi->skbuff_vector != NULL) { for (i = 0; i < qi->max_depth; i++) { if (*(qi->skbuff_vector + i) != NULL) dev_kfree_skb_any(*(qi->skbuff_vector + i)); } kfree(qi->skbuff_vector); } /* deallocate matching IOV structures including header buffs */ if (qi->mmsg_vector != NULL) { mmsg_vector = qi->mmsg_vector; for (i = 0; i < qi->max_depth; i++) { iov = mmsg_vector->msg_hdr.msg_iov; if (iov != NULL) { if ((vp->header_size > 0) && (iov->iov_base != NULL)) kfree(iov->iov_base); kfree(iov); } mmsg_vector++; } kfree(qi->mmsg_vector); } kfree(qi); } /* * Queue constructor. Create a queue with a given side. 
*/ static struct vector_queue *create_queue( struct vector_private *vp, int max_size, int header_size, int num_extra_frags) { struct vector_queue *result; int i; struct iovec *iov; struct mmsghdr *mmsg_vector; result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL); if (result == NULL) return NULL; result->max_depth = max_size; result->dev = vp->dev; result->mmsg_vector = kmalloc( (sizeof(struct mmsghdr) * max_size), GFP_KERNEL); if (result->mmsg_vector == NULL) goto out_mmsg_fail; result->skbuff_vector = kmalloc( (sizeof(void *) * max_size), GFP_KERNEL); if (result->skbuff_vector == NULL) goto out_skb_fail; /* further failures can be handled safely by destroy_queue*/ mmsg_vector = result->mmsg_vector; for (i = 0; i < max_size; i++) { /* Clear all pointers - we use non-NULL as marking on * what to free on destruction */ *(result->skbuff_vector + i) = NULL; mmsg_vector->msg_hdr.msg_iov = NULL; mmsg_vector++; } mmsg_vector = result->mmsg_vector; result->max_iov_frags = num_extra_frags; for (i = 0; i < max_size; i++) { if (vp->header_size > 0) iov = kmalloc_array(3 + num_extra_frags, sizeof(struct iovec), GFP_KERNEL ); else iov = kmalloc_array(2 + num_extra_frags, sizeof(struct iovec), GFP_KERNEL ); if (iov == NULL) goto out_fail; mmsg_vector->msg_hdr.msg_iov = iov; mmsg_vector->msg_hdr.msg_iovlen = 1; mmsg_vector->msg_hdr.msg_control = NULL; mmsg_vector->msg_hdr.msg_controllen = 0; mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT; mmsg_vector->msg_hdr.msg_name = NULL; mmsg_vector->msg_hdr.msg_namelen = 0; if (vp->header_size > 0) { iov->iov_base = kmalloc(header_size, GFP_KERNEL); if (iov->iov_base == NULL) goto out_fail; iov->iov_len = header_size; mmsg_vector->msg_hdr.msg_iovlen = 2; iov++; } iov->iov_base = NULL; iov->iov_len = 0; mmsg_vector++; } spin_lock_init(&result->head_lock); spin_lock_init(&result->tail_lock); atomic_set(&result->queue_depth, 0); result->head = 0; result->tail = 0; return result; out_skb_fail: kfree(result->mmsg_vector); out_mmsg_fail: kfree(result); return NULL; out_fail: destroy_queue(result); return NULL; } /* * We do not use the RX queue as a proper wraparound queue for now * This is not necessary because the consumption via napi_gro_receive() * happens in-line. While we can try using the return code of * netif_rx() for flow control there are no drivers doing this today. * For this RX specific use we ignore the tail/head locks and * just read into a prepared queue filled with skbuffs. 
*/ static struct sk_buff *prep_skb( struct vector_private *vp, struct user_msghdr *msg) { int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN; struct sk_buff *result; int iov_index = 0, len; struct iovec *iov = msg->msg_iov; int err, nr_frags, frag; skb_frag_t *skb_frag; if (vp->req_size <= linear) len = linear; else len = vp->req_size; result = alloc_skb_with_frags( linear, len - vp->max_packet, 3, &err, GFP_ATOMIC ); if (vp->header_size > 0) iov_index++; if (result == NULL) { iov[iov_index].iov_base = NULL; iov[iov_index].iov_len = 0; goto done; } skb_reserve(result, vp->headroom); result->dev = vp->dev; skb_put(result, vp->max_packet); result->data_len = len - vp->max_packet; result->len += len - vp->max_packet; skb_reset_mac_header(result); result->ip_summed = CHECKSUM_NONE; iov[iov_index].iov_base = result->data; iov[iov_index].iov_len = vp->max_packet; iov_index++; nr_frags = skb_shinfo(result)->nr_frags; for (frag = 0; frag < nr_frags; frag++) { skb_frag = &skb_shinfo(result)->frags[frag]; iov[iov_index].iov_base = skb_frag_address_safe(skb_frag); if (iov[iov_index].iov_base != NULL) iov[iov_index].iov_len = skb_frag_size(skb_frag); else iov[iov_index].iov_len = 0; iov_index++; } done: msg->msg_iovlen = iov_index; return result; } /* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */ static void prep_queue_for_rx(struct vector_queue *qi) { struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *mmsg_vector = qi->mmsg_vector; void **skbuff_vector = qi->skbuff_vector; int i, queue_depth; queue_depth = atomic_read(&qi->queue_depth); if (queue_depth == 0) return; /* RX is always emptied 100% during each cycle, so we do not * have to do the tail wraparound math for it. */ qi->head = qi->tail = 0; for (i = 0; i < queue_depth; i++) { /* it is OK if allocation fails - recvmmsg with NULL data in * iov argument still performs an RX, just drops the packet * This allows us stop faffing around with a "drop buffer" */ *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr); skbuff_vector++; mmsg_vector++; } atomic_set(&qi->queue_depth, 0); } static struct vector_device *find_device(int n) { struct vector_device *device; struct list_head *ele; spin_lock(&vector_devices_lock); list_for_each(ele, &vector_devices) { device = list_entry(ele, struct vector_device, list); if (device->unit == n) goto out; } device = NULL; out: spin_unlock(&vector_devices_lock); return device; } static int vector_parse(char *str, int *index_out, char **str_out, char **error_out) { int n, err; char *start = str; while ((*str != ':') && (strlen(str) > 1)) str++; if (*str != ':') { *error_out = "Expected ':' after device number"; return -EINVAL; } *str = '\0'; err = kstrtouint(start, 0, &n); if (err < 0) { *error_out = "Bad device number"; return err; } str++; if (find_device(n)) { *error_out = "Device already configured"; return -EINVAL; } *index_out = n; *str_out = str; return 0; } static int vector_config(char *str, char **error_out) { int err, n; char *params; struct arglist *parsed; err = vector_parse(str, &n, ¶ms, error_out); if (err != 0) return err; /* This string is broken up and the pieces used by the underlying * driver. We should copy it to make sure things do not go wrong * later. 
*/ params = kstrdup(params, GFP_KERNEL); if (params == NULL) { *error_out = "vector_config failed to strdup string"; return -ENOMEM; } parsed = uml_parse_vector_ifspec(params); if (parsed == NULL) { *error_out = "vector_config failed to parse parameters"; kfree(params); return -EINVAL; } vector_eth_configure(n, parsed); return 0; } static int vector_id(char **str, int *start_out, int *end_out) { char *end; int n; n = simple_strtoul(*str, &end, 0); if ((*end != '\0') || (end == *str)) return -1; *start_out = n; *end_out = n; *str = end; return n; } static int vector_remove(int n, char **error_out) { struct vector_device *vec_d; struct net_device *dev; struct vector_private *vp; vec_d = find_device(n); if (vec_d == NULL) return -ENODEV; dev = vec_d->dev; vp = netdev_priv(dev); if (vp->fds != NULL) return -EBUSY; unregister_netdev(dev); platform_device_unregister(&vec_d->pdev); return 0; } /* * There is no shared per-transport initialization code, so * we will just initialize each interface one by one and * add them to a list */ static struct platform_driver uml_net_driver = { .driver = { .name = DRIVER_NAME, }, }; static void vector_device_release(struct device *dev) { struct vector_device *device = container_of(dev, struct vector_device, pdev.dev); struct net_device *netdev = device->dev; list_del(&device->list); kfree(device); free_netdev(netdev); } /* Bog standard recv using recvmsg - not used normally unless the user * explicitly specifies not to use recvmmsg vector RX. */ static int vector_legacy_rx(struct vector_private *vp) { int pkt_len; struct user_msghdr hdr; struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */ int iovpos = 0; struct sk_buff *skb; int header_check; hdr.msg_name = NULL; hdr.msg_namelen = 0; hdr.msg_iov = (struct iovec *) &iov; hdr.msg_control = NULL; hdr.msg_controllen = 0; hdr.msg_flags = 0; if (vp->header_size > 0) { iov[0].iov_base = vp->header_rxbuffer; iov[0].iov_len = vp->header_size; } skb = prep_skb(vp, &hdr); if (skb == NULL) { /* Read a packet into drop_buffer and don't do * anything with it. */ iov[iovpos].iov_base = drop_buffer; iov[iovpos].iov_len = DROP_BUFFER_SIZE; hdr.msg_iovlen = 1; vp->dev->stats.rx_dropped++; } pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); if (pkt_len < 0) { vp->in_error = true; return pkt_len; } if (skb != NULL) { if (pkt_len > vp->header_size) { if (vp->header_size > 0) { header_check = vp->verify_header( vp->header_rxbuffer, skb, vp); if (header_check < 0) { dev_kfree_skb_irq(skb); vp->dev->stats.rx_dropped++; vp->estats.rx_encaps_errors++; return 0; } if (header_check > 0) { vp->estats.rx_csum_offload_good++; skb->ip_summed = CHECKSUM_UNNECESSARY; } } pskb_trim(skb, pkt_len - vp->rx_header_size); skb->protocol = eth_type_trans(skb, skb->dev); vp->dev->stats.rx_bytes += skb->len; vp->dev->stats.rx_packets++; napi_gro_receive(&vp->napi, skb); } else { dev_kfree_skb_irq(skb); } } return pkt_len; } /* * Packet at a time TX which falls back to vector TX if the * underlying transport is busy. 
*/ static int writev_tx(struct vector_private *vp, struct sk_buff *skb) { struct iovec iov[3 + MAX_IOV_SIZE]; int iov_count, pkt_len = 0; iov[0].iov_base = vp->header_txbuffer; iov_count = prep_msg(vp, skb, (struct iovec *) &iov); if (iov_count < 1) goto drop; pkt_len = uml_vector_writev( vp->fds->tx_fd, (struct iovec *) &iov, iov_count ); if (pkt_len < 0) goto drop; netif_trans_update(vp->dev); netif_wake_queue(vp->dev); if (pkt_len > 0) { vp->dev->stats.tx_bytes += skb->len; vp->dev->stats.tx_packets++; } else { vp->dev->stats.tx_dropped++; } consume_skb(skb); return pkt_len; drop: vp->dev->stats.tx_dropped++; consume_skb(skb); if (pkt_len < 0) vp->in_error = true; return pkt_len; } /* * Receive as many messages as we can in one call using the special * mmsg vector matched to an skb vector which we prepared earlier. */ static int vector_mmsg_rx(struct vector_private *vp, int budget) { int packet_count, i; struct vector_queue *qi = vp->rx_queue; struct sk_buff *skb; struct mmsghdr *mmsg_vector = qi->mmsg_vector; void **skbuff_vector = qi->skbuff_vector; int header_check; /* Refresh the vector and make sure it is with new skbs and the * iovs are updated to point to them. */ prep_queue_for_rx(qi); /* Fire the Lazy Gun - get as many packets as we can in one go. */ if (budget > qi->max_depth) budget = qi->max_depth; packet_count = uml_vector_recvmmsg( vp->fds->rx_fd, qi->mmsg_vector, budget, 0); if (packet_count < 0) vp->in_error = true; if (packet_count <= 0) return packet_count; /* We treat packet processing as enqueue, buffer refresh as dequeue * The queue_depth tells us how many buffers have been used and how * many do we need to prep the next time prep_queue_for_rx() is called. */ atomic_add(packet_count, &qi->queue_depth); for (i = 0; i < packet_count; i++) { skb = (*skbuff_vector); if (mmsg_vector->msg_len > vp->header_size) { if (vp->header_size > 0) { header_check = vp->verify_header( mmsg_vector->msg_hdr.msg_iov->iov_base, skb, vp ); if (header_check < 0) { /* Overlay header failed to verify - discard. * We can actually keep this skb and reuse it, * but that will make the prep logic too * complex. */ dev_kfree_skb_irq(skb); vp->estats.rx_encaps_errors++; continue; } if (header_check > 0) { vp->estats.rx_csum_offload_good++; skb->ip_summed = CHECKSUM_UNNECESSARY; } } pskb_trim(skb, mmsg_vector->msg_len - vp->rx_header_size); skb->protocol = eth_type_trans(skb, skb->dev); /* * We do not need to lock on updating stats here * The interrupt loop is non-reentrant. */ vp->dev->stats.rx_bytes += skb->len; vp->dev->stats.rx_packets++; napi_gro_receive(&vp->napi, skb); } else { /* Overlay header too short to do anything - discard. * We can actually keep this skb and reuse it, * but that will make the prep logic too complex. 
*/ if (skb != NULL) dev_kfree_skb_irq(skb); } (*skbuff_vector) = NULL; /* Move to the next buffer element */ mmsg_vector++; skbuff_vector++; } if (packet_count > 0) { if (vp->estats.rx_queue_max < packet_count) vp->estats.rx_queue_max = packet_count; vp->estats.rx_queue_running_average = (vp->estats.rx_queue_running_average + packet_count) >> 1; } return packet_count; } static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vector_private *vp = netdev_priv(dev); int queue_depth = 0; if (vp->in_error) { deactivate_fd(vp->fds->rx_fd, vp->rx_irq); if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0)) deactivate_fd(vp->fds->tx_fd, vp->tx_irq); return NETDEV_TX_BUSY; } if ((vp->options & VECTOR_TX) == 0) { writev_tx(vp, skb); return NETDEV_TX_OK; } /* We do BQL only in the vector path, no point doing it in * packet at a time mode as there is no device queue */ netdev_sent_queue(vp->dev, skb->len); queue_depth = vector_enqueue(vp->tx_queue, skb); if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) { mod_timer(&vp->tl, vp->coalesce); return NETDEV_TX_OK; } else { queue_depth = vector_send(vp->tx_queue); if (queue_depth > 0) napi_schedule(&vp->napi); } return NETDEV_TX_OK; } static irqreturn_t vector_rx_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vector_private *vp = netdev_priv(dev); if (!netif_running(dev)) return IRQ_NONE; napi_schedule(&vp->napi); return IRQ_HANDLED; } static irqreturn_t vector_tx_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct vector_private *vp = netdev_priv(dev); if (!netif_running(dev)) return IRQ_NONE; /* We need to pay attention to it only if we got * -EAGAIN or -ENOBUFFS from sendmmsg. Otherwise * we ignore it. In the future, it may be worth * it to improve the IRQ controller a bit to make * tweaking the IRQ mask less costly */ napi_schedule(&vp->napi); return IRQ_HANDLED; } static int irq_rr; static int vector_net_close(struct net_device *dev) { struct vector_private *vp = netdev_priv(dev); netif_stop_queue(dev); del_timer(&vp->tl); vp->opened = false; if (vp->fds == NULL) return 0; /* Disable and free all IRQS */ if (vp->rx_irq > 0) { um_free_irq(vp->rx_irq, dev); vp->rx_irq = 0; } if (vp->tx_irq > 0) { um_free_irq(vp->tx_irq, dev); vp->tx_irq = 0; } napi_disable(&vp->napi); netif_napi_del(&vp->napi); if (vp->fds->rx_fd > 0) { if (vp->bpf) uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); os_close_file(vp->fds->rx_fd); vp->fds->rx_fd = -1; } if (vp->fds->tx_fd > 0) { os_close_file(vp->fds->tx_fd); vp->fds->tx_fd = -1; } if (vp->bpf != NULL) kfree(vp->bpf->filter); kfree(vp->bpf); vp->bpf = NULL; kfree(vp->fds->remote_addr); kfree(vp->transport_data); kfree(vp->header_rxbuffer); kfree(vp->header_txbuffer); if (vp->rx_queue != NULL) destroy_queue(vp->rx_queue); if (vp->tx_queue != NULL) destroy_queue(vp->tx_queue); kfree(vp->fds); vp->fds = NULL; vp->in_error = false; return 0; } static int vector_poll(struct napi_struct *napi, int budget) { struct vector_private *vp = container_of(napi, struct vector_private, napi); int work_done = 0; int err; bool tx_enqueued = false; if ((vp->options & VECTOR_TX) != 0) tx_enqueued = (vector_send(vp->tx_queue) > 0); spin_lock(&vp->rx_queue->head_lock); if ((vp->options & VECTOR_RX) > 0) err = vector_mmsg_rx(vp, budget); else { err = vector_legacy_rx(vp); if (err > 0) err = 1; } spin_unlock(&vp->rx_queue->head_lock); if (err > 0) work_done += err; if (tx_enqueued || err > 0) napi_schedule(napi); if (work_done <= budget) 
napi_complete_done(napi, work_done); return work_done; } static void vector_reset_tx(struct work_struct *work) { struct vector_private *vp = container_of(work, struct vector_private, reset_tx); netdev_reset_queue(vp->dev); netif_start_queue(vp->dev); netif_wake_queue(vp->dev); } static int vector_net_open(struct net_device *dev) { struct vector_private *vp = netdev_priv(dev); int err = -EINVAL; struct vector_device *vdevice; if (vp->opened) return -ENXIO; vp->opened = true; vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed)); vp->fds = uml_vector_user_open(vp->unit, vp->parsed); if (vp->fds == NULL) goto out_close; if (build_transport_data(vp) < 0) goto out_close; if ((vp->options & VECTOR_RX) > 0) { vp->rx_queue = create_queue( vp, get_depth(vp->parsed), vp->rx_header_size, MAX_IOV_SIZE ); atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed)); } else { vp->header_rxbuffer = kmalloc( vp->rx_header_size, GFP_KERNEL ); if (vp->header_rxbuffer == NULL) goto out_close; } if ((vp->options & VECTOR_TX) > 0) { vp->tx_queue = create_queue( vp, get_depth(vp->parsed), vp->header_size, MAX_IOV_SIZE ); } else { vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL); if (vp->header_txbuffer == NULL) goto out_close; } netif_napi_add_weight(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed)); napi_enable(&vp->napi); /* READ IRQ */ err = um_request_irq( irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, IRQ_READ, vector_rx_interrupt, IRQF_SHARED, dev->name, dev); if (err < 0) { netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err); err = -ENETUNREACH; goto out_close; } vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; dev->irq = irq_rr + VECTOR_BASE_IRQ; irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE; /* WRITE IRQ - we need it only if we have vector TX */ if ((vp->options & VECTOR_TX) > 0) { err = um_request_irq( irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, IRQ_WRITE, vector_tx_interrupt, IRQF_SHARED, dev->name, dev); if (err < 0) { netdev_err(dev, "vector_open: failed to get tx irq(%d)\n", err); err = -ENETUNREACH; goto out_close; } vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE; } if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) vp->options |= VECTOR_BPF; } if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL)) vp->bpf = uml_vector_default_bpf(dev->dev_addr); if (vp->bpf != NULL) uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); netif_start_queue(dev); vector_reset_stats(vp); /* clear buffer - it can happen that the host side of the interface * is full when we get here. In this case, new data is never queued, * SIGIOs never arrive, and the net never works. */ napi_schedule(&vp->napi); vdevice = find_device(vp->unit); vdevice->opened = 1; if ((vp->options & VECTOR_TX) != 0) add_timer(&vp->tl); return 0; out_close: vector_net_close(dev); return err; } static void vector_net_set_multicast_list(struct net_device *dev) { /* TODO: - we can do some BPF games here */ return; } static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct vector_private *vp = netdev_priv(dev); vp->estats.tx_timeout_count++; netif_trans_update(dev); schedule_work(&vp->reset_tx); } static netdev_features_t vector_fix_features(struct net_device *dev, netdev_features_t features) { features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); return features; } static int vector_set_features(struct net_device *dev, netdev_features_t features) { struct vector_private *vp = netdev_priv(dev); /* Adjust buffer sizes for GSO/GRO. 
Unfortunately, there is * no way to negotiate it on raw sockets, so we can change * only our side. */ if (features & NETIF_F_GRO) /* All new frame buffers will be GRO-sized */ vp->req_size = 65536; else /* All new frame buffers will be normal sized */ vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN; return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void vector_net_poll_controller(struct net_device *dev) { disable_irq(dev->irq); vector_rx_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static void vector_net_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRIVER_NAME); } static int vector_net_load_bpf_flash(struct net_device *dev, struct ethtool_flash *efl) { struct vector_private *vp = netdev_priv(dev); struct vector_device *vdevice; const struct firmware *fw; int result = 0; if (!(vp->options & VECTOR_BPF_FLASH)) { netdev_err(dev, "loading firmware not permitted: %s\n", efl->data); return -1; } if (vp->bpf != NULL) { if (vp->opened) uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); kfree(vp->bpf->filter); vp->bpf->filter = NULL; } else { vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); if (vp->bpf == NULL) { netdev_err(dev, "failed to allocate memory for firmware\n"); goto flash_fail; } } vdevice = find_device(vp->unit); if (request_firmware(&fw, efl->data, &vdevice->pdev.dev)) goto flash_fail; vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); if (!vp->bpf->filter) goto free_buffer; vp->bpf->len = fw->size / sizeof(struct sock_filter); release_firmware(fw); if (vp->opened) result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); return result; free_buffer: release_firmware(fw); flash_fail: if (vp->bpf != NULL) kfree(vp->bpf->filter); kfree(vp->bpf); vp->bpf = NULL; return -1; } static void vector_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct vector_private *vp = netdev_priv(netdev); ring->rx_max_pending = vp->rx_queue->max_depth; ring->tx_max_pending = vp->tx_queue->max_depth; ring->rx_pending = vp->rx_queue->max_depth; ring->tx_pending = vp->tx_queue->max_depth; } static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_TEST: *buf = '\0'; break; case ETH_SS_STATS: memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys)); break; default: WARN_ON(1); break; } } static int vector_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_TEST: return 0; case ETH_SS_STATS: return VECTOR_NUM_STATS; default: return -EOPNOTSUPP; } } static void vector_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *tmp_stats) { struct vector_private *vp = netdev_priv(dev); /* Stats are modified in the dequeue portions of * rx/tx which are protected by the head locks * grabbing these locks here ensures they are up * to date. 
*/ spin_lock(&vp->tx_queue->head_lock); spin_lock(&vp->rx_queue->head_lock); memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); spin_unlock(&vp->rx_queue->head_lock); spin_unlock(&vp->tx_queue->head_lock); } static int vector_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct vector_private *vp = netdev_priv(netdev); ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ; return 0; } static int vector_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct vector_private *vp = netdev_priv(netdev); vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; if (vp->coalesce == 0) vp->coalesce = 1; return 0; } static const struct ethtool_ops vector_net_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS, .get_drvinfo = vector_net_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_ringparam = vector_get_ringparam, .get_strings = vector_get_strings, .get_sset_count = vector_get_sset_count, .get_ethtool_stats = vector_get_ethtool_stats, .get_coalesce = vector_get_coalesce, .set_coalesce = vector_set_coalesce, .flash_device = vector_net_load_bpf_flash, }; static const struct net_device_ops vector_netdev_ops = { .ndo_open = vector_net_open, .ndo_stop = vector_net_close, .ndo_start_xmit = vector_net_start_xmit, .ndo_set_rx_mode = vector_net_set_multicast_list, .ndo_tx_timeout = vector_net_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = vector_fix_features, .ndo_set_features = vector_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vector_net_poll_controller, #endif }; static void vector_timer_expire(struct timer_list *t) { struct vector_private *vp = from_timer(vp, t, tl); vp->estats.tx_kicks++; napi_schedule(&vp->napi); } static void vector_eth_configure( int n, struct arglist *def ) { struct vector_device *device; struct net_device *dev; struct vector_private *vp; int err; device = kzalloc(sizeof(*device), GFP_KERNEL); if (device == NULL) { printk(KERN_ERR "eth_configure failed to allocate struct " "vector_device\n"); return; } dev = alloc_etherdev(sizeof(struct vector_private)); if (dev == NULL) { printk(KERN_ERR "eth_configure: failed to allocate struct " "net_device for vec%d\n", n); goto out_free_device; } dev->mtu = get_mtu(def); INIT_LIST_HEAD(&device->list); device->unit = n; /* If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this * and fail. 
*/ snprintf(dev->name, sizeof(dev->name), "vec%d", n); uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac")); vp = netdev_priv(dev); /* sysfs register */ if (!driver_registered) { platform_driver_register(¨_net_driver); driver_registered = 1; } device->pdev.id = n; device->pdev.name = DRIVER_NAME; device->pdev.dev.release = vector_device_release; dev_set_drvdata(&device->pdev.dev, device); if (platform_device_register(&device->pdev)) goto out_free_netdev; SET_NETDEV_DEV(dev, &device->pdev.dev); device->dev = dev; *vp = ((struct vector_private) { .list = LIST_HEAD_INIT(vp->list), .dev = dev, .unit = n, .options = get_transport_options(def), .rx_irq = 0, .tx_irq = 0, .parsed = def, .max_packet = get_mtu(def) + ETH_HEADER_OTHER, /* TODO - we need to calculate headroom so that ip header * is 16 byte aligned all the time */ .headroom = get_headroom(def), .form_header = NULL, .verify_header = NULL, .header_rxbuffer = NULL, .header_txbuffer = NULL, .header_size = 0, .rx_header_size = 0, .rexmit_scheduled = false, .opened = false, .transport_data = NULL, .in_write_poll = false, .coalesce = 2, .req_size = get_req_size(def), .in_error = false, .bpf = NULL }); dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST); INIT_WORK(&vp->reset_tx, vector_reset_tx); timer_setup(&vp->tl, vector_timer_expire, 0); /* FIXME */ dev->netdev_ops = &vector_netdev_ops; dev->ethtool_ops = &vector_net_ethtool_ops; dev->watchdog_timeo = (HZ >> 1); /* primary IRQ - fixme */ dev->irq = 0; /* we will adjust this once opened */ rtnl_lock(); err = register_netdevice(dev); rtnl_unlock(); if (err) goto out_undo_user_init; spin_lock(&vector_devices_lock); list_add(&device->list, &vector_devices); spin_unlock(&vector_devices_lock); return; out_undo_user_init: return; out_free_netdev: free_netdev(dev); out_free_device: kfree(device); } /* * Invoked late in the init */ static int __init vector_init(void) { struct list_head *ele; struct vector_cmd_line_arg *def; struct arglist *parsed; list_for_each(ele, &vec_cmd_line) { def = list_entry(ele, struct vector_cmd_line_arg, list); parsed = uml_parse_vector_ifspec(def->arguments); if (parsed != NULL) vector_eth_configure(def->unit, parsed); } return 0; } /* Invoked at initial argument parsing, only stores * arguments until a proper vector_init is called * later */ static int __init vector_setup(char *str) { char *error; int n, err; struct vector_cmd_line_arg *new; err = vector_parse(str, &n, &str, &error); if (err) { printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n", str, error); return 1; } new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); if (!new) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*new)); INIT_LIST_HEAD(&new->list); new->unit = n; new->arguments = str; list_add_tail(&new->list, &vec_cmd_line); return 1; } __setup("vec", vector_setup); __uml_help(vector_setup, "vec[0-9]+:<option>=<value>,<option>=<value>\n" " Configure a vector io network device.\n\n" ); late_initcall(vector_init); static struct mc_device vector_mc = { .list = LIST_HEAD_INIT(vector_mc.list), .name = "vec", .config = vector_config, .get_config = NULL, .id = vector_id, .remove = vector_remove, }; #ifdef CONFIG_INET static int vector_inetaddr_event( struct notifier_block *this, unsigned long event, void *ptr) { return NOTIFY_DONE; } static struct notifier_block vector_inetaddr_notifier = { .notifier_call = vector_inetaddr_event, }; static void inet_register(void) { register_inetaddr_notifier(&vector_inetaddr_notifier); } #else static inline void 
inet_register(void) { } #endif static int vector_net_init(void) { mconsole_register_dev(&vector_mc); inet_register(); return 0; } __initcall(vector_net_init);
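For reference, the boot-time syntax documented by the driver's __uml_help string is `vec[0-9]+:<option>=<value>,<option>=<value>`. The sketch below is a hypothetical configuration assembled only from the option names this file actually parses (transport, vec, mtu, depth, headroom, gro, mac, bpffile, bpfflash); `ifname=` stands in for the transport-specific settings handled in vector_user.c, and all of the example values and file names are illustrative assumptions, not documented defaults.

```
# Hypothetical UML boot argument: one raw-socket vector device with a
# 128-entry vector, GRO-sized receive buffers and an explicit MAC.
vec0:transport=raw,ifname=host-eth0,depth=128,gro=1,mtu=1500,mac=52:54:00:12:34:56,bpfflash=1

# With bpfflash=1, a classic-BPF program can be pushed at runtime through the
# ethtool flash hook (vector_net_load_bpf_flash); the file name is resolved by
# request_firmware(), i.e. looked up in the normal firmware search path.
ethtool -f vec0 uml_vector_filter.bpf
```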