cregit-Linux how code gets into the kernel

Release 4.11 drivers/net/hyperv/rndis_filter.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>

#include "hyperv_net.h"



#define RNDIS_EXT_LEN PAGE_SIZE

/* Tracks one outstanding RNDIS request/response exchange with the host. */
struct rndis_request {
	struct list_head list_ent;	/* linkage on rndis_device::req_list */
	struct completion  wait_event;	/* signalled when the response arrives */

	struct rndis_message response_msg;
	/*
	 * The buffer for extended info after the RNDIS response message. It's
	 * referenced based on the data offset in the RNDIS message. Its size
	 * is enough for current needs, and should be sufficient for the near
	 * future.
	 */
	u8 response_ext[RNDIS_EXT_LEN];

	/* Simplify allocation by having a netvsc packet inline */
	struct hv_netvsc_packet	pkt;

	struct rndis_message request_msg;
	/*
	 * The buffer for the extended info after the RNDIS request message.
	 * It is referenced and sized in a similar way as response_ext.
	 */
	u8 request_ext[RNDIS_EXT_LEN];
};


/*
 * Default RSS hash key, copied to the device via
 * rndis_filter_set_rss_param() (which selects NDIS_HASH_FUNC_TOEPLITZ),
 * so this is presumably a Toeplitz key — confirm against host docs.
 */
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};


static struct rndis_device *get_rndis_device(void) { struct rndis_device *device; device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL); if (!device) return NULL; spin_lock_init(&device->request_lock); INIT_LIST_HEAD(&device->req_list); device->state = RNDIS_DEV_UNINITIALIZED; return device; }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen4472.13%114.29%
Greg Kroah-Hartman1321.31%342.86%
Haiyang Zhang34.92%228.57%
Bill Pemberton11.64%114.29%
Total61100.00%7100.00%


static struct rndis_request *get_rndis_request(struct rndis_device *dev, u32 msg_type, u32 msg_len) { struct rndis_request *request; struct rndis_message *rndis_msg; struct rndis_set_request *set; unsigned long flags; request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL); if (!request) return NULL; init_completion(&request->wait_event); rndis_msg = &request->request_msg; rndis_msg->ndis_msg_type = msg_type; rndis_msg->msg_len = msg_len; request->pkt.q_idx = 0; /* * Set the request id. This field is always after the rndis header for * request/response packet types so we just used the SetRequest as a * template */ set = &rndis_msg->msg.set_req; set->req_id = atomic_inc_return(&dev->new_req_id); /* Add to the request list */ spin_lock_irqsave(&dev->request_lock, flags); list_add_tail(&request->list_ent, &dev->req_list); spin_unlock_irqrestore(&dev->request_lock, flags); return request; }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen8152.94%16.25%
Haiyang Zhang3220.92%425.00%
Greg Kroah-Hartman3220.92%637.50%
Bill Pemberton42.61%318.75%
K. Y. Srinivasan42.61%212.50%
Total153100.00%16100.00%


static void put_rndis_request(struct rndis_device *dev, struct rndis_request *req) { unsigned long flags; spin_lock_irqsave(&dev->request_lock, flags); list_del(&req->list_ent); spin_unlock_irqrestore(&dev->request_lock, flags); kfree(req); }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen2547.17%114.29%
Greg Kroah-Hartman1833.96%228.57%
Haiyang Zhang815.09%228.57%
Bill Pemberton23.77%228.57%
Total53100.00%7100.00%


static void dump_rndis_message(struct hv_device *hv_dev, const struct rndis_message *rndis_msg) { struct net_device *netdev = hv_get_drvdata(hv_dev); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, " "data offset %u data len %u, # oob %u, " "oob offset %u, oob len %u, pkt offset %u, " "pkt len %u\n", rndis_msg->msg_len, rndis_msg->msg.pkt.data_offset, rndis_msg->msg.pkt.data_len, rndis_msg->msg.pkt.num_oob_data_elements, rndis_msg->msg.pkt.oob_data_offset, rndis_msg->msg.pkt.oob_data_len, rndis_msg->msg.pkt.per_pkt_info_offset, rndis_msg->msg.pkt.per_pkt_info_len); break; case RNDIS_MSG_INIT_C: netdev_dbg(netdev, "RNDIS_MSG_INIT_C " "(len %u, id 0x%x, status 0x%x, major %d, minor %d, " "device flags %d, max xfer size 0x%x, max pkts %u, " "pkt aligned %u)\n", rndis_msg->msg_len, rndis_msg->msg.init_complete.req_id, rndis_msg->msg.init_complete.status, rndis_msg->msg.init_complete.major_ver, rndis_msg->msg.init_complete.minor_ver, rndis_msg->msg.init_complete.dev_flags, rndis_msg->msg.init_complete.max_xfer_size, rndis_msg->msg.init_complete. max_pkt_per_msg, rndis_msg->msg.init_complete. pkt_alignment_factor); break; case RNDIS_MSG_QUERY_C: netdev_dbg(netdev, "RNDIS_MSG_QUERY_C " "(len %u, id 0x%x, status 0x%x, buf len %u, " "buf offset %u)\n", rndis_msg->msg_len, rndis_msg->msg.query_complete.req_id, rndis_msg->msg.query_complete.status, rndis_msg->msg.query_complete. info_buflen, rndis_msg->msg.query_complete. 
info_buf_offset); break; case RNDIS_MSG_SET_C: netdev_dbg(netdev, "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n", rndis_msg->msg_len, rndis_msg->msg.set_complete.req_id, rndis_msg->msg.set_complete.status); break; case RNDIS_MSG_INDICATE: netdev_dbg(netdev, "RNDIS_MSG_INDICATE " "(len %u, status 0x%x, buf len %u, buf offset %u)\n", rndis_msg->msg_len, rndis_msg->msg.indicate_status.status, rndis_msg->msg.indicate_status.status_buflen, rndis_msg->msg.indicate_status.status_buf_offset); break; default: netdev_dbg(netdev, "0x%x (len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); break; } }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen16450.00%19.09%
Haiyang Zhang14343.60%436.36%
Linus Walleij103.05%19.09%
Greg Kroah-Hartman72.13%218.18%
Vitaly Kuznetsov20.61%19.09%
Stephen Hemminger10.30%19.09%
K. Y. Srinivasan10.30%19.09%
Total328100.00%11100.00%


static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { int ret; struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); /* Setup the packet to send it */ packet = &req->pkt; packet->total_data_buflen = req->request_msg.msg_len; packet->page_buf_cnt = 1; pb[0].pfn = virt_to_phys(&req->request_msg) >> PAGE_SHIFT; pb[0].len = req->request_msg.msg_len; pb[0].offset = (unsigned long)&req->request_msg & (PAGE_SIZE - 1); /* Add one page_buf when request_msg crossing page boundary */ if (pb[0].offset + pb[0].len > PAGE_SIZE) { packet->page_buf_cnt++; pb[0].len = PAGE_SIZE - pb[0].offset; pb[1].pfn = virt_to_phys((void *)&req->request_msg + pb[0].len) >> PAGE_SHIFT; pb[1].offset = 0; pb[1].len = req->request_msg.msg_len - pb[0].len; } ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang11044.72%633.33%
Hank Janssen7831.71%15.56%
K. Y. Srinivasan3413.82%527.78%
Vitaly Kuznetsov145.69%15.56%
Greg Kroah-Hartman72.85%316.67%
Nicolas Palix20.81%15.56%
Bill Pemberton10.41%15.56%
Total246100.00%18100.00%


static void rndis_set_link_state(struct rndis_device *rdev, struct rndis_request *request) { u32 link_status; struct rndis_query_complete *query_complete; query_complete = &request->response_msg.msg.query_complete; if (query_complete->status == RNDIS_STATUS_SUCCESS && query_complete->info_buflen == sizeof(u32)) { memcpy(&link_status, (void *)((unsigned long)query_complete + query_complete->info_buf_offset), sizeof(u32)); rdev->link_state = link_status != 0; } }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang89100.00%1100.00%
Total89100.00%1100.00%


static void rndis_filter_receive_response(struct rndis_device *dev, struct rndis_message *resp) { struct rndis_request *request = NULL; bool found = false; unsigned long flags; struct net_device *ndev = dev->ndev; spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { /* * All request/response message contains RequestId as the 1st * field */ if (request->request_msg.msg.init_req.req_id == resp->msg.init_complete.req_id) { found = true; break; } } spin_unlock_irqrestore(&dev->request_lock, flags); if (found) { if (resp->msg_len <= sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, resp->msg_len); if (request->request_msg.ndis_msg_type == RNDIS_MSG_QUERY && request->request_msg.msg. query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) rndis_set_link_state(dev, request); } else { netdev_err(ndev, "rndis response buffer overflow " "detected (size %u max %zu)\n", resp->msg_len, sizeof(struct rndis_message)); if (resp->ndis_msg_type == RNDIS_MSG_RESET_C) { /* does not have a request id field */ request->response_msg.msg.reset_complete. status = RNDIS_STATUS_BUFFER_OVERFLOW; } else { request->response_msg.msg. init_complete.status = RNDIS_STATUS_BUFFER_OVERFLOW; } } complete(&request->wait_event); } else { netdev_err(ndev, "no rndis request found for this response " "(id 0x%x res type 0x%x)\n", resp->msg.init_complete.req_id, resp->ndis_msg_type); } }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen13151.57%29.09%
Haiyang Zhang8031.50%627.27%
Greg Kroah-Hartman3011.81%627.27%
K. Y. Srinivasan51.97%418.18%
Linus Walleij31.18%29.09%
Bill Pemberton31.18%14.55%
Vitaly Kuznetsov20.79%14.55%
Total254100.00%22100.00%

/*
 * Get the Per-Packet-Info with the specified type.
 * Return NULL if not found.
 */
static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
{
	struct rndis_per_packet_info *ppi;
	int len;

	if (rpkt->per_pkt_info_offset == 0)
		return NULL;

	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
		rpkt->per_pkt_info_offset);
	len = rpkt->per_pkt_info_len;

	/* Walk the PPI chain; each entry records its own total size. */
	while (len > 0) {
		if (ppi->type == type)
			return (void *)((ulong)ppi + ppi->ppi_offset);
		len -= ppi->size;
		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
	}

	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang118100.00%1100.00%
Total118100.00%1100.00%


static int rndis_filter_receive_data(struct net_device *ndev, struct rndis_device *dev, struct rndis_message *msg, struct vmbus_channel *channel, void *data, u32 data_buflen) { struct rndis_packet *rndis_pkt = &msg->msg.pkt; const struct ndis_tcp_ip_checksum_info *csum_info; const struct ndis_pkt_8021q_info *vlan; u32 data_offset; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; data_buflen -= data_offset; /* * Make sure we got a valid RNDIS message, now total_data_buflen * should be the data packet size plus the trailer padding size */ if (unlikely(data_buflen < rndis_pkt->data_len)) { netdev_err(dev->ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", data_buflen, rndis_pkt->data_len); return NVSP_STAT_FAIL; } vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); return netvsc_recv_callback(ndev, channel, data, rndis_pkt->data_len, csum_info, vlan); }

Contributors

PersonTokensPropCommitsCommitProp
Stephen Hemminger4528.12%15.26%
K. Y. Srinivasan3119.38%631.58%
Wei Yongjun2918.12%15.26%
Haiyang Zhang2213.75%526.32%
Hank Janssen2213.75%15.26%
Greg Kroah-Hartman63.75%210.53%
Vitaly Kuznetsov31.88%15.26%
Nicolas Palix10.62%15.26%
Bill Pemberton10.62%15.26%
Total160100.00%19100.00%


int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, struct hv_device *dev, struct vmbus_channel *channel, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct rndis_device *rndis_dev = net_dev->extension; struct rndis_message *rndis_msg = data; /* Make sure the rndis device state is initialized */ if (unlikely(!rndis_dev)) { netif_err(net_device_ctx, rx_err, ndev, "got rndis message but no rndis device!\n"); return NVSP_STAT_FAIL; } if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { netif_err(net_device_ctx, rx_err, ndev, "got rndis message uninitialized\n"); return NVSP_STAT_FAIL; } if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: /* completion msgs */ rndis_filter_receive_response(rndis_dev, rndis_msg); break; case RNDIS_MSG_INDICATE: /* notification msgs */ netvsc_linkstatus_callback(dev, rndis_msg); break; default: netdev_err(ndev, "unhandled rndis message (type %u len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); break; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen7537.31%28.33%
Stephen Hemminger5024.88%14.17%
Haiyang Zhang3416.92%833.33%
K. Y. Srinivasan125.97%312.50%
Vitaly Kuznetsov115.47%14.17%
Simon Xiao52.49%14.17%
Greg Kroah-Hartman52.49%312.50%
Linus Walleij52.49%14.17%
Nicolas Palix21.00%28.33%
Bill Pemberton21.00%28.33%
Total201100.00%24100.00%


/*
 * Issue a synchronous RNDIS query for the given OID and copy the result
 * into *result (at most the caller-supplied *result_size bytes; updated to
 * the actual length on return). Two OIDs need an input buffer describing
 * what the guest supports: hardware-offload and RSS capability queries.
 * Returns 0 on success, -EINVAL/-ENOMEM/-1 on failure.
 */
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
				     void *result, u32 *result_size)
{
	struct rndis_request *request;
	u32 inresult_size = *result_size;
	struct rndis_query_request *query;
	struct rndis_query_complete *query_complete;
	int ret = 0;

	if (!result)
		return -EINVAL;

	*result_size = 0;
	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis query */
	query = &request->request_msg.msg.query_req;
	query->oid = oid;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->info_buflen = 0;
	query->dev_vc_handle = 0;

	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
		struct net_device_context *ndevctx = netdev_priv(dev->ndev);
		struct netvsc_device *nvdev = ndevctx->nvdev;
		struct ndis_offload *hwcaps;
		u32 nvsp_version = nvdev->nvsp_version;
		u8 ndis_rev;
		size_t size;

		/* Pick the NDIS offload revision matching the NVSP version. */
		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
			size = NDIS_OFFLOAD_SIZE;
		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
			size = NDIS_OFFLOAD_SIZE_6_1;
		} else {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
			size = NDIS_OFFLOAD_SIZE_6_0;
		}

		request->request_msg.msg_len += size;
		query->info_buflen = size;
		hwcaps = (struct ndis_offload *)
			((unsigned long)query + query->info_buf_offset);

		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
		hwcaps->header.revision = ndis_rev;
		hwcaps->header.size = size;

	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
		struct ndis_recv_scale_cap *cap;

		request->request_msg.msg_len +=
			sizeof(struct ndis_recv_scale_cap);
		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
						     query->info_buf_offset);
		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
	}

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	/* Copy the response back */
	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->info_buflen > inresult_size) {
		ret = -1;
		goto cleanup;
	}

	memcpy(result,
	       (void *)((unsigned long)query_complete +
			query_complete->info_buf_offset),
	       query_complete->info_buflen);

	*result_size = query_complete->info_buflen;

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen15733.62%15.26%
Stephen Hemminger14931.91%15.26%
Haiyang Zhang12226.12%421.05%
Greg Kroah-Hartman183.85%526.32%
Bill Pemberton102.14%210.53%
K. Y. Srinivasan91.93%421.05%
Vitaly Kuznetsov10.21%15.26%
Linus Walleij10.21%15.26%
Total467100.00%19100.00%

/* Get the hardware offload capabilities */
static int rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) { u32 caps_len = sizeof(*caps); int ret; memset(caps, 0, sizeof(*caps)); ret = rndis_filter_query_device(dev, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, caps, &caps_len); if (ret) return ret; if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) { netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n", caps->header.type); return -EINVAL; } if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) { netdev_warn(dev->ndev, "invalid NDIS objrev %x\n", caps->header.revision); return -EINVAL; } if (caps->header.size > caps_len || caps->header.size < NDIS_OFFLOAD_SIZE_6_0) { netdev_warn(dev->ndev, "invalid NDIS objsize %u, data size %u\n", caps->header.size, caps_len); return -EINVAL; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Stephen Hemminger168100.00%1100.00%
Total168100.00%1100.00%


static int rndis_filter_query_device_mac(struct rndis_device *dev) { u32 size = ETH_ALEN; return rndis_filter_query_device(dev, RNDIS_OID_802_3_PERMANENT_ADDRESS, dev->hw_mac_adr, &size); }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen2167.74%116.67%
Haiyang Zhang619.35%233.33%
Greg Kroah-Hartman39.68%233.33%
Stephen Hemminger13.23%116.67%
Total31100.00%6100.00%

#define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) { struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; struct rndis_config_parameter_info *cpi; wchar_t *cfg_nwadr, *cfg_mac; struct rndis_set_complete *set_complete; char macstr[2*ETH_ALEN+1]; u32 extlen = sizeof(struct rndis_config_parameter_info) + 2*NWADR_STRLEN + 4*ETH_ALEN; int ret; request = get_rndis_request(rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; cpi = (struct rndis_config_parameter_info *)((ulong)set + set->info_buf_offset); cpi->parameter_name_offset = sizeof(struct rndis_config_parameter_info); /* Multiply by 2 because host needs 2 bytes (utf16) for each char */ cpi->parameter_name_length = 2*NWADR_STRLEN; cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING; cpi->parameter_value_offset = cpi->parameter_name_offset + cpi->parameter_name_length; /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */ cpi->parameter_value_length = 4*ETH_ALEN; cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset); cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset); ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN, cfg_nwadr, NWADR_STRLEN); if (ret < 0) goto cleanup; snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac); ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN, cfg_mac, 2*ETH_ALEN); if (ret < 0) goto cleanup; ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status != RNDIS_STATUS_SUCCESS) { netdev_err(ndev, "Fail to set MAC on 
host side:0x%x\n", set_complete->status); ret = -EINVAL; } cleanup: put_rndis_request(rdev, request); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang38096.20%228.57%
Vitaly Kuznetsov143.54%457.14%
Nicholas Mc Guire10.25%114.29%
Total395100.00%7100.00%


static int rndis_filter_set_offload_params(struct net_device *ndev, struct ndis_offload_params *req_offloads) { struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; struct ndis_offload_params *offload_params; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_offload_params); int ret; u32 vsp_version = nvdev->nvsp_version; if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { extlen = VERSION_4_OFFLOAD_SIZE; /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support * UDP checksum offload. */ req_offloads->udp_ip_v4_csum = 0; req_offloads->udp_ip_v6_csum = 0; } request = get_rndis_request(rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = OID_TCP_OFFLOAD_PARAMETERS; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; offload_params = (struct ndis_offload_params *)((ulong)set + set->info_buf_offset); *offload_params = *req_offloads; offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT; offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3; offload_params->header.size = extlen; ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status != RNDIS_STATUS_SUCCESS) { netdev_err(ndev, "Fail to set offload on host side:0x%x\n", set_complete->status); ret = -EINVAL; } cleanup: put_rndis_request(rdev, request); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
K. Y. Srinivasan27194.43%225.00%
Vitaly Kuznetsov144.88%450.00%
Lad Prabhakar10.35%112.50%
Nicholas Mc Guire10.35%112.50%
Total287100.00%8100.00%


int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *rss_key, int num_queue) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_recv_scale_param) + 4 * ITAB_NUM + NETVSC_HASH_KEYLEN; struct ndis_recv_scale_param *rssp; u32 *itab; u8 *keyp; int i, ret; request = get_rndis_request( rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; rssp = (struct ndis_recv_scale_param *)(set + 1); rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(struct ndis_recv_scale_param); rssp->flag = 0; rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = NETVSC_HASH_KEYLEN; rssp->kashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) itab[i] = rdev->ind_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status == RNDIS_STATUS_SUCCESS) memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; } cleanup: put_rndis_request(rdev, request); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang22657.95%529.41%
Hank Janssen11028.21%15.88%
Stephen Hemminger328.21%211.76%
Greg Kroah-Hartman143.59%317.65%
K. Y. Srinivasan61.54%423.53%
Vitaly Kuznetsov10.26%15.88%
Linus Walleij10.26%15.88%
Total390100.00%17100.00%


static int rndis_filter_query_device_link_status(struct rndis_device *dev) { u32 size = sizeof(u32); u32 link_status; int ret; ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, &link_status, &size); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang4195.35%150.00%
Hank Janssen24.65%150.00%
Total43100.00%2100.00%


static int rndis_filter_query_link_speed(struct rndis_device *dev) { u32 size = sizeof(u32); u32 link_speed; struct net_device_context *ndc; int ret; ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, &link_speed, &size); if (!ret) { ndc = netdev_priv(dev->ndev); /* The link speed reported from host is in 100bps unit, so * we convert it to Mbps here. */ ndc->speed = link_speed / 10000; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang73100.00%1100.00%
Total73100.00%1100.00%


int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) { struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; int ret; request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32)); if (!request) { ret = -ENOMEM; goto cleanup; } /* Setup the rndis set */ set = &request->request_msg.msg.set_req; set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; set->info_buflen = sizeof(u32); set->info_buf_offset = sizeof(struct rndis_set_request); memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request), &new_filter, sizeof(u32)); ret = rndis_filter_send_request(dev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; cleanup: if (request) put_rndis_request(dev, request); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang9551.91%428.57%
Hank Janssen7138.80%17.14%
K. Y. Srinivasan73.83%428.57%
Greg Kroah-Hartman73.83%214.29%
Vitaly Kuznetsov10.55%17.14%
Bill Pemberton10.55%17.14%
Nicholas Mc Guire10.55%17.14%
Total183100.00%14100.00%


static int rndis_filter_init_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_initialize_request *init; struct rndis_initialize_complete *init_complete; u32 status; int ret; struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev); request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); if (!request) { ret = -ENOMEM; goto cleanup; } /* Setup the rndis set */ init = &request->request_msg.msg.init_req; init->major_ver = RNDIS_MAJOR_VERSION; init->minor_ver = RNDIS_MINOR_VERSION; init->max_xfer_size = 0x4000; dev->state = RNDIS_DEV_INITIALIZING; ret = rndis_filter_send_request(dev, request); if (ret != 0) { dev->state = RNDIS_DEV_UNINITIALIZED; goto cleanup; } wait_for_completion(&request->wait_event); init_complete = &request->response_msg.msg.init_complete; status = init_complete->status; if (status == RNDIS_STATUS_SUCCESS) { dev->state = RNDIS_DEV_INITIALIZED; nvdev->max_pkt = init_complete->max_pkt_per_msg; nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor; ret = 0; } else { dev->state = RNDIS_DEV_UNINITIALIZED; ret = -EINVAL; } cleanup: if (request) put_rndis_request(dev, request); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang16271.37%541.67%
Hank Janssen5022.03%18.33%
Vitaly Kuznetsov125.29%325.00%
K. Y. Srinivasan20.88%216.67%
Nicholas Mc Guire10.44%18.33%
Total227100.00%12100.00%


static bool netvsc_device_idle(const struct netvsc_device *nvdev) { int i; if (atomic_read(&nvdev->num_outstanding_recvs) > 0) return false; for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; if (atomic_read(&nvchan->queue_sends) > 0) return false; } return true; }

Contributors

PersonTokensPropCommitsCommitProp
Stephen Hemminger79100.00%1100.00%
Total79100.00%1100.00%


static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); struct netvsc_device *nvdev = net_device_ctx->nvdev; struct hv_device *hdev = net_device_ctx->device_ctx; ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, RNDIS_MESSAGE_SIZE(struct rndis_halt_request)); if (!request) goto cleanup; /* Setup the rndis set */ halt = &request->request_msg.msg.halt_req; halt->req_id = atomic_inc_return(&dev->new_req_id); /* Ignore return since this msg is optional. */ rndis_filter_send_request(dev, request); dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: spin_lock_irqsave(&hdev->channel->inbound_lock, flags); nvdev->destroy = true; spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); /* Wait for all send completions */ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); if (request) put_rndis_request(dev, request); }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang7543.60%535.71%
Hank Janssen6537.79%17.14%
Vitaly Kuznetsov169.30%17.14%
Greg Kroah-Hartman84.65%214.29%
Bill Pemberton42.33%214.29%
K. Y. Srinivasan21.16%17.14%
Stephen Hemminger10.58%17.14%
Linus Walleij10.58%17.14%
Total172100.00%14100.00%


static int rndis_filter_open_device(struct rndis_device *dev) { int ret; if (dev->state != RNDIS_DEV_INITIALIZED) return 0; ret = rndis_filter_set_packet_filter(dev, NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); if (ret == 0) dev->state = RNDIS_DEV_DATAINITIALIZED; return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen4177.36%120.00%
Haiyang Zhang1018.87%360.00%
Greg Kroah-Hartman23.77%120.00%
Total53100.00%5100.00%


static int rndis_filter_close_device(struct rndis_device *dev) { int ret; if (dev->state != RNDIS_DEV_DATAINITIALIZED) return 0; ret = rndis_filter_set_packet_filter(dev, 0); if (ret == -ENODEV) ret = 0; if (ret == 0) dev->state = RNDIS_DEV_INITIALIZED; return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Hank Janssen3965.00%120.00%
Haiyang Zhang1931.67%360.00%
Greg Kroah-Hartman23.33%120.00%
Total60100.00%5100.00%


static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); u16 chn_index = new_sc->offermsg.offer.sub_channel_index; int ret; unsigned long flags; if (chn_index >= nvscdev->num_chn) return; nvscdev->chan_table[chn_index].mrc.buf = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); if (ret == 0) nvscdev->chan_table[chn_index].channel = new_sc; spin_lock_irqsave(&nvscdev->sc_lock, flags); nvscdev->num_sc_offered--; spin_unlock_irqrestore(&nvscdev->sc_lock, flags); if (nvscdev->num_sc_offered == 0) complete(&nvscdev->channel_init_wait); }

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang12473.37%327.27%
Vitaly Kuznetsov2313.61%218.18%
Hank Janssen95.33%19.09%
Stephen Hemminger63.55%19.09%
K. Y. Srinivasan52.96%218.18%
Bill Pemberton10.59%19.09%
Nicolas Palix10.59%19.09%
Total169100.00%11100.00%


/*
 * Bring up the RNDIS filter on top of a freshly added netvsc device:
 * create the netvsc channel, initialize the RNDIS device, query MTU, MAC
 * and hardware offload capabilities, program offload parameters, and (for
 * NVSP >= 5) negotiate VRSS sub-channels with the host.
 *
 * Returns 0 on success and also when sub-channel setup fails (the primary
 * channel alone is still usable); returns a negative error and tears the
 * device down when basic initialization fails.
 *
 * NOTE(review): error handling is uneven — early failures call
 * rndis_filter_device_remove() inline, the offload-params failure uses the
 * err_dev_remv label, and vRSS failures fall through to "out" which resets
 * channel counts but still returns 0.  This appears intentional (primary
 * channel can run alone) but is worth confirming.
 */
int rndis_filter_device_add(struct hv_device *dev,
			    struct netvsc_device_info *device_info)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	struct nvsp_message *init_packet;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u32 mtu, size;
	u32 num_rss_qs;
	u32 sc_delta;
	const struct cpumask *node_cpu_mask;
	u32 num_possible_rss_qs;
	unsigned long flags;
	int i, ret;

	rndis_device = get_rndis_device();
	if (!rndis_device)
		return -ENODEV;

	/*
	 * Let the inner driver handle this first to create the netvsc channel
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (RndisFilterOnReceive()) before this call is completed
	 */
	ret = netvsc_device_add(dev, device_info);
	if (ret != 0) {
		kfree(rndis_device);
		return ret;
	}

	/* Initialize the rndis device */
	net_device = net_device_ctx->nvdev;
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	spin_lock_init(&net_device->sc_lock);

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device);
	if (ret != 0) {
		rndis_filter_device_remove(dev, net_device);
		return ret;
	}

	/* Get the MTU from the host; only shrink, never grow, net->mtu. */
	size = sizeof(u32);
	ret = rndis_filter_query_device(rndis_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
					&mtu, &size);
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
		net->mtu = mtu;

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device);
	if (ret != 0) {
		rndis_filter_device_remove(dev, net_device);
		return ret;
	}

	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, &hwcaps);
	if (ret != 0) {
		rndis_filter_device_remove(dev, net_device);
		return ret;
	}

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum, always does in kernel */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features = NETIF_F_RXCSUM;

	/* IPv4: enable TCP checksum/TSO/UDP checksum offload only when the
	 * host advertises the corresponding capability bits.
	 */
	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			/* Respect the host's maximum LSOv2 segment size. */
			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;
		}

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
		}
	}

	/* IPv6: same scheme as IPv4 above. */
	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;
		}

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
		}
	}

	netif_set_gso_max_size(net, gso_max_size);

	ret = rndis_filter_set_offload_params(net, &offloads);
	if (ret)
		goto err_dev_remv;

	rndis_filter_query_device_link_status(rndis_device);

	device_info->link_state = rndis_device->link_state;

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   device_info->link_state ? "down" : "up");

	/* Sub-channels (vRSS) require NVSP 5 or later; older hosts are done. */
	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return 0;

	rndis_filter_query_link_speed(rndis_device);

	/* vRSS setup */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)
		goto out;

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);

	num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);

	/*
	 * We will limit the VRSS channels to the number CPUs in the NUMA node
	 * the primary channel is currently bound to.
	 */
	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
	num_possible_rss_qs = cpumask_weight(node_cpu_mask);

	/* We will use the given number of channels if available. */
	if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
		net_device->num_chn = device_info->num_chn;
	else
		net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);

	/* From here on, num_rss_qs is the number of sub-channels requested
	 * (total channels minus the primary).
	 */
	num_rss_qs = net_device->num_chn - 1;

	/* Default RSS indirection table: round-robin over the channels. */
	for (i = 0; i < ITAB_NUM; i++)
		rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
							net_device->num_chn);

	net_device->num_sc_offered = num_rss_qs;

	if (net_device->num_chn == 1)
		goto out;

	/* netvsc_sc_open() will be called for each sub-channel offer. */
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	/* Ask the host to allocate the sub-channels. */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
						net_device->num_chn - 1;
	ret = vmbus_sendpacket(dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto out;
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		ret = -ENODEV;
		goto out;
	}
	/* The host may grant fewer sub-channels than requested. */
	net_device->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
					 net_device->num_chn);

	/*
	 * Set the number of sub-channels to be received.  sc_delta corrects
	 * num_sc_offered for the difference between requested and granted
	 * sub-channels so netvsc_sc_open()'s countdown terminates.
	 */
	spin_lock_irqsave(&net_device->sc_lock, flags);
	sc_delta = num_rss_qs - (net_device->num_chn - 1);
	net_device->num_sc_offered -= sc_delta;
	spin_unlock_irqrestore(&net_device->sc_lock, flags);

out:
	/* vRSS failure is non-fatal: fall back to the single primary channel. */
	if (ret) {
		net_device->max_chn = 1;
		net_device->num_chn = 1;
		net_device->num_sc_offered = 0;
	}

	return 0; /* return 0 because primary channel can be used alone */

err_dev_remv:
	rndis_filter_device_remove(dev, net_device);
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang41038.61%1131.43%
Stephen Hemminger29227.50%617.14%
K. Y. Srinivasan20419.21%720.00%
Hank Janssen797.44%25.71%
Andrew Schwartzmeyer423.95%25.71%
Vitaly Kuznetsov312.92%514.29%
Bill Pemberton30.28%12.86%
Greg Kroah-Hartman10.09%12.86%
Total1062100.00%35100.00%


/*
 * Tear down the RNDIS filter and the underlying netvsc device.
 *
 * Ordering matters: first wait until every offered sub-channel has been
 * processed by netvsc_sc_open() (which decrements num_sc_offered and
 * signals channel_init_wait), then halt the RNDIS device, free the
 * rndis_device the filter owns, and finally remove the netvsc device.
 */
void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *net_dev)
{
	struct rndis_device *rndis_dev = net_dev->extension;

	/* If not all subchannel offers are complete, wait for them until
	 * completion to avoid race.
	 */
	if (net_dev->num_sc_offered > 0)
		wait_for_completion(&net_dev->channel_init_wait);

	/* Halt and release the rndis device */
	rndis_filter_halt_device(rndis_dev);

	kfree(rndis_dev);
	net_dev->extension = NULL;	/* avoid a dangling pointer to rndis_dev */

	netvsc_device_remove(dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang2742.86%533.33%
Hank Janssen2133.33%16.67%
Vitaly Kuznetsov46.35%213.33%
Greg Kroah-Hartman34.76%213.33%
Stephen Hemminger34.76%16.67%
Bill Pemberton23.17%213.33%
Nicolas Palix23.17%16.67%
K. Y. Srinivasan11.59%16.67%
Total63100.00%15100.00%


/*
 * Reference-counted open: the underlying RNDIS device is actually opened
 * only on the first caller (open_cnt transitions 0 -> 1); later callers
 * just bump the count and succeed.
 *
 * Returns -EINVAL when no netvsc device is present, otherwise 0 or the
 * error from rndis_filter_open_device().
 */
int rndis_filter_open(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	/* Already open for another user: just account for this one. */
	if (atomic_inc_return(&nvdev->open_cnt) > 1)
		return 0;

	return rndis_filter_open_device(nvdev->extension);
}

Contributors

PersonTokensPropCommitsCommitProp
K. Y. Srinivasan1433.33%112.50%
Hank Janssen1023.81%112.50%
Bill Pemberton819.05%112.50%
Vitaly Kuznetsov511.90%112.50%
Haiyang Zhang37.14%225.00%
Greg Kroah-Hartman12.38%112.50%
Nicolas Palix12.38%112.50%
Total42100.00%8100.00%


/*
 * Reference-counted close: the underlying RNDIS device is actually closed
 * only when the last user goes away (open_cnt transitions 1 -> 0); other
 * callers just drop their reference and succeed.
 *
 * Returns -EINVAL when no netvsc device is present, otherwise 0 or the
 * error from rndis_filter_close_device().
 */
int rndis_filter_close(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	/* Other users remain: only drop this reference. */
	if (atomic_dec_return(&nvdev->open_cnt) > 0)
		return 0;

	return rndis_filter_close_device(nvdev->extension);
}

Contributors

PersonTokensPropCommitsCommitProp
K. Y. Srinivasan1535.71%110.00%
Hank Janssen819.05%110.00%
Bill Pemberton819.05%110.00%
Haiyang Zhang511.90%330.00%
Vitaly Kuznetsov49.52%220.00%
Nicolas Palix12.38%110.00%
Greg Kroah-Hartman12.38%110.00%
Total42100.00%10100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Haiyang Zhang245441.95%3529.91%
Hank Janssen131622.50%21.71%
Stephen Hemminger92115.74%97.69%
K. Y. Srinivasan63310.82%3025.64%
Greg Kroah-Hartman1903.25%1311.11%
Vitaly Kuznetsov1582.70%86.84%
Bill Pemberton570.97%75.98%
Andrew Schwartzmeyer420.72%21.71%
Wei Yongjun290.50%10.85%
Linus Walleij210.36%21.71%
Nicolas Palix120.21%21.71%
Simon Xiao50.09%10.85%
Nicholas Mc Guire40.07%10.85%
Stephen Rothwell30.05%10.85%
Tejun Heo30.05%10.85%
Jeff Kirsher10.02%10.85%
Lad Prabhakar10.02%10.85%
Total5850100.00%117100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.