Source: Linux kernel release 4.11, file drivers/usb/usbip/stub_rx.c
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usbip_common.h"
#include "stub.h"
/* Return non-zero if the urb carries a CLEAR_FEATURE(ENDPOINT_HALT) request. */
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_CLEAR_FEATURE)
		return 0;
	if (req->bRequestType != USB_RECIP_ENDPOINT)
		return 0;
	return req->wValue == USB_ENDPOINT_HALT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
/* Return non-zero if the urb carries a SET_INTERFACE control request. */
static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_SET_INTERFACE)
		return 0;
	return req->bRequestType == USB_RECIP_INTERFACE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 44 | 100.00% | 1 | 100.00% |
Total | 44 | 100.00% | 1 | 100.00% |
/* Return non-zero if the urb carries a SET_CONFIGURATION control request. */
static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_SET_CONFIGURATION)
		return 0;
	return req->bRequestType == USB_RECIP_DEVICE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 44 | 100.00% | 1 | 100.00% |
Total | 44 | 100.00% | 1 | 100.00% |
/*
 * Return non-zero if the urb carries a SET_FEATURE(PORT_RESET) request
 * addressed at a hub port, i.e. a device-reset command.
 */
static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	__u16 value = le16_to_cpu(req->wValue);
	__u16 index = le16_to_cpu(req->wIndex);

	if (req->bRequest != USB_REQ_SET_FEATURE ||
	    req->bRequestType != USB_RT_PORT ||
	    value != USB_PORT_FEAT_RESET)
		return 0;

	usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 89 | 97.80% | 1 | 50.00% |
Brian G. Merrell | 2 | 2.20% | 1 | 50.00% |
Total | 91 | 100.00% | 2 | 100.00% |
/*
 * Handle an intercepted CLEAR_FEATURE(ENDPOINT_HALT) request by calling
 * usb_clear_halt() on the stalled endpoint directly, so the usb core can
 * also reset its toggle/state for that endpoint.
 */
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	int target_endp;
	int target_pipe;
	int ret;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb is the target of this clear_halt request (i.e., control
	 * endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */
	if (le16_to_cpu(req->wIndex) & 0x0080)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 135 | 88.24% | 1 | 33.33% |
Matt Mooney | 16 | 10.46% | 1 | 33.33% |
Himangi Saraogi | 2 | 1.31% | 1 | 33.33% |
Total | 153 | 100.00% | 3 | 100.00% |
/*
 * Handle an intercepted SET_INTERFACE request by calling
 * usb_set_interface(), so the usb core updates its host-side endpoint
 * state for the new altsetting.
 */
static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	__u16 alternate = le16_to_cpu(req->wValue);
	__u16 interface = le16_to_cpu(req->wIndex);
	int ret;

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 101 | 84.17% | 1 | 25.00% |
Matt Mooney | 16 | 13.33% | 1 | 25.00% |
Himangi Saraogi | 2 | 1.67% | 1 | 25.00% |
Brian G. Merrell | 1 | 0.83% | 1 | 25.00% |
Total | 120 | 100.00% | 4 | 100.00% |
/*
 * Handle an intercepted SET_CONFIGURATION request by calling
 * usb_set_configuration() on the exported device.  Always returns 0; a
 * failure other than -ENODEV is only logged.
 */
static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	__u16 config = le16_to_cpu(req->wValue);
	int err;

	err = usb_set_configuration(sdev->udev, config);
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Valentina Manea | 51 | 49.04% | 1 | 25.00% |
Takahiro Hirofuchi | 50 | 48.08% | 1 | 25.00% |
Kay Sievers | 2 | 1.92% | 1 | 25.00% |
Matt Mooney | 1 | 0.96% | 1 | 25.00% |
Total | 104 | 100.00% | 4 | 100.00% |
/*
 * Handle an intercepted SET_FEATURE(PORT_RESET) request by resetting the
 * exported device under the usb core's device lock.  Always returns 0;
 * a failure to take the lock is only logged.
 */
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	int err;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	err = usb_lock_device_for_reset(sdev->udev, NULL);
	if (err < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arjan Mels | 53 | 56.99% | 2 | 33.33% |
Takahiro Hirofuchi | 34 | 36.56% | 1 | 16.67% |
Matt Mooney | 3 | 3.23% | 1 | 16.67% |
Kay Sievers | 2 | 2.15% | 1 | 16.67% |
Alexander Popov | 1 | 1.08% | 1 | 16.67% |
Total | 93 | 100.00% | 6 | 100.00% |
/*
* clear_halt, set_interface, and set_configuration require special tricks.
*/
/*
 * Dispatch an incoming control urb to the matching tweak_* handler.
 * Only CLEAR_FEATURE(HALT), SET_INTERFACE, SET_CONFIGURATION and
 * port-reset requests need host-side intervention; everything else is
 * left untouched.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb)) {
		tweak_clear_halt_cmd(urb);
		return;
	}
	if (is_set_interface_cmd(urb)) {
		tweak_set_interface_cmd(urb);
		return;
	}
	if (is_set_configuration_cmd(urb)) {
		tweak_set_configuration_cmd(urb);
		return;
	}
	if (is_reset_device_cmd(urb)) {
		tweak_reset_device_cmd(urb);
		return;
	}
	usbip_dbg_stub_rx("no need to tweak\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 93 | 98.94% | 1 | 50.00% |
Brian G. Merrell | 1 | 1.06% | 1 | 50.00% |
Total | 94 | 100.00% | 2 | 100.00% |
/*
* stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
* By unlinking the urb asynchronously, stub_rx can continuously
* process coming urbs. Even if the urb is unlinked, its completion
* handler will be called and stub_tx will send a return pdu.
*
* See also comments about unlinking strategy in vhci_hcd.c.
*/
/*
 * stub_recv_cmd_unlink - handle a CMD_UNLINK pdu.
 *
 * Looks up the still-pending urb by the seqnum of the original
 * submission and unlinks it asynchronously.  If the urb already
 * completed, only a RET_UNLINK pdu is queued.  Always returns 0.
 *
 * Fix: do not print raw urb pointers ("%p") to the kernel log; the
 * messages now identify the request by seqnum instead, avoiding a
 * kernel-address information leak.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		dev_info(&priv->urb->dev->dev, "unlink urb, seqnum %u\n",
			 pdu->u.cmd_unlink.seqnum);

		/*
		 * This matched urb is not completed yet (i.e., be in
		 * flight in usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that unlinking flag is on, prev->seqnum
		 * is changed from the seqnum of the cancelling urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context. If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/* In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request ?
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink urb, seqnum %u ret %d\n",
				pdu->base.seqnum, ret);
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue. It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 175 | 92.11% | 1 | 25.00% |
Kurt Kanzenbach | 9 | 4.74% | 1 | 25.00% |
Alexander Beregalov | 5 | 2.63% | 1 | 25.00% |
Brian G. Merrell | 1 | 0.53% | 1 | 25.00% |
Total | 190 | 100.00% | 4 | 100.00% |
/*
 * Return non-zero when the pdu addresses this exported device and the
 * device is currently in the SDEV_ST_USED state.
 */
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid;

	if (pdu->base.devid != sdev->devid)
		return 0;

	spin_lock_irq(&ud->lock);
	valid = (ud->status == SDEV_ST_USED);
	spin_unlock_irq(&ud->lock);

	return valid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 68 | 86.08% | 1 | 33.33% |
Márton Németh | 9 | 11.39% | 1 | 33.33% |
Huawei (Harvey) Yang | 2 | 2.53% | 1 | 33.33% |
Total | 79 | 100.00% | 3 | 100.00% |
/*
 * Allocate a stub_priv for an incoming submission and queue it on
 * sdev->priv_init under priv_lock.  On allocation failure an error
 * event is raised and NULL is returned.
 */
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	struct stub_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* GFP_ATOMIC: allocation happens while holding priv_lock */
	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv)
		goto err;

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);
	return priv;

err:
	dev_err(&sdev->udev->dev, "alloc stub_priv\n");
	spin_unlock_irqrestore(&sdev->priv_lock, flags);
	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 136 | 98.55% | 1 | 33.33% |
Wei Yongjun | 1 | 0.72% | 1 | 33.33% |
Alexander Popov | 1 | 0.72% | 1 | 33.33% |
Total | 138 | 100.00% | 3 | 100.00% |
/*
 * get_pipe - translate a CMD_SUBMIT (endpoint, direction) pair into a
 * usb pipe value for the exported device.
 *
 * Returns 0 when the endpoint does not exist or has an unrecognized
 * transfer type; 0 is never a valid pipe value, so callers can treat
 * it as "no pipe".
 *
 * Fix: epnum comes straight off the wire, so a nonexistent endpoint
 * must not crash the host.  The old code called BUG() here, giving a
 * remote peer a trivial kernel-panic primitive; it now logs the error
 * and returns the 0 sentinel already used by the fall-through path.
 */
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep) {
		dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
			epnum);
		return 0;
	}

	epd = &ep->desc;
	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

	/* NOT REACHED */
	dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 217 | 88.93% | 1 | 25.00% |
Endre Kollar | 24 | 9.84% | 1 | 25.00% |
Alexander Popov | 2 | 0.82% | 1 | 25.00% |
Max Vozeler | 1 | 0.41% | 1 | 25.00% |
Total | 244 | 100.00% | 4 | 100.00% |
/*
 * masking_bogus_flags - strip urb transfer_flags that are not valid for
 * this endpoint type and direction before submission.
 *
 * The urb's flags are unpacked from the network pdu (usbip_pack_pdu in
 * stub_recv_cmd_submit), so they cannot be trusted; only a whitelist of
 * flags appropriate to the endpoint survives.
 * NOTE(review): this appears to mirror the flag-sanitizing policy of
 * usb_submit_urb() in the usb core — confirm against current
 * drivers/usb/core/urb.c when updating.
 */
static void masking_bogus_flags(struct urb *urb)
{
int xfertype;
struct usb_device *dev;
struct usb_host_endpoint *ep;
int is_out;
unsigned int allowed;
/* already-submitted or completion-less urbs are left untouched */
if (!urb || urb->hcpriv || !urb->complete)
return;
dev = urb->dev;
if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
return;
/* look up the host-side endpoint addressed by the urb's pipe */
ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
[usb_pipeendpoint(urb->pipe)];
if (!ep)
return;
xfertype = usb_endpoint_type(&ep->desc);
if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *) urb->setup_packet;
if (!setup)
return;
/* a control transfer is OUT if it carries no IN data stage */
is_out = !(setup->bRequestType & USB_DIR_IN) ||
!setup->wLength;
} else {
is_out = usb_endpoint_dir_out(&ep->desc);
}
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
URB_DIR_MASK | URB_FREE_BUFFER);
/*
 * Case order is deliberate: BULK falls through to CONTROL, which
 * falls through to default, accumulating the allowed flags; ISOC
 * is handled separately at the end.
 */
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
if (is_out)
allowed |= URB_ZERO_PACKET;
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_CONTROL:
allowed |= URB_NO_FSBR; /* only affects UHCI */
/* FALLTHROUGH */
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
break;
case USB_ENDPOINT_XFER_ISOC:
allowed |= URB_ISO_ASAP;
break;
}
/* drop everything not whitelisted above */
urb->transfer_flags &= allowed;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Endre Kollar | 234 | 100.00% | 1 | 100.00% |
Total | 234 | 100.00% | 1 | 100.00% |
static void stub_recv_cmd_submit(struct stub_device *sdev,
struct usbip_header *pdu)
{
int ret;
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
/* setup a urb */
if (usb_pipeisoc(pipe))
priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
GFP_KERNEL);
else
priv->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->urb) {
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
/* allocate urb transfer buffer, if needed */
if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
priv->urb->transfer_buffer =
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
if (!priv->urb->transfer_buffer) {
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
}
/* copy urb setup packet */
priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
GFP_KERNEL);
if (!priv->urb->setup_packet) {
dev_err(&udev->dev, "allocate setup_packet\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
/* set other members from the base header of pdu */
priv->urb->context = (void *) priv;
priv->urb->dev = udev;
priv->urb->pipe = pipe;
priv->urb->complete = stub_complete;
usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);
if (usbip_recv_xbuff(ud, priv->urb) < 0)
return;
if (usbip_recv_iso(ud, priv->urb) < 0)
return;
/* no need to submit an intercepted request, but harmless? */
tweak_special_requests(priv->urb);
masking_bogus_flags(priv->urb);
/* urb is now ready to submit */
ret = usb_submit_urb(priv->urb, GFP_KERNEL);
if (ret == 0)
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
dev_err(&udev->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urb);
/*
* Pessimistic.
* This connection will be discarded.
*/
usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
}
usbip_dbg_stub_rx("Leave\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 375 | 93.98% | 1 | 14.29% |
Julia Lawall | 10 | 2.51% | 1 | 14.29% |
Endre Kollar | 7 | 1.75% | 1 | 14.29% |
Alexander Popov | 2 | 0.50% | 1 | 14.29% |
Bart Westgeest | 2 | 0.50% | 1 | 14.29% |
Brian G. Merrell | 2 | 0.50% | 1 | 14.29% |
Max Vozeler | 1 | 0.25% | 1 | 14.29% |
Total | 399 | 100.00% | 7 | 100.00% |
/* recv a pdu */
/*
 * Receive one pdu header from the tcp socket, validate it, and dispatch
 * to the unlink or submit handler.  Any receive error or invalid
 * request raises a SDEV_EVENT_ERROR_TCP event.
 */
static void stub_rx_pdu(struct usbip_device *ud)
{
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->udev->dev;
	struct usbip_header pdu;
	int ret;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);
	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	if (pdu.base.command == USBIP_CMD_UNLINK) {
		stub_recv_cmd_unlink(sdev, &pdu);
	} else if (pdu.base.command == USBIP_CMD_SUBMIT) {
		stub_recv_cmd_submit(sdev, &pdu);
	} else {
		/* NOTREACHED */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 200 | 97.09% | 1 | 16.67% |
Brian G. Merrell | 2 | 0.97% | 1 | 16.67% |
Kurt Kanzenbach | 1 | 0.49% | 1 | 16.67% |
Bart Westgeest | 1 | 0.49% | 1 | 16.67% |
Valentina Manea | 1 | 0.49% | 1 | 16.67% |
Matt Mooney | 1 | 0.49% | 1 | 16.67% |
Total | 206 | 100.00% | 6 | 100.00% |
/*
 * Kernel-thread entry point: keep receiving pdus until the thread is
 * asked to stop or a usbip event is pending.
 */
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	for (;;) {
		if (kthread_should_stop() || usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 29 | 72.50% | 1 | 33.33% |
Arnd Bergmann | 10 | 25.00% | 1 | 33.33% |
Brian G. Merrell | 1 | 2.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Takahiro Hirofuchi | 1852 | 78.91% | 1 | 3.85% |
Endre Kollar | 265 | 11.29% | 2 | 7.69% |
Arjan Mels | 53 | 2.26% | 2 | 7.69% |
Valentina Manea | 52 | 2.22% | 2 | 7.69% |
Matt Mooney | 44 | 1.87% | 3 | 11.54% |
Arnd Bergmann | 13 | 0.55% | 1 | 3.85% |
Brian G. Merrell | 10 | 0.43% | 1 | 3.85% |
Julia Lawall | 10 | 0.43% | 1 | 3.85% |
Kurt Kanzenbach | 10 | 0.43% | 2 | 7.69% |
Márton Németh | 9 | 0.38% | 1 | 3.85% |
Alexander Popov | 6 | 0.26% | 1 | 3.85% |
Alexander Beregalov | 5 | 0.21% | 1 | 3.85% |
Kay Sievers | 4 | 0.17% | 1 | 3.85% |
Himangi Saraogi | 4 | 0.17% | 1 | 3.85% |
Bart Westgeest | 3 | 0.13% | 2 | 7.69% |
Tejun Heo | 2 | 0.09% | 1 | 3.85% |
Huawei (Harvey) Yang | 2 | 0.09% | 1 | 3.85% |
Max Vozeler | 2 | 0.09% | 1 | 3.85% |
Wei Yongjun | 1 | 0.04% | 1 | 3.85% |
Total | 2347 | 100.00% | 26 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.