cregit-Linux: how code gets into the kernel

Release 4.16 drivers/usb/usbip/vhci_rx.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/slab.h>

#include "usbip_common.h"
#include "vhci.h"

/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */

/*
 * Scan vdev->priv_rx for the entry whose sequence number matches @seqnum.
 * On a match, detach and free the vhci_priv bookkeeping record and hand
 * the associated URB back to the caller (its hcpriv link is cleared).
 * Returns NULL when no in-flight URB carries the requested sequence
 * number.  Caller must hold vdev->priv_lock.
 */
struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
{
	struct vhci_priv *pos, *next;
	struct urb *found = NULL;

	list_for_each_entry_safe(pos, next, &vdev->priv_rx, list) {
		int urb_status;

		if (pos->seqnum != seqnum)
			continue;

		found = pos->urb;
		urb_status = found->status;
		usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);

		switch (urb_status) {
		case -ENOENT:
			/* fall through */
		case -ECONNRESET:
			dev_dbg(&found->dev->dev,
				"urb seq# %u was unlinked %ssynchronously\n",
				seqnum, urb_status == -ENOENT ? "" : "a");
			break;
		case -EINPROGRESS:
			/* no info output */
			break;
		default:
			dev_dbg(&found->dev->dev,
				"urb seq# %u may be in a error, status %d\n",
				seqnum, urb_status);
		}

		list_del(&pos->list);
		kfree(pos);
		found->hcpriv = NULL;

		break;
	}

	return found;
}

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi13484.81%120.00%
Stefan Reif1610.13%120.00%
Shuah Khan63.80%120.00%
Brian G. Merrell10.63%120.00%
Colin Ian King10.63%120.00%
Total158100.00%5100.00%


/*
 * Handle a USBIP_RET_SUBMIT packet: match it to the URB that was
 * transmitted with the same sequence number, copy the transfer results
 * into that URB (header fields, transfer buffer, ISO descriptors) and
 * give it back to the USB core.
 *
 * NOTE(review): on usbip_recv_xbuff()/usbip_recv_iso() failure we just
 * return without giving the URB back — presumably those helpers raise
 * the error event themselves; confirm against usbip_common.c.
 */
static void vhci_recv_ret_submit(struct vhci_device *vdev,
				 struct usbip_header *pdu)
{
	struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
	struct vhci *vhci = vhci_hcd->vhci;
	struct usbip_device *ud = &vdev->ud;
	struct urb *urb;
	unsigned long flags;

	/* priv_lock protects the priv_rx list the URB is looked up in */
	spin_lock_irqsave(&vdev->priv_lock, flags);
	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	if (!urb) {
		/* no matching in-flight URB: the connection state is suspect */
		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
			pdu->base.seqnum,
			atomic_read(&vhci_hcd->seqnum));
		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
		return;
	}

	/* unpack the pdu to a urb */
	usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);

	/* recv transfer buffer */
	if (usbip_recv_xbuff(ud, urb) < 0)
		return;

	/* recv iso_packet_descriptor */
	if (usbip_recv_iso(ud, urb) < 0)
		return;

	/* restore the padding in iso packets */
	usbip_pad_iso(ud, urb);

	if (usbip_dbg_flag_vhci_rx)
		usbip_dump_urb(urb);

	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);

	/* vhci->lock guards the HCD endpoint URB list */
	spin_lock_irqsave(&vhci->lock, flags);
	usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
	spin_unlock_irqrestore(&vhci->lock, flags);

	/* giveback runs after both locks have been dropped */
	usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb, urb->status);

	usbip_dbg_vhci_rx("Leave\n");
}

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi16168.22%19.09%
Andrew Goodbody166.78%19.09%
Yuyang Du166.78%218.18%
Max Vozeler145.93%19.09%
Nobuo Iwata104.24%19.09%
Arjan Mels72.97%19.09%
Shuah Khan72.97%19.09%
Brian G. Merrell31.27%19.09%
Bart Westgeest10.42%19.09%
Matt Mooney10.42%19.09%
Total236100.00%11100.00%


/*
 * Pull the pending unlink request whose seqnum matches pdu->base.seqnum
 * off vdev->unlink_rx.  Ownership of the detached vhci_unlink passes to
 * the caller (who must kfree() it); NULL means nothing with that
 * sequence number is queued.
 */
static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
						  struct usbip_header *pdu)
{
	struct vhci_unlink *entry, *next;
	unsigned long flags;

	spin_lock_irqsave(&vdev->priv_lock, flags);

	list_for_each_entry_safe(entry, next, &vdev->unlink_rx, list) {
		pr_info("unlink->seqnum %lu\n", entry->seqnum);

		if (entry->seqnum != pdu->base.seqnum)
			continue;

		usbip_dbg_vhci_rx("found pending unlink, %lu\n",
				  entry->seqnum);
		list_del(&entry->list);
		spin_unlock_irqrestore(&vdev->priv_lock, flags);
		return entry;
	}

	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi10387.29%125.00%
Andrew Goodbody1311.02%125.00%
Matt Mooney10.85%125.00%
Brian G. Merrell10.85%125.00%
Total118100.00%4100.00%


/*
 * Handle a USBIP_RET_UNLINK packet: the peer answered one of our unlink
 * (URB cancellation) requests.  Find the pending unlink record keyed by
 * the reply's sequence number, then look up the target URB by the
 * original submit's sequence number and give it back with the reported
 * unlink status.
 */
static void vhci_recv_ret_unlink(struct vhci_device *vdev,
				 struct usbip_header *pdu)
{
	struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
	struct vhci *vhci = vhci_hcd->vhci;
	struct vhci_unlink *unlink;
	struct urb *urb;
	unsigned long flags;

	usbip_dump_header(pdu);

	unlink = dequeue_pending_unlink(vdev, pdu);
	if (!unlink) {
		pr_info("cannot find the pending unlink %u\n",
			pdu->base.seqnum);
		return;
	}

	/*
	 * The URB is keyed by the seqnum of the original submit
	 * (unlink->unlink_seqnum), not by the seqnum of the unlink
	 * request itself.
	 */
	spin_lock_irqsave(&vdev->priv_lock, flags);
	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	if (!urb) {
		/*
		 * I get the result of a unlink request. But, it seems that I
		 * already received the result of its submit result and gave
		 * back the URB.
		 */
		pr_info("the urb (seqnum %d) was already given back\n",
			pdu->base.seqnum);
	} else {
		usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);

		/* If unlink is successful, status is -ECONNRESET */
		urb->status = pdu->u.ret_unlink.status;
		pr_info("urb->status %d\n", urb->status);

		/* vhci->lock guards the HCD endpoint URB list */
		spin_lock_irqsave(&vhci->lock, flags);
		usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
		spin_unlock_irqrestore(&vhci->lock, flags);

		/* giveback runs after both locks have been dropped */
		usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb,
				     urb->status);
	}

	kfree(unlink);
}

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi14968.66%19.09%
Andrew Goodbody167.37%19.09%
Yuyang Du156.91%218.18%
Max Vozeler146.45%19.09%
Nobuo Iwata104.61%19.09%
Shuah Khan62.76%19.09%
Matt Mooney41.84%19.09%
Brian G. Merrell10.46%19.09%
Christopher Harvey10.46%19.09%
Bart Westgeest10.46%19.09%
Total217100.00%11100.00%


/*
 * Return nonzero when the priv_rx list is empty, i.e. no transmitted
 * URBs are currently awaiting a reply.  Takes vdev->priv_lock for the
 * duration of the check.
 */
static int vhci_priv_tx_empty(struct vhci_device *vdev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vdev->priv_lock, flags);
	ret = list_empty(&vdev->priv_rx);
	spin_unlock_irqrestore(&vdev->priv_lock, flags);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Max Vozeler4381.13%150.00%
Andrew Goodbody1018.87%150.00%
Total53100.00%2100.00%

/* recv a pdu */
static void vhci_rx_pdu(struct usbip_device *ud) { int ret; struct usbip_header pdu; struct vhci_device *vdev = container_of(ud, struct vhci_device, ud); usbip_dbg_vhci_rx("Enter\n"); memset(&pdu, 0, sizeof(pdu)); /* receive a pdu header */ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu)); if (ret < 0) { if (ret == -ECONNRESET) pr_info("connection reset by peer\n"); else if (ret == -EAGAIN) { /* ignore if connection was idle */ if (vhci_priv_tx_empty(vdev)) return; pr_info("connection timed out with pending urbs\n"); } else if (ret != -ERESTARTSYS) pr_info("xmit failed %d\n", ret); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; } if (ret == 0) { pr_info("connection closed"); usbip_event_add(ud, VDEV_EVENT_DOWN); return; } if (ret != sizeof(pdu)) { pr_err("received pdu size is %d, should be %d\n", ret, (unsigned int)sizeof(pdu)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; } usbip_header_correct_endian(&pdu, 0); if (usbip_dbg_flag_vhci_rx) usbip_dump_header(&pdu); switch (pdu.base.command) { case USBIP_RET_SUBMIT: vhci_recv_ret_submit(vdev, &pdu); break; case USBIP_RET_UNLINK: vhci_recv_ret_unlink(vdev, &pdu); break; default: /* NOT REACHED */ pr_err("unknown pdu %u\n", pdu.base.command); usbip_dump_header(&pdu); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); break; } }

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi17062.73%111.11%
Max Vozeler8531.37%222.22%
Matt Mooney82.95%222.22%
Greg Kroah-Hartman41.48%111.11%
Brian G. Merrell20.74%111.11%
Bart Westgeest10.37%111.11%
Kurt Kanzenbach10.37%111.11%
Total271100.00%9100.00%


/*
 * Kernel-thread body for the vhci receive side: keep pulling PDUs off
 * the peer connection until the thread is asked to stop or a usbip
 * event has been raised on the device.
 */
int vhci_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	for (;;) {
		if (kthread_should_stop())
			break;
		if (usbip_event_happened(ud))
			break;
		vhci_rx_pdu(ud);
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi2972.50%133.33%
Arnd Bergmann1025.00%133.33%
Brian G. Merrell12.50%133.33%
Total40100.00%3100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Takahiro Hirofuchi75367.90%13.85%
Max Vozeler15714.16%311.54%
Andrew Goodbody554.96%13.85%
Yuyang Du312.80%27.69%
Nobuo Iwata201.80%13.85%
Shuah Khan191.71%13.85%
Stefan Reif161.44%13.85%
Matt Mooney161.44%311.54%
Arnd Bergmann121.08%13.85%
Brian G. Merrell90.81%13.85%
Arjan Mels70.63%13.85%
Greg Kroah-Hartman60.54%311.54%
Bart Westgeest30.27%311.54%
Tejun Heo20.18%13.85%
Kurt Kanzenbach10.09%13.85%
Colin Ian King10.09%13.85%
Christopher Harvey10.09%13.85%
Total1109100.00%26100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.