Release 4.7 drivers/usb/host/ehci-hcd.c
/*
* Enhanced Host Controller Interface (EHCI) driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* Copyright (c) 2000-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#if defined(CONFIG_PPC_PS3)
#include <asm/firmware.h>
#endif
/*-------------------------------------------------------------------------*/
/*
* EHCI hc_driver implementation ... experimental, incomplete.
* Based on the final 1.0 register interface specification.
*
* USB 2.0 shows up in upcoming www.pcmcia.org technology.
* First was PCMCIA, like ISA; then CardBus, which is PCI.
* Next comes "CardBay", using USB 2.0 signals.
*
* Contains additional contributions by Brad Hards, Rory Bolt, and others.
* Special thanks to Intel and VIA for providing host controllers to
* test this driver on, and Cypress (including In-System Design) for
* providing early devices for those host controllers to talk to!
*/
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
static const char hcd_name [] = "ehci_hcd";
#undef EHCI_URB_TRACE
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
/*
* Some drivers think it's safe to schedule isochronous transfers more than
* 256 ms into the future (partly as a result of an old bug in the scheduling
* code). In an attempt to avoid trouble, we will use a minimum scheduling
* length of 512 frames instead of 256.
*/
#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
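/*
 * Illustrative sketch (not part of the driver): ehci_init() below applies
 * this parameter by writing 1 << log2_irq_thresh into the USBCMD Interrupt
 * Threshold Control field (bits 23:16), which holds the number of
 * microframes (1, 2, 4, ... 64) between interrupt thresholds; that is the
 * source of the "1-64 microframes" wording above.
 */
#if 0
static u32 example_irq_threshold_bits(int log2_thresh)
{
	if (log2_thresh < 0 || log2_thresh > 6)	/* same clamp as ehci_init() */
		log2_thresh = 0;
	return 1 << (16 + log2_thresh);	/* ITC value lands in USBCMD bits 23:16 */
}
#endif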
/* initial park setting: slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
#include "ehci.h"
#include "pci-quirks.h"
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
struct ehci_tt *tt);
/*
* The MosChip MCS9990 controller updates its microframe counter
* a little before the frame counter, and occasionally we will read
* the invalid intermediate value. Avoid problems by checking the
* microframe number (the low-order 3 bits); if they are 0 then
* re-read the register to get the correct value.
*/
static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
{
unsigned uf;
uf = ehci_readl(ehci, &ehci->regs->frame_index);
if (unlikely((uf & 7) == 0))
uf = ehci_readl(ehci, &ehci->regs->frame_index);
return uf;
}
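/*
 * Illustrative note (drawn from the comment above and from how
 * ehci_get_frame() below uses this value): FRINDEX counts in microframes,
 * so the 1 ms frame number is (index >> 3) and the microframe within that
 * frame is (index & 7).
 */
#if 0
static void example_decode_frame_index(unsigned index)
{
	unsigned frame = index >> 3;	/* 8 microframes (125 us each) per frame */
	unsigned uframe = index & 7;	/* 0..7 within the current frame */

	(void) frame;
	(void) uframe;
}
#endif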
static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
{
if (ehci->frame_index_bug)
return ehci_moschip_read_frame_index(ehci);
return ehci_readl(ehci, &ehci->regs->frame_index);
}
#include "ehci-dbg.c"
/*-------------------------------------------------------------------------*/
/*
* ehci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "usec" microseconds have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*
* That last failure should only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
do {
result = ehci_readl(ehci, ptr);
if (result == ~(u32)0) /* card removed */
return -ENODEV;
result &= mask;
if (result == done)
return 0;
udelay (1);
usec--;
} while (usec > 0);
return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(ehci_handshake);
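/*
 * Usage sketch (mirrors the call made in ehci_halt() below): wait up to
 * 2 ms (16 microframes * 125 us) for the controller to report STS_HALT.
 */
#if 0
static int example_wait_for_halt(struct ehci_hcd *ehci)
{
	return ehci_handshake(ehci, &ehci->regs->status,
			STS_HALT, STS_HALT, 16 * 125);
}
#endif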
/* check TDI/ARC silicon is in host mode */
static int tdi_in_host_mode (struct ehci_hcd *ehci)
{
u32 tmp;
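/* the controller-mode field is the low two bits of USBMODE;
* USBMODE_CM_HC means the silicon is acting as a host controller
*/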
tmp = ehci_readl(ehci, &ehci->regs->usbmode);
return (tmp & 3) == USBMODE_CM_HC;
}
/*
* Force HC to halt state from unknown (EHCI spec section 2.3).
* Must be called with interrupts enabled and the lock not held.
*/
static int ehci_halt (struct ehci_hcd *ehci)
{
u32 temp;
spin_lock_irq(&ehci->lock);
/* disable any irqs left enabled by previous code */
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
spin_unlock_irq(&ehci->lock);
return 0;
}
/*
* This routine gets called during probe before ehci->command
* has been initialized, so we can't rely on its value.
*/
ehci->command &= ~CMD_RUN;
temp = ehci_readl(ehci, &ehci->regs->command);
temp &= ~(CMD_RUN | CMD_IAAD);
ehci_writel(ehci, temp, &ehci->regs->command);
spin_unlock_irq(&ehci->lock);
synchronize_irq(ehci_to_hcd(ehci)->irq);
return ehci_handshake(ehci, &ehci->regs->status,
STS_HALT, STS_HALT, 16 * 125);
}
/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
u32 tmp;
tmp = ehci_readl(ehci, &ehci->regs->usbmode);
tmp |= USBMODE_CM_HC;
/* The default byte access to MMR space is LE after
* controller reset. Set the required endian mode
* for transfer buffers to match the host microprocessor
*/
if (ehci_big_endian_mmio(ehci))
tmp |= USBMODE_BE;
ehci_writel(ehci, tmp, &ehci->regs->usbmode);
}
/*
* Reset a non-running (STS_HALT == 1) controller.
* Must be called with interrupts enabled and the lock not held.
*/
int ehci_reset(struct ehci_hcd *ehci)
{
int retval;
u32 command = ehci_readl(ehci, &ehci->regs->command);
/* If the EHCI debug controller is active, special care must be
* taken before and after a host controller reset */
if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
ehci->debug = NULL;
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
ehci->rh_state = EHCI_RH_HALTED;
ehci->next_statechange = jiffies;
retval = ehci_handshake(ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
if (ehci->has_hostpc) {
ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
&ehci->regs->usbmode_ex);
ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
}
if (retval)
return retval;
if (ehci_is_TDI(ehci))
tdi_reset (ehci);
if (ehci->debug)
dbgp_external_startup(ehci_to_hcd(ehci));
ehci->port_c_suspend = ehci->suspended_ports =
ehci->resuming_ports = 0;
return retval;
}
EXPORT_SYMBOL_GPL(ehci_reset);
/*
* Idle the controller (turn off the schedules).
* Must be called with interrupts enabled and the lock not held.
*/
static void ehci_quiesce (struct ehci_hcd *ehci)
{
u32 temp;
if (ehci->rh_state != EHCI_RH_RUNNING)
return;
/* wait for any schedule enables/disables to take effect */
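/*
* Note: the schedule-enable bits in USBCMD (CMD_PSE, bit 4, and CMD_ASE,
* bit 5) sit exactly 10 bits below the matching status bits in USBSTS
* (STS_PSS, bit 14, and STS_ASS, bit 15), so shifting the cached command
* value left by 10 yields the status value the handshake should see.
*/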
temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
16 * 125);
/* then disable anything that's still active */
spin_lock_irq(&ehci->lock);
ehci->command &= ~(CMD_ASE | CMD_PSE);
ehci_writel(ehci, ehci->command, &ehci->regs->command);
spin_unlock_irq(&ehci->lock);
/* hardware can take 16 microframes to turn off ... */
ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
16 * 125);
}
/*-------------------------------------------------------------------------*/
static void end_iaa_cycle(struct ehci_hcd *ehci);
static void end_unlink_async(struct ehci_hcd *ehci);
static void unlink_empty_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable);
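/* The timer, root-hub, memory, queue, schedule, and sysfs helpers below are
* compiled directly into this translation unit rather than built as
* separate objects.
*/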
#include "ehci-timer.c"
#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
#include "ehci-sysfs.c"
/*-------------------------------------------------------------------------*/
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
*/
static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
{
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
ehci_writel(ehci, PORT_RWC_BITS,
&ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
}
}
/*
* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
* Must be called with interrupts enabled and the lock not held.
*/
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
ehci_halt(ehci);
spin_lock_irq(&ehci->lock);
ehci->rh_state = EHCI_RH_HALTED;
ehci_turn_off_all_ports(ehci);
/* make BIOS/etc use companion controller during reboot */
ehci_writel(ehci, 0, &ehci->regs->configured_flag);
/* unblock posted writes */
ehci_readl(ehci, &ehci->regs->configured_flag);
spin_unlock_irq(&ehci->lock);
}
/* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
* This forcibly disables dma and IRQs, helping kexec and other cases
* where the next system software may expect clean state.
*/
static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
/**
* Protect the system from crashing at system shutdown in cases where
* the usb host has not yet been added by the OTG controller driver.
* Since ehci_setup() has not run yet, don't access registers or
* variables initialized in ehci_setup().
*/
if (!ehci->sbrn)
return;
spin_lock_irq(&ehci->lock);
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
ehci_silence_controller(ehci);
hrtimer_cancel(&ehci->hrtimer);
}
/*-------------------------------------------------------------------------*/
/*
* ehci_work is called from some interrupts, timers, and so on.
* it calls driver completion functions, after dropping ehci->lock.
*/
static void ehci_work (struct ehci_hcd *ehci)
{
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
if (ehci->scanning) {
ehci->need_rescan = true;
return;
}
ehci->scanning = true;
rescan:
ehci->need_rescan = false;
if (ehci->async_count)
scan_async(ehci);
if (ehci->intr_count > 0)
scan_intr(ehci);
if (ehci->isoc_count > 0)
scan_isoc(ehci);
if (ehci->need_rescan)
goto rescan;
ehci->scanning = false;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
turn_on_io_watchdog(ehci);
}
/*
* Called when the ehci_hcd module is removed.
*/
static void ehci_stop (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
ehci_dbg (ehci, "stop\n");
/* no more interrupts ... */
spin_lock_irq(&ehci->lock);
ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
ehci_quiesce(ehci);
ehci_silence_controller(ehci);
ehci_reset (ehci);
hrtimer_cancel(&ehci->hrtimer);
remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq (&ehci->lock);
end_free_itds(ehci);
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
if (ehci->amd_pll_fix == 1)
usb_amd_dev_put();
dbg_status (ehci, "ehci_stop completed",
ehci_readl(ehci, &ehci->regs->status));
}
/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
u32 temp;
int retval;
u32 hcc_params;
struct ehci_qh_hw *hw;
spin_lock_init(&ehci->lock);
/*
* keep the io watchdog by default; well-behaved HCDs can turn it off later
*/
ehci->need_io_watchdog = 1;
hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->hrtimer.function = ehci_hrtimer_func;
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
/*
* by default set standard 80% (== 100 usec/uframe) max periodic
* bandwidth as required by USB 2.0
*/
ehci->uframe_periodic_max = 100;
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->async_unlink);
INIT_LIST_HEAD(&ehci->async_idle);
INIT_LIST_HEAD(&ehci->intr_unlink_wait);
INIT_LIST_HEAD(&ehci->intr_unlink);
INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
INIT_LIST_HEAD(&ehci->tt_list);
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
switch (EHCI_TUNE_FLS) {
case 0: ehci->periodic_size = 1024; break;
case 1: ehci->periodic_size = 512; break;
case 2: ehci->periodic_size = 256; break;
default: BUG();
}
}
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
/* controllers may cache some of the periodic schedule ... */
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
ehci->i_thresh = 0;
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
#if defined(CONFIG_PPC_PS3)
hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
log2_irq_thresh = 0;
temp = 1 << (16 + log2_irq_thresh);
if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
ehci->has_ppcd = 1;
ehci_dbg(ehci, "enable per-port change event\n");
temp |= CMD_PPCEE;
}
if (HCC_CANPARK(hcc_params)) {
/* HW default park == 3, on hardware that supports it (like
* NVidia and ALI silicon), maximizes throughput on the async
* schedule by avoiding QH fetches between transfers.
*
* With fast usb storage devices and NForce2, "park" seems to
* cause problems: throughput reduction (!), data errors...
*/
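/* "park" ends up in the Asynchronous Schedule Park Mode Count field
* (USBCMD bits 9:8); CMD_PARK turns park mode itself on.
*/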
if (park) {
park = min(park, (unsigned) 3);
temp |= CMD_PARK;
temp |= park << 8;
}
ehci_dbg(ehci, "park %d\n", park);
}
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
/* periodic schedule size can be smaller than default */
temp &= ~(3 << 2);
temp |= (EHCI_TUNE_FLS << 2);
}
ehci->command = temp;
/* Accept arbitrarily long scatter-gather lists */
if (!(hcd->driver->flags & HCD_LOCAL_MEM))
hcd->self.sg_tablesize = ~0;
/* Prepare for unlinking active QHs */
ehci->old_current = ~0;
return 0;
}
/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
/*
* hcc_params controls whether ehci->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* pci_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
* Scsi_Host.highmem_io, and so forth. It's readonly to all
* host side drivers though.
*/
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_64BIT_ADDR(hcc_params)) {
ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
ehci_info(ehci, "enabled 64bit DMA\n");
#endif
}
// Philips, Intel, and maybe others need CMD_RUN before the
// root hub will detect new devices (why?); NEC doesn't
ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
ehci->command |= CMD_RUN;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
dbg_cmd (ehci, "init", ehci->command);
/*
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
* are explicitly handed to companion controller(s), so no TT is
* involved with the root hub. (Except where one is integrated,
* and there's no companion controller unless maybe for USB OTG.)
*
* Turning on the CF flag will transfer ownership of all ports
* from the companions to the EHCI controller. If any of the
* companions are in the middle of a port reset at the time, it
* could cause trouble. Write-locking ehci_cf_port_reset_rwsem
* guarantees that no resets are in progress. After we set CF,
* a short delay lets the hardware catch up; new resets shouldn't
* be started before the port switching actions could complete.
*/
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
up_write(&ehci_cf_port_reset_rwsem);
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci_info (ehci,
"USB %x.%x started, EHCI %x.%02x%s\n",
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
temp >> 8, temp & 0xff,
ignore_oc ? ", overcurrent ignored" : "");
ehci_writel(ehci, INTR_MASK,
&ehci->regs->intr_enable); /* Turn On Interrupts */
/* GRR this is run-once init(), being done every time the HC starts.
* So long as they're part of class devices, we can't do it in init()
* since the class device isn't created that early.
*/
create_debug_files(ehci);
create_sysfs_files(ehci);
return 0;
}
int ehci_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->regs = (void __iomem *)ehci->caps +
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
ehci->sbrn = HCD_USB2;
/* data structure init */
retval = ehci_init(hcd);
if (retval)
return retval;
retval = ehci_halt(ehci);
if (retval) {
ehci_mem_cleanup(ehci);
return retval;
}
ehci_reset(ehci);
return 0;
}
EXPORT_SYMBOL_GPL(ehci_setup);
/*-------------------------------------------------------------------------*/
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
unsigned long flags;
/*
* For threadirqs option we use spin_lock_irqsave() variant to prevent
* deadlock with ehci hrtimer callback, because hrtimer callbacks run
* in interrupt context even when threadirqs is specified. We can go
* back to spin_lock() variant when hrtimer callbacks become threaded.
*/
spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
/* e.g. cardbus physical eject */
if (status == ~(u32) 0) {
ehci_dbg (ehci, "device removed\n");
goto dead;
}
/*
* We don't use STS_FLR, but some controllers don't like it to
* remain on, so mask it out along with the other status bits.
*/
masked_status = status & (INTR_MASK | STS_FLR);
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
/* clear (just) interrupts */
ehci_writel(ehci, masked_status, &ehci->regs->status);
cmd = ehci_readl(ehci, &ehci->regs->command);
bh = 0;
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
COUNT (ehci->stats.normal);
else
COUNT (ehci->stats.error);
bh = 1;
}
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
/* Turn off the IAA watchdog */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
/*
* Mild optimization: Allow another IAAD to reset the
* hrtimer, if one occurs before the next expiration.
* In theory we could always cancel the hrtimer, but
* tests show that about half the time it will be reset
* for some other event anyway.
*/
if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
++ehci->next_hrtimer_event;
/* guard against (alleged) silicon errata */
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
COUNT(ehci->stats.iaa);
end_iaa_cycle(ehci);
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
u32 ppcd = ~0;
/* kick root hub later */
pcd_status = status;
/* resume root hub? */
if (ehci->rh_state == EHCI_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
/* get per-port change detect bits */
if (ehci->has_ppcd)
ppcd = status >> 16;
while (i--) {
int pstatus;
/* leverage per-port change bits feature */
if (!(ppcd & (1 << i)))
continue;
pstatus = ehci_readl(ehci,
&ehci->regs->port_status[i]);
if (pstatus & PORT_OWNER)
continue;
if (!(test_bit(i, &ehci->suspended_ports) &&
((pstatus & PORT_RESUME) ||
!(pstatus & PORT_SUSPEND)) &&
(pstatus & PORT_PE) &&
ehci->reset_done[i] == 0))
continue;
/* start USB_RESUME_TIMEOUT msec resume signaling from
* this port, and make hub_wq collect
* PORT_STAT_C_SUSPEND to stop that signaling.
*/
ehci->reset_done[i] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
usb_hcd_start_port_resume(&hcd->self, i);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
}
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
ehci_err(ehci, "fatal error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
dead:
usb_hc_died(hcd);
/* Don't let the controller do anything more */
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
ehci_handle_controller_death(ehci);
/* Handle completions when the controller stops */
bh = 0;
}
if (bh)
ehci_work (ehci);
spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/*
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*
* urb + dev is in hcd.self.controller.urb_list
* we're queueing TDs onto software and hardware lists
*
* hcd-specific init for hcpriv hasn't been done yet
*
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct list_head qtd_list;
INIT_LIST_HEAD (&qtd_list);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
/* qh_completions() code doesn't handle all the fault cases
* in multi-TD control transfers. Even 1KB is rare anyway.
*/
if (urb->transfer_buffer_length > (16 * 1024))
return -EMSGSIZE;
/* FALLTHROUGH */
/* case PIPE_BULK: */
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async(ehci, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return intr_submit(ehci, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
return itd_submit (ehci, urb, mem_flags);
else
return sitd_submit (ehci, urb, mem_flags);
}
}
/* remove from hardware lists
* completions normally happen asynchronously
*/
static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh;
unsigned long flags;
int rc;
spin_lock_irqsave (&ehci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
/*
* We don't expedite dequeue for isochronous URBs.
* Just wait until they complete normally or their
* time slot expires.
*/
} else {
qh = (struct ehci_qh *) urb->hcpriv;
qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
start_unlink_intr(ehci, qh);
else
start_unlink_async(ehci, qh);
break;
case QH_STATE_COMPLETING:
qh->dequeue_during_giveback = 1;
break;
case QH_STATE_UNLINK:
case QH_STATE_UNLINK_WAIT:
/* already started */
break;
case QH_STATE_IDLE:
/* QH might be waiting for a Clear-TT-Buffer */
qh_completions(ehci, qh);
break;
}
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
return rc;
}
/*-------------------------------------------------------------------------*/
// bulk qh holds the data toggle
static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
unsigned long flags;
struct ehci_qh *qh;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
rescan:
spin_lock_irqsave (&ehci->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw == NULL) {
struct ehci_iso_stream *stream = ep->hcpriv;
if (!list_empty(&stream->td_list))
goto idle_timeout;
/* BUG_ON(!list_empty(&stream->free_list)); */
reserve_release_iso_bandwidth(ehci, stream, -1);
kfree(stream);
goto done;
}
qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
if (list_empty(&qh->qtd_list))
qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
else
WARN_ON(1);
if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
start_unlink_async(ehci, qh);
else
start_unlink_intr(ehci, qh);
/* FALL THROUGH */
case QH_STATE_COMPLETING: /* already in unlinking */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
idle_timeout:
spin_unlock_irqrestore (&ehci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
if (qh->ps.bw_uperiod)
reserve_release_intr_bandwidth(ehci, qh, -1);
qh_destroy(ehci, qh);
break;
}
/* else FALL THROUGH */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
done:
ep->hcpriv = NULL;
spin_unlock_irqrestore (&ehci->lock, flags);
}
static void
ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_qh *qh;
int eptype = usb_endpoint_type(&ep->desc);
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
unsigned long flags;
if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
return;
spin_lock_irqsave(&ehci->lock, flags);
qh = ep->hcpriv;
/* For Bulk and Interrupt endpoints we maintain the toggle state
* in the hardware; the toggle bits in udev aren't used at all.
* When an endpoint is reset by usb_clear_halt() we must reset
* the toggle bit in the QH.
*/
if (qh) {
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
} else {
/* The toggle value in the QH can't be updated
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
usb_settoggle(qh->ps.udev, epnum, is_out, 0);
qh->unlink_reason |= QH_UNLINK_REQUESTED;
if (eptype == USB_ENDPOINT_XFER_BULK)
start_unlink_async(ehci, qh);
else
start_unlink_intr(ehci, qh);
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
}
static int ehci_get_frame (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
}
/*-------------------------------------------------------------------------*/
/* Device addition and removal */
static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
spin_lock_irq(&ehci->lock);
drop_tt(udev);
spin_unlock_irq(&ehci->lock);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* suspend/resume, section 4.3 */
/* These routines handle the generic parts of controller suspend/resume */
int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (time_before(jiffies, ehci->next_statechange))
msleep(10);
/*
* Root hub was already suspended. Disable IRQ emission and
* mark HW unaccessible. The PM and USB cores make sure that
* the root hub is either suspended or stopped.
*/
ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
spin_lock_irq(&ehci->lock);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void) ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irq(&ehci->lock);
synchronize_irq(hcd->irq);
/* Check for race with a wakeup request */
if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
ehci_resume(hcd, false);
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL_GPL(ehci_suspend);
/* Returns 0 if power was preserved, 1 if power was lost */
int ehci_resume(struct usb_hcd *hcd, bool force_reset)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
if (time_before(jiffies, ehci->next_statechange))
msleep(100);
/* Mark hardware accessible again as we are back to full power by now */
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (ehci->shutdown)
return 0; /* Controller is dead */
/*
* If CF is still set and reset isn't forced
* then we maintained suspend power.
* Just undo the effect of ehci_suspend().
*/
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
!force_reset) {
int mask = INTR_MASK;
ehci_prepare_ports_for_controller_resume(ehci);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto skip;
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
skip:
spin_unlock_irq(&ehci->lock);
return 0;
}
/*
* Else reset, to cope with power loss or resume from hibernation
* having let the firmware kick in during reboot.
*/
usb_root_hub_lost_power(hcd->self.root_hub);
(void) ehci_halt(ehci);
(void) ehci_reset(ehci);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto skip;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
ehci->rh_state = EHCI_RH_SUSPENDED;
spin_unlock_irq(&ehci->lock);
return 1;
}
EXPORT_SYMBOL_GPL(ehci_resume);
#endif
/*-------------------------------------------------------------------------*/
/*
* Generic structure: This gets copied for platform drivers so that
* individual entries can be overridden as needed.
*/
static const struct hc_driver ehci_hc_driver = {
.description = hcd_name,
.product_desc = "EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
/*
* device support
*/
.free_dev = ehci_remove_device,
};
void ehci_init_driver(struct hc_driver *drv,
const struct ehci_driver_overrides *over)
{
/* Copy the generic table to drv and then apply the overrides */
*drv = ehci_hc_driver;
if (over) {
drv->hcd_priv_size += over->extra_priv_size;
if (over->reset)
drv->reset = over->reset;
if (over->port_power)
drv->port_power = over->port_power;
}
}
EXPORT_SYMBOL_GPL(ehci_init_driver);
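/*
 * Usage sketch (hypothetical platform glue, not part of this file): a bus
 * glue driver copies the generic hc_driver and overrides only what it
 * needs.  The example_* names below are made up for illustration.
 */
#if 0
static int example_ehci_reset(struct usb_hcd *hcd);	/* hypothetical */

static struct hc_driver example_ehci_hc_driver;

static const struct ehci_driver_overrides example_overrides __initconst = {
	.reset = example_ehci_reset,	/* hypothetical per-platform setup */
};

static int __init example_glue_init(void)
{
	ehci_init_driver(&example_ehci_hc_driver, &example_overrides);
	return 0;
}
#endif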
/*-------------------------------------------------------------------------*/
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
#endif
#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
#ifdef CONFIG_TILE_USB
#include "ehci-tilegx.c"
#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
#endif
#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
#include "ehci-pmcmsp.c"
#define PLATFORM_DRIVER ehci_hcd_msp_driver
#endif
#ifdef CONFIG_SPARC_LEON
#include "ehci-grlib.c"
#define PLATFORM_DRIVER ehci_grlib_driver
#endif
#ifdef CONFIG_USB_EHCI_MV
#include "ehci-mv.c"
#define PLATFORM_DRIVER ehci_mv_driver
#endif
#ifdef CONFIG_MIPS_SEAD3
#include "ehci-sead3.c"
#define PLATFORM_DRIVER ehci_hcd_sead3_driver
#endif
static int __init ehci_hcd_init(void)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
" before uhci_hcd and ohci_hcd, not after\n");
pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
hcd_name,
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
#ifdef CONFIG_DYNAMIC_DEBUG
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
goto err_debug;
}
#endif
#ifdef PLATFORM_DRIVER
retval = platform_driver_register(&PLATFORM_DRIVER);
if (retval < 0)
goto clean0;
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
if (retval < 0)
goto clean2;
#endif
#ifdef OF_PLATFORM_DRIVER
retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean3;
#endif
#ifdef XILINX_OF_PLATFORM_DRIVER
retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
if (retval < 0)
goto clean4;
#endif
return retval;
#ifdef XILINX_OF_PLATFORM_DRIVER
/* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
clean4:
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
return retval;
}
module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
#ifdef XILINX_OF_PLATFORM_DRIVER
platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);