// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

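/*
 * Illustrative note (not part of the original file): each bit of the "quirks"
 * parameter corresponds to one of the XHCI_* quirk flags defined in xhci.h,
 * so a quirk can be forced at load time, e.g.:
 *
 *      modprobe xhci_hcd quirks=0x1
 *
 * The bit-to-quirk mapping is kernel-version specific; check xhci.h before
 * relying on a particular value.
 */
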
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
        struct xhci_segment *seg = ring->first_seg;

        if (!td || !td->start_seg)
                return false;
        do {
                if (seg == td->start_seg)
                        return true;
                seg = seg->next;
        } while (seg && seg != ring->first_seg);

        return false;
}

/**
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "timeout_us" has passed
 * (major hardware flakeout), or the register reads as all-ones (hardware
 * removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
        u32     result;
        int     ret;

        ret = readl_poll_timeout_atomic(ptr, result,
                                        (result & mask) == done ||
                                        result == U32_MAX,
                                        1, timeout_us);
        if (result == U32_MAX)          /* card removed */
                return -ENODEV;

        return ret;
}

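/*
 * Illustrative use of xhci_handshake(), mirroring xhci_halt() below: spin
 * until the HCHalted status bit is set, giving up after XHCI_MAX_HALT_USEC:
 *
 *      ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *                           XHCI_MAX_HALT_USEC);
 */
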
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = readl(&xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = readl(&xhci->op_regs->command);
        cmd &= mask;
        writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
        xhci_quiesce(xhci);

        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (ret) {
                xhci_warn(xhci, "Host halt failed, %d\n", ret);
                return ret;
        }

        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
                        temp);
        writel(temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                /* clear state flags. Including dying, halted or removing */
                xhci->xhc_state = 0;

        return ret;
}

/*
 * Reset a halted HC.
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
        u32 command;
        u32 state;
        int ret;

        state = readl(&xhci->op_regs->status);

        if (state == ~(u32)0) {
                xhci_warn(xhci, "Host not accessible, reset failed.\n");
                return -ENODEV;
        }

        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
        command = readl(&xhci->op_regs->command);
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);

        /* Existing Intel xHCI controllers require a delay of 1 ms after
         * setting the CMD_RESET bit, before accessing any HC registers.
         * This allows the HC to complete the reset operation and be ready
         * for HC register access.  Without this delay, the subsequent HC
         * register access may very rarely result in a system hang.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

        ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
        if (ret)
                return ret;

        if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
                usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                         "Wait for controller to be ready for doorbell rings");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

        xhci->usb2_rhub.bus_state.port_c_suspend = 0;
        xhci->usb2_rhub.bus_state.suspended_ports = 0;
        xhci->usb2_rhub.bus_state.resuming_ports = 0;
        xhci->usb3_rhub.bus_state.port_c_suspend = 0;
        xhci->usb3_rhub.bus_state.suspended_ports = 0;
        xhci->usb3_rhub.bus_state.resuming_ports = 0;

        return ret;
}

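/*
 * Callers pick the reset timeout to match the context.  As an illustration
 * (the values live in xhci.h and may differ by kernel version), xhci_stop()
 * and xhci_shutdown() below use the short timeout, while the resume path
 * uses the long one:
 *
 *      xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 *      xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 */
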
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        int err, i;
        u64 val;
        u32 intrs;

        /*
         * Some Renesas controllers get into a weird state if they are
         * reset while programmed with 64bit addresses (they will preserve
         * the top half of the address in internal, non-visible
         * registers).  You end up with half the address coming from the
         * kernel, and the other half coming from the firmware.  Also,
         * changing the programming leads to extra accesses even if the
         * controller is supposed to be halted.  The controller ends up with
         * a fatal fault, and is then ripe for being properly reset.
         *
         * Special care is taken to only apply this if the device is behind
         * an iommu.  Doing anything when there is no iommu is definitely
         * unsafe...
         */
        if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
                return;

        xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

        /* Clear HSEIE so that faults do not get signaled */
        val = readl(&xhci->op_regs->command);
        val &= ~CMD_HSEIE;
        writel(val, &xhci->op_regs->command);

        /* Clear HSE (aka FATAL) */
        val = readl(&xhci->op_regs->status);
        val |= STS_FATAL;
        writel(val, &xhci->op_regs->status);

        /* Now zero the registers, and brace for impact */
        val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        if (upper_32_bits(val))
                xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if (upper_32_bits(val))
                xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

        intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
                      ARRAY_SIZE(xhci->run_regs->ir_set));

        for (i = 0; i < intrs; i++) {
                struct xhci_intr_reg __iomem *ir;

                ir = &xhci->run_regs->ir_set[i];
                val = xhci_read_64(xhci, &ir->erst_base);
                if (upper_32_bits(val))
                        xhci_write_64(xhci, 0, &ir->erst_base);
                val = xhci_read_64(xhci, &ir->erst_dequeue);
                if (upper_32_bits(val))
                        xhci_write_64(xhci, 0, &ir->erst_dequeue);
        }

        /* Wait for the fault to appear. It will be cleared on reset */
        err = xhci_handshake(&xhci->op_regs->status,
                             STS_FATAL, STS_FATAL,
                             XHCI_MAX_HALT_USEC);
        if (!err)
                xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        /*
         * TODO: Check with MSI SoC for sysdev
         */
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (ret < 0) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "failed to allocate MSI entry");
                return ret;
        }

        ret = request_irq(pdev->irq, xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "disable MSI interrupt");
                pci_free_irq_vectors(pdev);
        }

        return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors supported:
         * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
         *   with the max number of interrupters based on the xhci HCSPARAMS1.
         * - num_online_cpus: one MSI-X vector per CPU core.
         *   Add one extra vector so an interrupt is always available.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
                        PCI_IRQ_MSIX);
        if (ret < 0) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "Failed to enable MSI-X");
                return ret;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
                                "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
        pci_free_irq_vectors(pdev);
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        if (xhci->quirks & XHCI_PLAT)
                return;

        /* return if using legacy interrupt */
        if (hcd->irq > 0)
                return;

        if (hcd->msix_enabled) {
                int i;

                for (i = 0; i < xhci->msix_count; i++)
                        free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
        } else {
                free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
        }

        pci_free_irq_vectors(pdev);
        hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);

        if (hcd->msix_enabled) {
                struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
                int i;

                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(pci_irq_vector(pdev, i));
        }
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev;
        int ret;

        /* The xhci platform device has set up IRQs through usb_add_hcd. */
        if (xhci->quirks & XHCI_PLAT)
                return 0;

        pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                goto legacy_irq;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret) {
                hcd->msi_enabled = 1;
                return 0;
        }

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

 legacy_irq:
        if (!strlen(hcd->irq_descr))
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                         hcd->driver->description, hcd->self.busnum);

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        struct xhci_hub *rhub;
        u32 temp;
        int i;

        xhci = from_timer(xhci, t, comp_mode_recovery_timer);
        rhub = &xhci->usb3_rhub;
        hcd = rhub->hcd;

        if (!hcd)
                return;

        for (i = 0; i < rhub->num_ports; i++) {
                temp = readl(rhub->ports[i]->addr);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Compliance mode detected->port %d",
                                        i + 1);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Attempting compliance mode recovery");

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);
                }
        }

        if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver, which sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers a port by issuing
 * a Warm reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections are detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xhci spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
        timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
                    0);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)
                return false;

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
                return false;

        if (strstr(dmi_product_name, "Z420") ||
                        strstr(dmi_product_name, "Z620") ||
                        strstr(dmi_product_name, "Z820") ||
                        strstr(dmi_product_name, "Z1 Workstation"))
                return true;

        return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
        return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

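/*
 * Worked example: with 4 USB3 ports, "all ports seen U0" means
 * port_status_u0 == (1 << 4) - 1 == 0xf, i.e. one bit set per port.
 */
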
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Not clearing Link TRB chain bits.");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "xHCI doesn't need link TRB QUIRK");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

        /* Initializing Compliance Mode Recovery Data If Needed */
        if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }

        return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is setup.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Set the interrupt modulation register");
        temp = readl(&xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
        writel(temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enable interrupts, cmd = 0x%x.", temp);
        writel(temp, &xhci->op_regs->command);

        temp = readl(&xhci->ir_set->irq_pending);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

        if (xhci->quirks & XHCI_NEC_HOST) {
                struct xhci_command *command;

                command = xhci_alloc_command(xhci, false, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;

                ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));
                if (ret)
                        xhci_free_command(xhci, command);
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished %s for main hcd", __func__);

        xhci_create_dbc_dev(xhci);

        xhci_debugfs_init(xhci);

        if (xhci_has_one_roothub(xhci))
                return xhci_run_finished(xhci);

        set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

        return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

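/*
 * Worked example for the interrupt moderation setup in xhci_run() above:
 * the IMOD register counts in 250 ns units, so an imod_interval of 40000 ns
 * (the driver's usual default) is written as 40000 / 250 = 160 ticks.
 */
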
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        mutex_lock(&xhci->mutex);

        /* Only halt host and free memory after both hcds are removed */
        if (!usb_hcd_is_primary_hcd(hcd)) {
                mutex_unlock(&xhci->mutex);
                return;
        }

        xhci_remove_dbc_dev(xhci);

        spin_lock_irq(&xhci->lock);
        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        xhci_halt(xhci);
        xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        /* Deleting Compliance Mode Recovery Timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Disabling event ring interrupts");
        temp = readl(&xhci->op_regs->status);
        writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
        temp = readl(&xhci->ir_set->irq_pending);
        writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
        xhci_mem_cleanup(xhci);
        xhci_debugfs_exit(xhci);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_stop completed - status = %x",
                        readl(&xhci->op_regs->status));
        mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

        /* Don't poll the roothubs after shutdown. */
        xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
                        __func__, hcd->self.busnum);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);

        if (xhci->shared_hcd) {
                clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
                del_timer_sync(&xhci->shared_hcd->rh_timer);
        }

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = readl(&xhci->op_regs->command);
        xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
        xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
        xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        writel(xhci->s3.command, &xhci->op_regs->command);
        writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
        writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                       xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                 xhci->cmd_ring->cycle_state;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Setting command ring address to 0x%llx",
                        (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

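/*
 * Worked example: if the dequeue TRB maps to DMA address 0xff000040 and the
 * cycle state is 1, the value written is
 * (0xff000040 & ~CMD_RING_RSVD_BITS) | 1 = 0xff000041.  The low 6 bits of
 * the register are reserved, which is why the ring must stay 64-byte aligned.
 */
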
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * An internal wake causes an immediate xHCI wake after suspend.  The PORT_CSC
 * write done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
                                       struct xhci_hub *rhub,
                                       bool do_wakeup)
{
        unsigned long flags;
        u32 t1, t2, portsc;
        int i;

        spin_lock_irqsave(&xhci->lock, flags);

        for (i = 0; i < rhub->num_ports; i++) {
                portsc = readl(rhub->ports[i]->addr);
                t1 = xhci_port_state_to_neutral(portsc);
                t2 = t1;

                /* clear wake bits if do_wake is not set */
                if (!do_wakeup)
                        t2 &= ~PORT_WAKE_BITS;

                /* Don't touch csc bit if connected or connect change is set */
                if (!(portsc & (PORT_CSC | PORT_CONNECT)))
                        t2 |= PORT_CSC;

                if (t1 != t2) {
                        writel(t2, rhub->ports[i]->addr);
                        xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
                                 rhub->hcd->self.busnum, i + 1, portsc, t2);
                }
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
        struct xhci_port **ports;
        int port_index;
        u32 status;
        u32 portsc;

        status = readl(&xhci->op_regs->status);
        if (status & STS_EINT)
                return true;
        /*
         * Checking STS_EINT is not enough as there is a lag between a change
         * bit being set and the Port Status Change Event that it generated
         * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
         */

        port_index = xhci->usb2_rhub.num_ports;
        ports = xhci->usb2_rhub.ports;
        while (port_index--) {
                portsc = readl(ports[port_index]->addr);
                if (portsc & PORT_CHANGE_MASK ||
                    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
                        return true;
        }
        port_index = xhci->usb3_rhub.num_ports;
        ports = xhci->usb3_rhub.ports;
        while (port_index--) {
                portsc = readl(ports[port_index]->addr);
                if (portsc & PORT_CHANGE_MASK ||
                    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
                        return true;
        }
        return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int rc = 0;
        unsigned int delay = XHCI_MAX_HALT_USEC * 2;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;
        u32 res;

        if (!hcd->state)
                return 0;

        if (hcd->state != HC_STATE_SUSPENDED ||
            (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
                return -EINVAL;

        /* Clear root port wake on bits if wakeup not allowed. */
        xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
        xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

        if (!HCD_HW_ACCESSIBLE(hcd))
                return 0;

        xhci_dbc_suspend(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
        if (xhci->shared_hcd) {
                clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
                del_timer_sync(&xhci->shared_hcd->rh_timer);
        }

        if (xhci->quirks & XHCI_SUSPEND_DELAY)
                usleep_range(1000, 1500);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        if (xhci->shared_hcd)
                clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped assuming that port suspend has done */

        /* step 2: clear Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command &= ~CMD_RUN;
        writel(command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (xhci_handshake(&xhci->op_regs->status,
                      STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
        xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
                                STS_SAVE, 0, 20 * 1000)) {
                /*
                 * AMD SNPS xHC 3.0 occasionally does not clear the SSS bit
                 * of USBSTS; the driver then polls for BIT(8) to clear,
                 * which never happens, assumes the controller is not
                 * responding, and times out.  To work around this, check
                 * that the SRE and HCE bits are not set (as per xhci
                 * Section 5.4.2) and bypass the timeout.
                 */
                res = readl(&xhci->op_regs->status);
                if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
                    (((res & STS_SRE) == 0) &&
                                ((res & STS_HCE) == 0))) {
                        xhci->broken_suspend = 1;
                } else {
                        xhci_warn(xhci, "WARN: xHC save state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Deleting Compliance Mode Recovery Timer because the xHCI Host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32 command, temp = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;
        int retval = 0;
        bool comp_timer_running = false;
        bool pending_portevent = false;
        bool reinit_xhc = false;

        if (!hcd->state)
                return 0;

        /* Wait a bit if either of the roothubs needs to settle from the
         * transition into bus suspend.
         */

        if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
            time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        if (xhci->shared_hcd)
                set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);

        if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
                reinit_xhc = true;

        if (!reinit_xhc) {
                /*
                 * Some controllers might lose power during suspend, so wait
                 * for controller not ready bit to clear, just as in xHC init.
                 */
                retval = xhci_handshake(&xhci->op_regs->status,
                                        STS_CNR, 0, 10 * 1000 * 1000);
                if (retval) {
                        xhci_warn(xhci, "Controller not ready at resume %d\n",
                                  retval);
                        spin_unlock_irq(&xhci->lock);
                        return retval;
                }
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state*/
                /* step 3: set CRS flag */
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
                /*
                 * Some controllers take up to 55+ ms to complete the
                 * controller restore, so set the timeout to 100 ms.  The
                 * xHCI specification doesn't mention any timeout value.
                 */
                if (xhci_handshake(&xhci->op_regs->status,
                              STS_RESTORE, 0, 100 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
        }

        temp = readl(&xhci->op_regs->status);

        /* re-initialize the HC on Restore Error, or Host Controller Error */
        if (temp & (STS_SRE | STS_HCE)) {
                reinit_xhc = true;
                xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
        }

        if (reinit_xhc) {
                if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                                !(xhci_all_ports_seen_u0(xhci))) {
                        del_timer_sync(&xhci->comp_mode_recovery_timer);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Compliance Mode Recovery Timer deleted!");
                }

                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                if (xhci->shared_hcd)
                        usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_zero_64b_regs(xhci);
                retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
                spin_unlock_irq(&xhci->lock);
                if (retval)
                        return retval;
                xhci_cleanup_msix(xhci);

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = readl(&xhci->op_regs->status);
                writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
                temp = readl(&xhci->ir_set->irq_pending);
                writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_debugfs_exit(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                            readl(&xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                comp_timer_running = true;

                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval && secondary_hcd) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                if (xhci->shared_hcd)
                        xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command |= CMD_RUN;
        writel(command, &xhci->op_regs->command);
        xhci_handshake(&xhci->op_regs->status, STS_HALT,
                  0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * Running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

        xhci_dbc_resume(xhci);

 done:
        if (retval == 0) {
                /*
                 * Resume roothubs only if there are pending events.
                 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
                 * the first wake signalling failed, give it that chance.
                 */
                pending_portevent = xhci_pending_portevent(xhci);
                if (!pending_portevent) {
                        msleep(120);
                        pending_portevent = xhci_pending_portevent(xhci);
                }

                if (pending_portevent) {
                        if (xhci->shared_hcd)
                                usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
        }
        /*
         * If the system is subject to the quirk, the Compliance Mode Timer
         * always needs to be re-initialized after a system resume: ports may
         * suffer the Compliance Mode issue again, regardless of whether they
         * entered U0 before the system was suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);

        if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
                usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

        /* Re-enable port polling. */
        xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        if (xhci->shared_hcd) {
                set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
                usb_hcd_poll_rh_status(xhci->shared_hcd);
        }
        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        usb_hcd_poll_rh_status(hcd);

        return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
        void *temp;
        int ret = 0;
        unsigned int buf_len;
        enum dma_data_direction dir;

        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf_len = urb->transfer_buffer_length;

        temp = kzalloc_node(buf_len, GFP_ATOMIC,
                            dev_to_node(hcd->self.sysdev));
        if (!temp)
                return -ENOMEM;

        if (usb_urb_dir_out(urb))
                sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
                                   temp, buf_len, 0);

        urb->transfer_buffer = temp;
        urb->transfer_dma = dma_map_single(hcd->self.sysdev,
                                           urb->transfer_buffer,
                                           urb->transfer_buffer_length,
                                           dir);

        if (dma_mapping_error(hcd->self.sysdev,
                              urb->transfer_dma)) {
                ret = -EAGAIN;
                kfree(temp);
        } else {
                urb->transfer_flags |= URB_DMA_MAP_SINGLE;
        }

        return ret;
}

static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
                                          struct urb *urb)
{
        bool ret = false;
        unsigned int i;
        unsigned int len = 0;
        unsigned int trb_size;
        unsigned int max_pkt;
        struct scatterlist *sg;
        struct scatterlist *tail_sg;

        tail_sg = urb->sg;
        max_pkt = usb_endpoint_maxp(&urb->ep->desc);

        if (!urb->num_sgs)
                return ret;

        if (urb->dev->speed >= USB_SPEED_SUPER)
                trb_size = TRB_CACHE_SIZE_SS;
        else
                trb_size = TRB_CACHE_SIZE_HS;

        if (urb->transfer_buffer_length != 0 &&
            !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
                for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                        len = len + sg->length;
                        if (i > trb_size - 2) {
                                len = len - tail_sg->length;
                                if (len < max_pkt) {
                                        ret = true;
                                        break;
                                }

                                tail_sg = sg_next(tail_sg);
                        }
                }
        }
        return ret;
}

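/*
 * Illustrative case for the check above: a SuperSpeed bulk URB whose
 * scatterlist packs many buffers shorter than wMaxPacketSize can leave less
 * than one full packet inside the controller's cached TRB window
 * (TRB_CACHE_SIZE_SS entries), so this returns true and the URB is bounced
 * through the temporary buffer instead of being mapped scatter-gather.
 */
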
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
        unsigned int len;
        unsigned int buf_len;
        enum dma_data_direction dir;

        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        buf_len = urb->transfer_buffer_length;

        if (IS_ENABLED(CONFIG_HAS_DMA) &&
            (urb->transfer_flags & URB_DMA_MAP_SINGLE))
                dma_unmap_single(hcd->self.sysdev,
                                 urb->transfer_dma,
                                 urb->transfer_buffer_length,
                                 dir);

        if (usb_urb_dir_in(urb)) {
                len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
                                           urb->transfer_buffer,
                                           buf_len,
                                           0);
                if (len != buf_len) {
                        xhci_dbg(hcd_to_xhci(hcd),
                                 "Copy from tmp buf to urb sg list failed\n");
                        urb->actual_length = len;
                }
        }
        urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
        kfree(urb->transfer_buffer);
        urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT):
 * we'll copy the actual data into the TRB address register.  This is limited
 * to transfers up to 8 bytes on output endpoints of any kind with
 * wMaxPacketSize >= 8 bytes.  If suitable for IDT, only one Transfer TRB per
 * TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
                                gfp_t mem_flags)
{
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(hcd);

        if (xhci_urb_suitable_for_idt(urb))
                return 0;

        if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
                if (xhci_urb_temp_buffer_required(hcd, urb))
                        return xhci_map_temp_buffer(hcd, urb);
        }
        return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

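/*
 * Illustrative IDT case, per the limits described above: an 8-byte OUT
 * transfer on an endpoint with wMaxPacketSize >= 8 is copied directly into
 * the TRB's data field, so no DMA mapping is set up for it at all.
 */
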
static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
        struct xhci_hcd *xhci;
        bool unmap_temp_buf = false;

        xhci = hcd_to_xhci(hcd);

        if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
                unmap_temp_buf = true;

        if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
                xhci_unmap_temp_buf(hcd, urb);
        else
                usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;
        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

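/*
 * Worked examples of the formula above:
 * ep 0x81 (IN, epnum 1)  -> (1 * 2) + 1 - 1 = index 2
 * ep 0x02 (OUT, epnum 2) -> (2 * 2) + 0 - 1 = index 3
 * control endpoint 0     -> (0 * 2)         = index 0
 */
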
/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB
 * endpoint address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
        unsigned int number = DIV_ROUND_UP(ep_index, 2);
        unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
        return direction | number;
}

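/*
 * Worked example of the inverse mapping: index 2 -> DIV_ROUND_UP(2, 2) = 1,
 * and an even index means IN, so the address is USB_DIR_IN | 1 = 0x81.
 */
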
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                pr_debug("xHCI %s called with invalid args\n", func);
                return -EINVAL;
        }
        if (!udev->parent) {
                pr_debug("xHCI %s called for root hub\n", func);
                return 0;
        }

        xhci = hcd_to_xhci(hcd);
        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
                                        func);
                        return -EINVAL;
                }

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        xhci_dbg(xhci, "xHCI %s called with mismatched udev and virt_dev\n",
                                        func);
                        return -EINVAL;
                }
        }

        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return -ENODEV;

        return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_command *command;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max Packet Size for ep 0 changed.");
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max packet size in usb_device = %d",
                                max_packet_size);
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Max packet size in xHCI HW = %d",
                                hw_max_packet_size);
                xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
                                "Issuing evaluate context command.");

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */

                command = xhci_alloc_command(xhci, true, mem_flags);
                if (!command)
                        return -ENOMEM;

                command->in_ctx = xhci->devs[slot_id]->in_ctx;
                ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
                if (!ctrl_ctx) {
                        xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                        __func__);
                        ret = -ENOMEM;
                        goto command_cleanup;
                }
                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);

                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
                ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                ret = xhci_configure_endpoint(xhci, urb->dev, command,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
                kfree(command->completion);
                kfree(command);
        }
        return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;
        unsigned int *ep_state;
        struct urb_priv *urb_priv;
        int num_tds;

        if (!urb)
                return -EINVAL;
        ret = xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__);
        if (ret <= 0)
                return ret ? ret : -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

        if (!HCD_HW_ACCESSIBLE(hcd))
                return -ESHUTDOWN;

        if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
                xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
                return -ENODEV;
        }

        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                num_tds = urb->number_of_packets;
        else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
            urb->transfer_buffer_length > 0 &&
            urb->transfer_flags & URB_ZERO_PACKET &&
            !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
                num_tds = 2;
        else
                num_tds = 1;

        urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        urb_priv->num_tds = num_tds;
        urb_priv->num_tds_done = 0;
        urb->hcpriv = urb_priv;

        trace_xhci_urb_enqueue(urb);

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb, mem_flags);
                        if (ret < 0) {
                                xhci_urb_free_priv(urb_priv);
                                urb->hcpriv = NULL;
                                return ret;
                        }
                }
        }

        spin_lock_irqsave(&xhci->lock, flags);

        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
                         urb->ep->desc.bEndpointAddress, urb);
                ret = -ESHUTDOWN;
                goto free_priv;
        }
        if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
                xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
                          *ep_state);
                ret = -EINVAL;
                goto free_priv;
        }
        if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
                xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
                ret = -EINVAL;
                goto free_priv;
        }

        switch (usb_endpoint_type(&urb->ep->desc)) {

        case USB_ENDPOINT_XFER_CONTROL:
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                         slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                         slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_INT:
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
        }

        if (ret) {
free_priv:
                xhci_urb_free_priv(urb_priv);
                urb->hcpriv = NULL;
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret, i;
        u32 temp;
        struct xhci_hcd *xhci;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct xhci_command *command;
        struct xhci_virt_device *vdev;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);

        trace_xhci_urb_dequeue(urb);

        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret)
                goto done;

        /* give back URB now if we can't queue it for cancel */
        vdev = xhci->devs[urb->dev->slot_id];
        urb_priv = urb->hcpriv;
        if (!vdev || !urb_priv)
                goto err_giveback;

        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &vdev->eps[ep_index];
        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
        if (!ep || !ep_ring)
                goto err_giveback;

        /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
        temp = readl(&xhci->op_regs->status);
        if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_hc_died(xhci);
                goto done;
        }

        /*
         * Check that the ring has not been re-allocated since the URB was
         * enqueued.  If it has, make sure none of the ring-related pointers
         * in this URB private data are touched, such as td_list; otherwise
         * we would overwrite freed data.
         */
        if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
                xhci_err(xhci, "Canceled URB td not found on endpoint ring");
                for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
                        td = &urb_priv->td[i];
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);
                }
                goto err_giveback;
        }

        if (xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HC halted, freeing TD manually.");
                for (i = urb_priv->num_tds_done;
                     i < urb_priv->num_tds;
                     i++) {
                        td = &urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);
                }
                goto err_giveback;
        }

        i = urb_priv->num_tds_done;
        if (i < urb_priv->num_tds)
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
                                urb, urb->dev->devpath,
                                urb->ep->desc.bEndpointAddress,
                                (unsigned long long) xhci_trb_virt_to_dma(
                                        urb_priv->td[i].start_seg,
                                        urb_priv->td[i].first_trb));

        for (; i < urb_priv->num_tds; i++) {
                td = &urb_priv->td[i];
                /* TD can already be on cancelled list if ep halted on it */
                if (list_empty(&td->cancelled_td_list)) {
                        td->cancel_status = TD_DIRTY;
                        list_add_tail(&td->cancelled_td_list,
                                      &ep->cancelled_td_list);
                }
        }

        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
                command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
                if (!command) {
                        ret = -ENOMEM;
                        goto done;
                }
                ep->ep_state |= EP_STOP_CMD_PENDING;
                xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
                                         ep_index, 0);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;

err_giveback:
        if (urb_priv)
                xhci_urb_free_priv(urb_priv);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock_irqrestore(&xhci->lock, flags);
        usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
        return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                       struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags;
        int ret;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                return -ENODEV;

        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);
                return 0;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
                return 0;
        }

        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
            le32_to_cpu(ctrl_ctx->drop_flags) &
            xhci_get_endpoint_flag(&ep->desc)) {
                /* Do not warn when called after a usb_device_reset */
                if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
                        xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                  __func__, ep);
                return 0;
        }

        ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags);
        return 0;
}
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);

1977 /* Add an endpoint to a new possible bandwidth configuration for this device.
1978 * Only one call to this function is allowed per endpoint before
1979 * check_bandwidth() or reset_bandwidth() must be called.
1980 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1981 * add the endpoint to the schedule with possibly new parameters denoted by a
1982 * different endpoint descriptor in usb_host_endpoint.
1983 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1986 * The USB core will not allow URBs to be queued to an endpoint until the
1987 * configuration or alt setting is installed in the device, so there's no need
1988 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1990 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1991 struct usb_host_endpoint *ep)
1993 struct xhci_hcd *xhci;
1994 struct xhci_container_ctx *in_ctx;
1995 unsigned int ep_index;
1996 struct xhci_input_control_ctx *ctrl_ctx;
1997 struct xhci_ep_ctx *ep_ctx;
1999 u32 new_add_flags, new_drop_flags;
2000 struct xhci_virt_device *virt_dev;
2003 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
2005 /* So we won't queue a reset ep command for a root hub */
2009 xhci = hcd_to_xhci(hcd);
2010 if (xhci->xhc_state & XHCI_STATE_DYING)
2013 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
2014 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
2015 /* FIXME when we have to issue an evaluate endpoint command to
2016 * deal with ep0 max packet size changing once we get the
2019 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
2020 __func__, added_ctxs);
2024 virt_dev = xhci->devs[udev->slot_id];
2025 in_ctx = virt_dev->in_ctx;
2026 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2028 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2033 ep_index = xhci_get_endpoint_index(&ep->desc);
2034 /* If this endpoint is already in use, and the upper layers are trying
2035 * to add it again without dropping it, reject the addition.
2037 if (virt_dev->eps[ep_index].ring &&
2038 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
2039 xhci_warn(xhci, "Trying to add endpoint 0x%x "
2040 "without dropping it.\n",
2041 (unsigned int) ep->desc.bEndpointAddress);
2045 /* If the HCD has already noted the endpoint is enabled,
2046 * ignore this request.
2048 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2049 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2055 * Configuration and alternate setting changes must be done in
2056 * process context, not interrupt context (or so documenation
2057 * for usb_set_interface() and usb_set_configuration() claim).
2059 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2060 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2061 __func__, ep->desc.bEndpointAddress);
2065 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2066 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2068 /* If xhci_endpoint_disable() was called for this endpoint, but the
2069 * xHC hasn't been notified yet through the check_bandwidth() call,
2070 * this re-adds a new state for the endpoint from the new endpoint
2071 * descriptors. We must drop and re-add this endpoint, so we leave the drop flags alone.
2074 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2076 	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;
2079 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2080 trace_xhci_add_endpoint(ep_ctx);
2082 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2083 			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
2085 (unsigned int) new_drop_flags,
2086 (unsigned int) new_add_flags);
2089 EXPORT_SYMBOL_GPL(xhci_add_endpoint);
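/*
 * Illustrative sketch only (not part of the driver, guarded out): the
 * add/drop calls above are meant to be used in the sequence the comments
 * describe, with all staged flag changes committed to the xHC by a single
 * xhci_check_bandwidth() call. The helper name is hypothetical and error
 * unwinding via xhci_reset_bandwidth() is omitted for brevity.
 */
#if 0
static int example_change_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
				   struct usb_host_endpoint *ep)
{
	int ret;

	ret = xhci_drop_endpoint(hcd, udev, ep);	/* stage removal of the old parameters */
	if (ret)
		return ret;
	ret = xhci_add_endpoint(hcd, udev, ep);		/* stage the new parameters */
	if (ret)
		return ret;
	/* One configure endpoint command commits both flag sets to the xHC */
	return xhci_check_bandwidth(hcd, udev);
}
#endif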
2091 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2093 struct xhci_input_control_ctx *ctrl_ctx;
2094 struct xhci_ep_ctx *ep_ctx;
2095 struct xhci_slot_ctx *slot_ctx;
2098 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2100 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2105 /* When a device's add flag and drop flag are zero, any subsequent
2106 * configure endpoint command will leave that endpoint's state
2107 * untouched. Make sure we don't leave any old state in the input
2108 * endpoint contexts.
2110 ctrl_ctx->drop_flags = 0;
2111 ctrl_ctx->add_flags = 0;
2112 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2113 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2114 /* Endpoint 0 is always valid */
2115 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2116 for (i = 1; i < 31; i++) {
2117 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2118 ep_ctx->ep_info = 0;
2119 ep_ctx->ep_info2 = 0;
2121 ep_ctx->tx_info = 0;
2125 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2126 struct usb_device *udev, u32 *cmd_status)
2130 switch (*cmd_status) {
2131 case COMP_COMMAND_ABORTED:
2132 case COMP_COMMAND_RING_STOPPED:
2133 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2136 case COMP_RESOURCE_ERROR:
2137 dev_warn(&udev->dev,
2138 "Not enough host controller resources for new device state.\n");
2140 /* FIXME: can we allocate more resources for the HC? */
2142 case COMP_BANDWIDTH_ERROR:
2143 case COMP_SECONDARY_BANDWIDTH_ERROR:
2144 dev_warn(&udev->dev,
2145 "Not enough bandwidth for new device state.\n");
2147 /* FIXME: can we go back to the old state? */
2149 case COMP_TRB_ERROR:
2150 /* the HCD set up something wrong */
2151 		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
2153 				"and endpoint is not disabled.\n");
2156 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2157 dev_warn(&udev->dev,
2158 "ERROR: Incompatible device for endpoint configure command.\n");
2162 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2163 "Successful Endpoint Configure command");
2167 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2175 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2176 struct usb_device *udev, u32 *cmd_status)
2180 switch (*cmd_status) {
2181 case COMP_COMMAND_ABORTED:
2182 case COMP_COMMAND_RING_STOPPED:
2183 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2186 case COMP_PARAMETER_ERROR:
2187 dev_warn(&udev->dev,
2188 "WARN: xHCI driver setup invalid evaluate context command.\n");
2191 case COMP_SLOT_NOT_ENABLED_ERROR:
2192 dev_warn(&udev->dev,
2193 "WARN: slot not enabled for evaluate context command.\n");
2196 case COMP_CONTEXT_STATE_ERROR:
2197 dev_warn(&udev->dev,
2198 "WARN: invalid context state for evaluate context command.\n");
2201 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2202 dev_warn(&udev->dev,
2203 "ERROR: Incompatible device for evaluate context command.\n");
2206 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2207 /* Max Exit Latency too large error */
2208 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2212 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2213 "Successful evaluate context command");
2217 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2225 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2226 struct xhci_input_control_ctx *ctrl_ctx)
2228 u32 valid_add_flags;
2229 u32 valid_drop_flags;
2231 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2232 * (bit 1). The default control endpoint is added during the Address
2233 * Device command and is never removed until the slot is disabled.
2235 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2236 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2238 /* Use hweight32 to count the number of ones in the add flags, or
2239 * number of endpoints added. Don't count endpoints that are changed
2240 * (both added and dropped).
2242 return hweight32(valid_add_flags) -
2243 hweight32(valid_add_flags & valid_drop_flags);
2246 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2247 struct xhci_input_control_ctx *ctrl_ctx)
2249 u32 valid_add_flags;
2250 u32 valid_drop_flags;
2252 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2253 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2255 return hweight32(valid_drop_flags) -
2256 hweight32(valid_add_flags & valid_drop_flags);
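/*
 * Worked example with hypothetical flags: suppose three endpoints are being
 * added and one of them is also being dropped (i.e. it is changed).  Then
 * hweight32(valid_add_flags) = 3, hweight32(valid_drop_flags) = 1, and
 * hweight32(valid_add_flags & valid_drop_flags) = 1, so the host must
 * reserve 3 - 1 = 2 truly new endpoint contexts up front and, once the
 * command completes, release 1 - 1 = 0 contexts for dropped endpoints.
 */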
2260 * We need to reserve the new number of endpoints before the configure endpoint
2261 * command completes. We can't subtract the dropped endpoints from the number
2262 * of active endpoints until the command completes because we can oversubscribe
2263 * the host in this case:
2265 * - the first configure endpoint command drops more endpoints than it adds
2266 * - a second configure endpoint command that adds more endpoints is queued
2267 * - the first configure endpoint command fails, so the config is unchanged
2268 * - the second command may succeed, even though there aren't enough resources
2270 * Must be called with xhci->lock held.
2272 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2273 struct xhci_input_control_ctx *ctrl_ctx)
2277 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2278 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2279 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2280 "Not enough ep ctxs: "
2281 "%u active, need to add %u, limit is %u.",
2282 xhci->num_active_eps, added_eps,
2283 xhci->limit_active_eps);
2286 xhci->num_active_eps += added_eps;
2287 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2288 "Adding %u ep ctxs, %u now active.", added_eps,
2289 xhci->num_active_eps);
2294 * The xHC failed the configure endpoint command for some other reason, so we
2295 * need to revert the resources that the failed configuration would have used.
2297 * Must be called with xhci->lock held.
2299 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2300 struct xhci_input_control_ctx *ctrl_ctx)
2304 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2305 xhci->num_active_eps -= num_failed_eps;
2306 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2307 "Removing %u failed ep ctxs, %u now active.",
2309 xhci->num_active_eps);
2313 * Now that the command has completed, clean up the active endpoint count by
2314 * subtracting out the endpoints that were dropped (but not changed).
2316 * Must be called with xhci->lock held.
2318 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2319 struct xhci_input_control_ctx *ctrl_ctx)
2321 u32 num_dropped_eps;
2323 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2324 xhci->num_active_eps -= num_dropped_eps;
2325 if (num_dropped_eps)
2326 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2327 "Removing %u dropped ep ctxs, %u now active.",
2329 xhci->num_active_eps);
2332 static unsigned int xhci_get_block_size(struct usb_device *udev)
2334 switch (udev->speed) {
2336 case USB_SPEED_FULL:
2338 case USB_SPEED_HIGH:
2340 case USB_SPEED_SUPER:
2341 case USB_SPEED_SUPER_PLUS:
2343 case USB_SPEED_UNKNOWN:
2344 case USB_SPEED_WIRELESS:
2346 /* Should never happen */
2352 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2354 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2356 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2361 /* If we are changing a LS/FS device under a HS hub,
2362 * make sure (if we are activating a new TT) that the HS bus has enough
2363 * bandwidth for this new TT.
2365 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2366 struct xhci_virt_device *virt_dev,
2369 struct xhci_interval_bw_table *bw_table;
2370 struct xhci_tt_bw_info *tt_info;
2372 /* Find the bandwidth table for the root port this TT is attached to. */
2373 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2374 tt_info = virt_dev->tt_info;
2375 /* If this TT already had active endpoints, the bandwidth for this TT
2376 * has already been added. Removing all periodic endpoints (and thus
2377 * making the TT inactive) will only decrease the bandwidth used.
2381 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2382 		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
2386 /* Not sure why we would have no new active endpoints...
2388 * Maybe because of an Evaluate Context change for a hub update or a
2389 * control endpoint 0 max packet size change?
2390 * FIXME: skip the bandwidth calculation in that case.
2395 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2396 struct xhci_virt_device *virt_dev)
2398 unsigned int bw_reserved;
2400 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2401 	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;
2404 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2405 	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;
2412 * This algorithm is a very conservative estimate of the worst-case scheduling
2413 * scenario for any one interval. The hardware dynamically schedules the
2414 * packets, so we can't tell which microframe could be the limiting factor in
2415 * the bandwidth scheduling. This only takes into account periodic endpoints.
2417 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2418 * case scenario. Instead, we come up with an estimate that is no less than
2419 * the worst case bandwidth used for any one microframe, but may be an overestimate.
2422 * We walk the requirements for each endpoint by interval, starting with the
2423 * smallest interval, and place packets in the schedule where there is only one
2424 * possible way to schedule packets for that interval. In order to simplify
2425 * this algorithm, we record the largest max packet size for each interval, and
2426 * assume all packets will be that size.
2428 * For interval 0, we obviously must schedule all packets in every microframe.
2429 * The bandwidth for interval 0 is just the amount of data to be transmitted
2430 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2431 * the number of packets).
2433 * For interval 1, we have two possible microframes to schedule those packets
2434 * in. For this algorithm, if we can schedule the same number of packets for
2435 * each possible scheduling opportunity (each microframe), we will do so. The
2436 * remaining number of packets will be saved to be transmitted in the gaps in
2437 * the next interval's scheduling sequence.
2439 * As we move those remaining packets to be scheduled with interval 2 packets,
2440 * we have to double the number of remaining packets to transmit. This is
2441 * because the intervals are actually powers of 2, and we would be transmitting
2442 * the previous interval's packets twice in this interval. We also have to be
2443 * sure that when we look at the largest max packet size for this interval, we
2444 * also look at the largest max packet size for the remaining packets and take
2445 * the greater of the two.
2447 * The algorithm continues to evenly distribute packets in each scheduling
2448 * opportunity, and push the remaining packets out, until we get to the last
2449 * interval. Then those packets and their associated overhead are just added
2450 * to the bandwidth used.
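/*
 * Worked example with made-up packet counts: if interval 1 contributes 6
 * packets, we can place 6 >> 2 = 1 packet in each of its 1 << 2 scheduling
 * opportunities and must carry 6 % 4 = 2 packets forward.  At interval 2
 * the carry doubles, so with one packet of its own that interval must place
 * 2 * 2 + 1 = 5 packets: 5 >> 3 = 0 fit evenly, all five carry on, and so
 * forth until whatever remains after interval 15 is charged one final time
 * at the then-largest packet size and overhead.
 */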
2452 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2453 struct xhci_virt_device *virt_dev,
2456 unsigned int bw_reserved;
2457 unsigned int max_bandwidth;
2458 unsigned int bw_used;
2459 unsigned int block_size;
2460 struct xhci_interval_bw_table *bw_table;
2461 unsigned int packet_size = 0;
2462 unsigned int overhead = 0;
2463 unsigned int packets_transmitted = 0;
2464 unsigned int packets_remaining = 0;
2467 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2468 return xhci_check_ss_bw(xhci, virt_dev);
2470 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2471 max_bandwidth = HS_BW_LIMIT;
2472 /* Convert percent of bus BW reserved to blocks reserved */
2473 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
2475 		max_bandwidth = FS_BW_LIMIT;
2476 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2479 bw_table = virt_dev->bw_table;
2480 /* We need to translate the max packet size and max ESIT payloads into
2481 * the units the hardware uses.
2483 block_size = xhci_get_block_size(virt_dev->udev);
2485 /* If we are manipulating a LS/FS device under a HS hub, double check
2486 * that the HS bus has enough bandwidth if we are activating a new TT.
2488 if (virt_dev->tt_info) {
2489 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2490 "Recalculating BW for rootport %u",
2491 virt_dev->real_port);
2492 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2493 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2494 "newly activated TT.\n");
2497 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2498 "Recalculating BW for TT slot %u port %u",
2499 virt_dev->tt_info->slot_id,
2500 virt_dev->tt_info->ttport);
2502 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2503 "Recalculating BW for rootport %u",
2504 virt_dev->real_port);
2507 /* Add in how much bandwidth will be used for interval zero, or the
2508 * rounded max ESIT payload + number of packets * largest overhead.
2510 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2511 bw_table->interval_bw[0].num_packets *
2512 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2514 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2515 unsigned int bw_added;
2516 unsigned int largest_mps;
2517 unsigned int interval_overhead;
2520 * How many packets could we transmit in this interval?
2521 * If packets didn't fit in the previous interval, we will need
2522 * to transmit that many packets twice within this interval.
2524 packets_remaining = 2 * packets_remaining +
2525 bw_table->interval_bw[i].num_packets;
2527 		/* Find the largest max packet size of this or the previous interval. */
2530 		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
2533 struct xhci_virt_ep *virt_ep;
2534 struct list_head *ep_entry;
2536 ep_entry = bw_table->interval_bw[i].endpoints.next;
2537 virt_ep = list_entry(ep_entry,
2538 struct xhci_virt_ep, bw_endpoint_list);
2539 /* Convert to blocks, rounding up */
2540 largest_mps = DIV_ROUND_UP(
2541 virt_ep->bw_info.max_packet_size,
2544 if (largest_mps > packet_size)
2545 packet_size = largest_mps;
2547 /* Use the larger overhead of this or the previous interval. */
2548 interval_overhead = xhci_get_largest_overhead(
2549 &bw_table->interval_bw[i]);
2550 if (interval_overhead > overhead)
2551 overhead = interval_overhead;
2553 /* How many packets can we evenly distribute across
2554 * (1 << (i + 1)) possible scheduling opportunities?
2556 packets_transmitted = packets_remaining >> (i + 1);
2558 /* Add in the bandwidth used for those scheduled packets */
2559 bw_added = packets_transmitted * (overhead + packet_size);
2561 /* How many packets do we have remaining to transmit? */
2562 packets_remaining = packets_remaining % (1 << (i + 1));
2564 /* What largest max packet size should those packets have? */
2565 /* If we've transmitted all packets, don't carry over the
2566 * largest packet size.
2568 		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
2571 } else if (packets_transmitted > 0) {
2572 /* Otherwise if we do have remaining packets, and we've
2573 * scheduled some packets in this interval, take the
2574 * largest max packet size from endpoints with this
2577 packet_size = largest_mps;
2578 overhead = interval_overhead;
2580 /* Otherwise carry over packet_size and overhead from the last
2581 * time we had a remainder.
2583 bw_used += bw_added;
2584 if (bw_used > max_bandwidth) {
2585 xhci_warn(xhci, "Not enough bandwidth. "
2586 "Proposed: %u, Max: %u\n",
2587 bw_used, max_bandwidth);
2592 * Ok, we know we have some packets left over after even-handedly
2593 * scheduling interval 15. We don't know which microframes they will
2594 * fit into, so we over-schedule and say they will be scheduled every microframe.
2597 if (packets_remaining > 0)
2598 bw_used += overhead + packet_size;
2600 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2601 unsigned int port_index = virt_dev->real_port - 1;
2603 /* OK, we're manipulating a HS device attached to a
2604 * root port bandwidth domain. Include the number of active TTs
2605 * in the bandwidth used.
2607 bw_used += TT_HS_OVERHEAD *
2608 xhci->rh_bw[port_index].num_active_tts;
2611 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2612 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2613 "Available: %u " "percent",
2614 bw_used, max_bandwidth, bw_reserved,
2615 (max_bandwidth - bw_used - bw_reserved) * 100 /
2618 bw_used += bw_reserved;
2619 if (bw_used > max_bandwidth) {
2620 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2621 bw_used, max_bandwidth);
2625 bw_table->bw_used = bw_used;
2629 static bool xhci_is_async_ep(unsigned int ep_type)
2631 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2632 ep_type != ISOC_IN_EP &&
2633 ep_type != INT_IN_EP);
2636 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2638 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2641 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2643 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2645 if (ep_bw->ep_interval == 0)
2646 return SS_OVERHEAD_BURST +
2647 (ep_bw->mult * ep_bw->num_packets *
2648 (SS_OVERHEAD + mps));
2649 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2650 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2651 1 << ep_bw->ep_interval);
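/*
 * Illustration with hypothetical values: for ep_interval = 0, mult = 1 and
 * num_packets = 2, the endpoint consumes SS_OVERHEAD_BURST +
 * 2 * (SS_OVERHEAD + mps) blocks every microframe.  The same endpoint at
 * ep_interval = 2 instead spreads 2 * (SS_OVERHEAD + mps +
 * SS_OVERHEAD_BURST) blocks (rounded up) across 1 << 2 = 4 microframes.
 */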
2655 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2656 struct xhci_bw_info *ep_bw,
2657 struct xhci_interval_bw_table *bw_table,
2658 struct usb_device *udev,
2659 struct xhci_virt_ep *virt_ep,
2660 struct xhci_tt_bw_info *tt_info)
2662 struct xhci_interval_bw *interval_bw;
2663 int normalized_interval;
2665 	if (xhci_is_async_ep(ep_bw->type))
		return;
2668 if (udev->speed >= USB_SPEED_SUPER) {
2669 if (xhci_is_sync_in_ep(ep_bw->type))
2670 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2671 xhci_get_ss_bw_consumed(ep_bw);
2673 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2674 xhci_get_ss_bw_consumed(ep_bw);
2678 /* SuperSpeed endpoints never get added to intervals in the table, so
2679 * this check is only valid for HS/FS/LS devices.
2681 	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
2683 /* For LS/FS devices, we need to translate the interval expressed in
2684 * microframes to frames.
2686 if (udev->speed == USB_SPEED_HIGH)
2687 normalized_interval = ep_bw->ep_interval;
	else
2689 		normalized_interval = ep_bw->ep_interval - 3;
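	/* e.g. (hypothetical) a full-speed ep_interval of 5, i.e. 2^5 = 32
	 * microframes, lands in interval_bw[5 - 3 = 2], i.e. 2^2 = 4 frames.
	 */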
2691 if (normalized_interval == 0)
2692 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2693 interval_bw = &bw_table->interval_bw[normalized_interval];
2694 interval_bw->num_packets -= ep_bw->num_packets;
2695 switch (udev->speed) {
2697 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2699 case USB_SPEED_FULL:
2700 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2702 case USB_SPEED_HIGH:
2703 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2705 case USB_SPEED_SUPER:
2706 case USB_SPEED_SUPER_PLUS:
2707 case USB_SPEED_UNKNOWN:
2708 case USB_SPEED_WIRELESS:
2709 /* Should never happen because only LS/FS/HS endpoints will get
2710 * added to the endpoint list.
	if (tt_info)
2715 		tt_info->active_eps -= 1;
2716 list_del_init(&virt_ep->bw_endpoint_list);
2719 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2720 struct xhci_bw_info *ep_bw,
2721 struct xhci_interval_bw_table *bw_table,
2722 struct usb_device *udev,
2723 struct xhci_virt_ep *virt_ep,
2724 struct xhci_tt_bw_info *tt_info)
2726 struct xhci_interval_bw *interval_bw;
2727 struct xhci_virt_ep *smaller_ep;
2728 int normalized_interval;
2730 	if (xhci_is_async_ep(ep_bw->type))
		return;
2733 	if (udev->speed >= USB_SPEED_SUPER) {
2734 if (xhci_is_sync_in_ep(ep_bw->type))
2735 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2736 xhci_get_ss_bw_consumed(ep_bw);
2738 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2739 xhci_get_ss_bw_consumed(ep_bw);
2743 /* For LS/FS devices, we need to translate the interval expressed in
2744 * microframes to frames.
2746 if (udev->speed == USB_SPEED_HIGH)
2747 normalized_interval = ep_bw->ep_interval;
	else
2749 		normalized_interval = ep_bw->ep_interval - 3;
2751 if (normalized_interval == 0)
2752 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2753 interval_bw = &bw_table->interval_bw[normalized_interval];
2754 interval_bw->num_packets += ep_bw->num_packets;
2755 switch (udev->speed) {
2757 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2759 case USB_SPEED_FULL:
2760 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2762 case USB_SPEED_HIGH:
2763 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2765 case USB_SPEED_SUPER:
2766 case USB_SPEED_SUPER_PLUS:
2767 case USB_SPEED_UNKNOWN:
2768 case USB_SPEED_WIRELESS:
2769 /* Should never happen because only LS/FS/HS endpoints will get
2770 * added to the endpoint list.
	if (tt_info)
2776 		tt_info->active_eps += 1;
2777 /* Insert the endpoint into the list, largest max packet size first. */
2778 	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
2780 if (ep_bw->max_packet_size >=
2781 smaller_ep->bw_info.max_packet_size) {
2782 /* Add the new ep before the smaller endpoint */
2783 list_add_tail(&virt_ep->bw_endpoint_list,
2784 &smaller_ep->bw_endpoint_list);
2788 /* Add the new endpoint at the end of the list. */
2789 list_add_tail(&virt_ep->bw_endpoint_list,
2790 &interval_bw->endpoints);
2793 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2794 struct xhci_virt_device *virt_dev,
2797 struct xhci_root_port_bw_info *rh_bw_info;
2798 	if (!virt_dev->tt_info)
		return;
2801 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2802 if (old_active_eps == 0 &&
2803 virt_dev->tt_info->active_eps != 0) {
2804 rh_bw_info->num_active_tts += 1;
2805 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2806 } else if (old_active_eps != 0 &&
2807 virt_dev->tt_info->active_eps == 0) {
2808 rh_bw_info->num_active_tts -= 1;
2809 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
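/*
 * In other words: the first periodic endpoint activated behind a given TT
 * charges the shared high-speed bus TT_HS_OVERHEAD once, and dropping the
 * last periodic endpoint behind that TT credits the same overhead back.
 */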
2813 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2814 struct xhci_virt_device *virt_dev,
2815 struct xhci_container_ctx *in_ctx)
2817 struct xhci_bw_info ep_bw_info[31];
2819 struct xhci_input_control_ctx *ctrl_ctx;
2820 int old_active_eps = 0;
2822 if (virt_dev->tt_info)
2823 old_active_eps = virt_dev->tt_info->active_eps;
2825 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2827 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2832 for (i = 0; i < 31; i++) {
2833 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;
2836 /* Make a copy of the BW info in case we need to revert this */
2837 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2838 sizeof(ep_bw_info[i]));
2839 /* Drop the endpoint from the interval table if the endpoint is
2840 * being dropped or changed.
2842 if (EP_IS_DROPPED(ctrl_ctx, i))
2843 xhci_drop_ep_from_interval_table(xhci,
2844 &virt_dev->eps[i].bw_info,
2850 /* Overwrite the information stored in the endpoints' bw_info */
2851 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2852 for (i = 0; i < 31; i++) {
2853 /* Add any changed or added endpoints to the interval table */
2854 if (EP_IS_ADDED(ctrl_ctx, i))
2855 xhci_add_ep_to_interval_table(xhci,
2856 &virt_dev->eps[i].bw_info,
2863 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2864 /* Ok, this fits in the bandwidth we have.
2865 * Update the number of active TTs.
2867 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2871 /* We don't have enough bandwidth for this, revert the stored info. */
2872 for (i = 0; i < 31; i++) {
2873 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;
2876 /* Drop the new copies of any added or changed endpoints from
2877 * the interval table.
2879 if (EP_IS_ADDED(ctrl_ctx, i)) {
2880 xhci_drop_ep_from_interval_table(xhci,
2881 &virt_dev->eps[i].bw_info,
2887 /* Revert the endpoint back to its old information */
2888 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2889 sizeof(ep_bw_info[i]));
2890 /* Add any changed or dropped endpoints back into the table */
2891 if (EP_IS_DROPPED(ctrl_ctx, i))
2892 xhci_add_ep_to_interval_table(xhci,
2893 &virt_dev->eps[i].bw_info,
2903 /* Issue a configure endpoint command or evaluate context command
2904 * and wait for it to finish.
2906 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2907 struct usb_device *udev,
2908 struct xhci_command *command,
2909 bool ctx_change, bool must_succeed)
2912 unsigned long flags;
2913 struct xhci_input_control_ctx *ctrl_ctx;
2914 struct xhci_virt_device *virt_dev;
2915 struct xhci_slot_ctx *slot_ctx;
2920 spin_lock_irqsave(&xhci->lock, flags);
2922 if (xhci->xhc_state & XHCI_STATE_DYING) {
2923 spin_unlock_irqrestore(&xhci->lock, flags);
2927 virt_dev = xhci->devs[udev->slot_id];
2929 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2931 spin_unlock_irqrestore(&xhci->lock, flags);
2932 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2937 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2938 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2939 spin_unlock_irqrestore(&xhci->lock, flags);
2940 xhci_warn(xhci, "Not enough host resources, "
2941 "active endpoint contexts = %u\n",
2942 xhci->num_active_eps);
2945 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2946 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2947 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2948 xhci_free_host_resources(xhci, ctrl_ctx);
2949 spin_unlock_irqrestore(&xhci->lock, flags);
2950 xhci_warn(xhci, "Not enough bandwidth\n");
2954 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2956 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2957 trace_xhci_configure_endpoint(slot_ctx);
	if (ctx_change == 0)
2960 		ret = xhci_queue_configure_endpoint(xhci, command,
2961 command->in_ctx->dma,
2962 udev->slot_id, must_succeed);
	else
2964 		ret = xhci_queue_evaluate_context(xhci, command,
2965 command->in_ctx->dma,
2966 udev->slot_id, must_succeed);
2968 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2969 xhci_free_host_resources(xhci, ctrl_ctx);
2970 spin_unlock_irqrestore(&xhci->lock, flags);
2971 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2972 "FIXME allocate a new ring segment");
2975 xhci_ring_cmd_db(xhci);
2976 spin_unlock_irqrestore(&xhci->lock, flags);
2978 /* Wait for the configure endpoint command to complete */
2979 wait_for_completion(command->completion);
	if (!ctx_change)
2982 		ret = xhci_configure_endpoint_result(xhci, udev,
				&command->status);
	else
2985 		ret = xhci_evaluate_context_result(xhci, udev,
				&command->status);
2988 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2989 spin_lock_irqsave(&xhci->lock, flags);
2990 /* If the command failed, remove the reserved resources.
2991 * Otherwise, clean up the estimate to include dropped eps.
2994 xhci_free_host_resources(xhci, ctrl_ctx);
2996 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2997 spin_unlock_irqrestore(&xhci->lock, flags);
3002 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
3003 struct xhci_virt_device *vdev, int i)
3005 struct xhci_virt_ep *ep = &vdev->eps[i];
3007 if (ep->ep_state & EP_HAS_STREAMS) {
3008 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
3009 xhci_get_endpoint_address(i));
3010 xhci_free_stream_info(xhci, ep->stream_info);
3011 ep->stream_info = NULL;
3012 ep->ep_state &= ~EP_HAS_STREAMS;
3016 /* Called after one or more calls to xhci_add_endpoint() or
3017 * xhci_drop_endpoint(). If this call fails, the USB core is expected
3018 * to call xhci_reset_bandwidth().
3020 * Since we are in the middle of changing either configuration or
3021 * installing a new alt setting, the USB core won't allow URBs to be
3022 * enqueued for any endpoint on the old config or interface. Nothing
3023 * else should be touching the xhci->devs[slot_id] structure, so we
3024 * don't need to take the xhci->lock for manipulating that.
3026 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3030 struct xhci_hcd *xhci;
3031 struct xhci_virt_device *virt_dev;
3032 struct xhci_input_control_ctx *ctrl_ctx;
3033 struct xhci_slot_ctx *slot_ctx;
3034 struct xhci_command *command;
3036 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
3039 xhci = hcd_to_xhci(hcd);
3040 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3041 (xhci->xhc_state & XHCI_STATE_REMOVING))
3044 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3045 virt_dev = xhci->devs[udev->slot_id];
3047 	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;
3051 command->in_ctx = virt_dev->in_ctx;
3053 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
3054 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3056 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3059 goto command_cleanup;
3061 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3062 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
3063 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
3065 	/* Don't issue the command if there are no endpoints to update. */
3066 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
3067 ctrl_ctx->drop_flags == 0) {
3069 goto command_cleanup;
3071 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
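	/* e.g. (hypothetical) if EP2 OUT (device context 4) is the highest
	 * context that is kept or newly added, the loop below stops at i = 4
	 * and sets Context Entries to 4.
	 */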
3072 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3073 for (i = 31; i >= 1; i--) {
3074 __le32 le32 = cpu_to_le32(BIT(i));
3076 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
3077 || (ctrl_ctx->add_flags & le32) || i == 1) {
3078 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
3079 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
3084 	ret = xhci_configure_endpoint(xhci, udev, command,
			false, false);
	if (ret)
3087 		/* Callee should call reset_bandwidth() */
3088 		goto command_cleanup;
3090 /* Free any rings that were dropped, but not changed. */
3091 for (i = 1; i < 31; i++) {
3092 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3093 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3094 xhci_free_endpoint_ring(xhci, virt_dev, i);
3095 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3098 xhci_zero_in_ctx(xhci, virt_dev);
3100 * Install any rings for completely new endpoints or changed endpoints,
3101 * and free any old rings from changed endpoints.
3103 for (i = 1; i < 31; i++) {
3104 		if (!virt_dev->eps[i].new_ring)
			continue;
3106 /* Only free the old ring if it exists.
3107 * It may not if this is the first add of an endpoint.
3109 if (virt_dev->eps[i].ring) {
3110 xhci_free_endpoint_ring(xhci, virt_dev, i);
3112 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3113 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3114 virt_dev->eps[i].new_ring = NULL;
3115 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3118 kfree(command->completion);
3123 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3125 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3127 struct xhci_hcd *xhci;
3128 struct xhci_virt_device *virt_dev;
3131 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
3134 xhci = hcd_to_xhci(hcd);
3136 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3137 virt_dev = xhci->devs[udev->slot_id];
3138 /* Free any rings allocated for added endpoints */
3139 for (i = 0; i < 31; i++) {
3140 if (virt_dev->eps[i].new_ring) {
3141 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3142 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3143 virt_dev->eps[i].new_ring = NULL;
3146 xhci_zero_in_ctx(xhci, virt_dev);
3148 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3150 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3151 struct xhci_container_ctx *in_ctx,
3152 struct xhci_container_ctx *out_ctx,
3153 struct xhci_input_control_ctx *ctrl_ctx,
3154 u32 add_flags, u32 drop_flags)
3156 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3157 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3158 xhci_slot_copy(xhci, in_ctx, out_ctx);
3159 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3162 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3163 struct usb_host_endpoint *host_ep)
3165 struct xhci_hcd *xhci;
3166 struct xhci_virt_device *vdev;
3167 struct xhci_virt_ep *ep;
3168 struct usb_device *udev;
3169 unsigned long flags;
3170 unsigned int ep_index;
3172 xhci = hcd_to_xhci(hcd);
3174 spin_lock_irqsave(&xhci->lock, flags);
3176 udev = (struct usb_device *)host_ep->hcpriv;
3177 if (!udev || !udev->slot_id)
3180 vdev = xhci->devs[udev->slot_id];
3184 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3185 ep = &vdev->eps[ep_index];
3187 /* wait for hub_tt_work to finish clearing hub TT */
3188 if (ep->ep_state & EP_CLEARING_TT) {
3189 spin_unlock_irqrestore(&xhci->lock, flags);
3190 schedule_timeout_uninterruptible(1);
3195 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3198 host_ep->hcpriv = NULL;
3199 spin_unlock_irqrestore(&xhci->lock, flags);
3203 * Called after usb core issues a clear halt control message.
3204 * The host side of the halt should already be cleared by a reset endpoint
3205 * command issued when the STALL event was received.
3207 * The reset endpoint command may only be issued to endpoints in the halted
3208 * state. For software that wishes to reset the data toggle or sequence number
3209 * of an endpoint that isn't in the halted state, this function will issue a
3210 * configure endpoint command with the Drop and Add bits set for the target
3211 * endpoint. Refer to the additional note in the xHCI specification, section 4.6.8.
3214 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3215 struct usb_host_endpoint *host_ep)
3217 struct xhci_hcd *xhci;
3218 struct usb_device *udev;
3219 struct xhci_virt_device *vdev;
3220 struct xhci_virt_ep *ep;
3221 struct xhci_input_control_ctx *ctrl_ctx;
3222 struct xhci_command *stop_cmd, *cfg_cmd;
3223 unsigned int ep_index;
3224 unsigned long flags;
3228 xhci = hcd_to_xhci(hcd);
3229 	if (!host_ep->hcpriv)
		return;
3231 udev = (struct usb_device *) host_ep->hcpriv;
3232 vdev = xhci->devs[udev->slot_id];
3235 * vdev may be lost due to xHC restore error and re-initialization
3236 * during S3/S4 resume. A new vdev will be allocated later by
3237 * xhci_discover_or_reset_device()
3239 	if (!udev->slot_id || !vdev)
		return;
3241 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3242 ep = &vdev->eps[ep_index];
3244 	/* Bail out if toggle is already being cleared by an endpoint reset */
3245 spin_lock_irqsave(&xhci->lock, flags);
3246 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3247 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3248 		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
3251 spin_unlock_irqrestore(&xhci->lock, flags);
3252 	/* Only interrupt and bulk endpoints use data toggle; see USB 2.0 spec section 5.5.4 */
3253 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3254 	    usb_endpoint_xfer_isoc(&host_ep->desc))
		return;
3257 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3259 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3262 	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
	if (!stop_cmd)
		return;
3266 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
	if (!cfg_cmd)
		goto cleanup;
3270 spin_lock_irqsave(&xhci->lock, flags);
3272 /* block queuing new trbs and ringing ep doorbell */
3273 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3276 * Make sure endpoint ring is empty before resetting the toggle/seq.
3277 * The driver is required to synchronously cancel all transfer requests.
3278 * Stop the endpoint to force xHC to update the output context
3281 if (!list_empty(&ep->ring->td_list)) {
3282 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3283 spin_unlock_irqrestore(&xhci->lock, flags);
3284 xhci_free_command(xhci, cfg_cmd);
3288 	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
				       ep_index, 0);
	if (err < 0) {
3291 spin_unlock_irqrestore(&xhci->lock, flags);
3292 xhci_free_command(xhci, cfg_cmd);
3293 		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
3298 xhci_ring_cmd_db(xhci);
3299 spin_unlock_irqrestore(&xhci->lock, flags);
3301 wait_for_completion(stop_cmd->completion);
3303 spin_lock_irqsave(&xhci->lock, flags);
3305 /* config ep command clears toggle if add and drop ep flags are set */
3306 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3308 spin_unlock_irqrestore(&xhci->lock, flags);
3309 xhci_free_command(xhci, cfg_cmd);
3310 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3315 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3316 ctrl_ctx, ep_flag, ep_flag);
3317 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3319 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3320 					udev->slot_id, false);
	if (err < 0) {
3322 spin_unlock_irqrestore(&xhci->lock, flags);
3323 xhci_free_command(xhci, cfg_cmd);
3324 		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
3329 xhci_ring_cmd_db(xhci);
3330 spin_unlock_irqrestore(&xhci->lock, flags);
3332 wait_for_completion(cfg_cmd->completion);
3334 xhci_free_command(xhci, cfg_cmd);
3336 xhci_free_command(xhci, stop_cmd);
3337 spin_lock_irqsave(&xhci->lock, flags);
3338 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3339 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3340 spin_unlock_irqrestore(&xhci->lock, flags);
3343 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3344 struct usb_device *udev, struct usb_host_endpoint *ep,
3345 unsigned int slot_id)
3348 unsigned int ep_index;
3349 unsigned int ep_state;
3353 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
3355 		return ret ? ret : -EINVAL;
3356 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3357 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3358 " descriptor for ep 0x%x does not support streams\n",
3359 ep->desc.bEndpointAddress);
3363 ep_index = xhci_get_endpoint_index(&ep->desc);
3364 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3365 if (ep_state & EP_HAS_STREAMS ||
3366 ep_state & EP_GETTING_STREAMS) {
3367 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3368 "already has streams set up.\n",
3369 ep->desc.bEndpointAddress);
3370 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3371 "dynamic stream context array reallocation.\n");
3374 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3375 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3376 "endpoint 0x%x; URBs are pending.\n",
3377 ep->desc.bEndpointAddress);
3383 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3384 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3386 unsigned int max_streams;
3388 /* The stream context array size must be a power of two */
3389 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3391 * Find out how many primary stream array entries the host controller
3392 * supports. Later we may use secondary stream arrays (similar to 2nd
3393 * level page entries), but that's an optional feature for xHCI host
3394 * controllers. xHCs must support at least 4 stream IDs.
3396 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3397 if (*num_stream_ctxs > max_streams) {
3398 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3400 *num_stream_ctxs = max_streams;
3401 *num_streams = max_streams;
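/*
 * Example with hypothetical numbers: a driver asking for 5 stream IDs needs
 * 6 including stream 0, which rounds up to 8 stream context entries.  If
 * HCC_MAX_PSA only allows 4 entries, both values are clamped to 4, leaving
 * the driver 3 usable stream IDs once stream 0 is subtracted again.
 */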
3405 /* Returns an error code if one of the endpoints already has streams.
3406 * This does not change any data structures, it only checks and gathers information.
3409 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3410 struct usb_device *udev,
3411 struct usb_host_endpoint **eps, unsigned int num_eps,
3412 unsigned int *num_streams, u32 *changed_ep_bitmask)
3414 unsigned int max_streams;
3415 unsigned int endpoint_flag;
3419 for (i = 0; i < num_eps; i++) {
3420 ret = xhci_check_streams_endpoint(xhci, udev,
3421 				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;
3425 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3426 if (max_streams < (*num_streams - 1)) {
3427 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3428 eps[i]->desc.bEndpointAddress,
3430 *num_streams = max_streams+1;
3433 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3434 		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
3436 *changed_ep_bitmask |= endpoint_flag;
3441 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3442 struct usb_device *udev,
3443 struct usb_host_endpoint **eps, unsigned int num_eps)
3445 u32 changed_ep_bitmask = 0;
3446 unsigned int slot_id;
3447 unsigned int ep_index;
3448 unsigned int ep_state;
3451 slot_id = udev->slot_id;
3452 	if (!xhci->devs[slot_id])
		return 0;
3455 for (i = 0; i < num_eps; i++) {
3456 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3457 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3458 /* Are streams already being freed for the endpoint? */
3459 if (ep_state & EP_GETTING_NO_STREAMS) {
3460 xhci_warn(xhci, "WARN Can't disable streams for "
3462 "streams are being disabled already\n",
3463 eps[i]->desc.bEndpointAddress);
3466 /* Are there actually any streams to free? */
3467 if (!(ep_state & EP_HAS_STREAMS) &&
3468 !(ep_state & EP_GETTING_STREAMS)) {
3469 xhci_warn(xhci, "WARN Can't disable streams for "
3471 "streams are already disabled!\n",
3472 eps[i]->desc.bEndpointAddress);
3473 xhci_warn(xhci, "WARN xhci_free_streams() called "
3474 "with non-streams endpoint\n");
3477 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3479 return changed_ep_bitmask;
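/*
 * e.g. (hypothetical) for two streams-capable bulk endpoints EP1 OUT and
 * EP2 IN (ep_index 1 and 4), the returned bitmask would be
 * (1 << 2) | (1 << 5), matching xhci_get_endpoint_flag() for each.
 */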
3483 * The USB device drivers use this function (through the HCD interface in USB
3484 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3485 * coordinate mass storage command queueing across multiple endpoints (basically
3486 * a stream ID == a task ID).
3488 * Setting up streams involves allocating the same size stream context array
3489 * for each endpoint and issuing a configure endpoint command for all endpoints.
3491 * Don't allow the call to succeed if one endpoint only supports one stream
3492 * (which means it doesn't support streams at all).
3494 * Drivers may get fewer stream IDs than they asked for, if the host controller
3495 * hardware or endpoints claim they can't support the number of requested streams.
3498 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3499 struct usb_host_endpoint **eps, unsigned int num_eps,
3500 unsigned int num_streams, gfp_t mem_flags)
3503 struct xhci_hcd *xhci;
3504 struct xhci_virt_device *vdev;
3505 struct xhci_command *config_cmd;
3506 struct xhci_input_control_ctx *ctrl_ctx;
3507 unsigned int ep_index;
3508 unsigned int num_stream_ctxs;
3509 unsigned int max_packet;
3510 unsigned long flags;
3511 u32 changed_ep_bitmask = 0;
3516 /* Add one to the number of streams requested to account for
3517 	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
3520 xhci = hcd_to_xhci(hcd);
3521 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3524 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3525 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3526 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3527 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3531 	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;
3535 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3537 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3539 xhci_free_command(xhci, config_cmd);
3543 /* Check to make sure all endpoints are not already configured for
3544 * streams. While we're at it, find the maximum number of streams that
3545 * all the endpoints will support and check for duplicate endpoints.
3547 spin_lock_irqsave(&xhci->lock, flags);
3548 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3549 num_eps, &num_streams, &changed_ep_bitmask);
3551 xhci_free_command(xhci, config_cmd);
3552 spin_unlock_irqrestore(&xhci->lock, flags);
3555 if (num_streams <= 1) {
3556 xhci_warn(xhci, "WARN: endpoints can't handle "
3557 "more than one stream.\n");
3558 xhci_free_command(xhci, config_cmd);
3559 spin_unlock_irqrestore(&xhci->lock, flags);
3562 vdev = xhci->devs[udev->slot_id];
3563 /* Mark each endpoint as being in transition, so
3564 * xhci_urb_enqueue() will reject all URBs.
3566 for (i = 0; i < num_eps; i++) {
3567 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3568 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3570 spin_unlock_irqrestore(&xhci->lock, flags);
3572 /* Setup internal data structures and allocate HW data structures for
3573 * streams (but don't install the HW structures in the input context
3574 * until we're sure all memory allocation succeeded).
3576 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3577 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3578 num_stream_ctxs, num_streams);
3580 for (i = 0; i < num_eps; i++) {
3581 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3582 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3583 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3586 max_packet, mem_flags);
3587 		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
3589 /* Set maxPstreams in endpoint context and update deq ptr to
3590 * point to stream context array. FIXME
3594 /* Set up the input context for a configure endpoint command. */
3595 for (i = 0; i < num_eps; i++) {
3596 struct xhci_ep_ctx *ep_ctx;
3598 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3599 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3601 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3602 vdev->out_ctx, ep_index);
3603 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3604 vdev->eps[ep_index].stream_info);
3606 /* Tell the HW to drop its old copy of the endpoint context info
3607 * and add the updated copy from the input context.
3609 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3610 vdev->out_ctx, ctrl_ctx,
3611 changed_ep_bitmask, changed_ep_bitmask);
3613 /* Issue and wait for the configure endpoint command */
3614 	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);
3617 /* xHC rejected the configure endpoint command for some reason, so we
3618 	 * leave the old ring intact and free our internal streams data
	 * structures.
	 */
	if (ret < 0)
		goto cleanup;
3624 spin_lock_irqsave(&xhci->lock, flags);
3625 for (i = 0; i < num_eps; i++) {
3626 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3627 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3628 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3629 udev->slot_id, ep_index);
3630 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3632 xhci_free_command(xhci, config_cmd);
3633 spin_unlock_irqrestore(&xhci->lock, flags);
3635 for (i = 0; i < num_eps; i++) {
3636 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3637 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3639 /* Subtract 1 for stream 0, which drivers can't use */
3640 return num_streams - 1;
3643 /* If it didn't work, free the streams! */
3644 for (i = 0; i < num_eps; i++) {
3645 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3646 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3647 vdev->eps[ep_index].stream_info = NULL;
3648 /* FIXME Unset maxPstreams in endpoint context and
3649 * update deq ptr to point to normal stream ring.
3651 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3652 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3653 xhci_endpoint_zero(xhci, vdev, eps[i]);
3655 xhci_free_command(xhci, config_cmd);
3659 /* Transition the endpoint from using streams to being a "normal" endpoint
3662 * Modify the endpoint context state, submit a configure endpoint command,
3663 * and free all endpoint rings for streams if that completes successfully.
3665 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3666 struct usb_host_endpoint **eps, unsigned int num_eps,
3670 struct xhci_hcd *xhci;
3671 struct xhci_virt_device *vdev;
3672 struct xhci_command *command;
3673 struct xhci_input_control_ctx *ctrl_ctx;
3674 unsigned int ep_index;
3675 unsigned long flags;
3676 u32 changed_ep_bitmask;
3678 xhci = hcd_to_xhci(hcd);
3679 vdev = xhci->devs[udev->slot_id];
3681 /* Set up a configure endpoint command to remove the streams rings */
3682 spin_lock_irqsave(&xhci->lock, flags);
3683 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3684 udev, eps, num_eps);
3685 if (changed_ep_bitmask == 0) {
3686 spin_unlock_irqrestore(&xhci->lock, flags);
3690 /* Use the xhci_command structure from the first endpoint. We may have
3691 * allocated too many, but the driver may call xhci_free_streams() for
3692 * each endpoint it grouped into one call to xhci_alloc_streams().
3694 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3695 command = vdev->eps[ep_index].stream_info->free_streams_command;
3696 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3698 spin_unlock_irqrestore(&xhci->lock, flags);
3699 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3704 for (i = 0; i < num_eps; i++) {
3705 struct xhci_ep_ctx *ep_ctx;
3707 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3708 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3709 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3710 EP_GETTING_NO_STREAMS;
3712 xhci_endpoint_copy(xhci, command->in_ctx,
3713 vdev->out_ctx, ep_index);
3714 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3715 &vdev->eps[ep_index]);
3717 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3718 vdev->out_ctx, ctrl_ctx,
3719 changed_ep_bitmask, changed_ep_bitmask);
3720 spin_unlock_irqrestore(&xhci->lock, flags);
3722 /* Issue and wait for the configure endpoint command,
3723 * which must succeed.
3725 	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);
3728 /* xHC rejected the configure endpoint command for some reason, so we
3729 	 * leave the stream rings intact.
	 */
	if (ret < 0)
		return ret;
3734 spin_lock_irqsave(&xhci->lock, flags);
3735 for (i = 0; i < num_eps; i++) {
3736 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3737 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3738 vdev->eps[ep_index].stream_info = NULL;
3739 /* FIXME Unset maxPstreams in endpoint context and
3740 * update deq ptr to point to normal stream ring.
3742 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3743 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3745 spin_unlock_irqrestore(&xhci->lock, flags);
3751 * Deletes endpoint resources for endpoints that were active before a Reset
3752 * Device command, or a Disable Slot command. The Reset Device command leaves
3753 * the control endpoint intact, whereas the Disable Slot command deletes it.
3755 * Must be called with xhci->lock held.
3757 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3758 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3761 unsigned int num_dropped_eps = 0;
3762 unsigned int drop_flags = 0;
3764 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3765 if (virt_dev->eps[i].ring) {
3766 			drop_flags |= 1 << i;
			num_dropped_eps++;
3770 xhci->num_active_eps -= num_dropped_eps;
3771 if (num_dropped_eps)
3772 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3773 "Dropped %u ep ctxs, flags = 0x%x, "
3775 num_dropped_eps, drop_flags,
3776 xhci->num_active_eps);
3780 * This submits a Reset Device Command, which will set the device state to 0,
3781 * set the device address to 0, and disable all the endpoints except the default
3782 * control endpoint. The USB core should come back and call
3783 * xhci_address_device(), and then re-set up the configuration. If this is
3784 * called because of a usb_reset_and_verify_device(), then the old alternate
3785 * settings will be re-installed through the normal bandwidth allocation
3788 * Wait for the Reset Device command to finish. Remove all structures
3789 * associated with the endpoints that were disabled. Clear the input device
3790 * structure? Reset the control endpoint 0 max packet size?
3792 * If the virt_dev to be reset does not exist or does not match the udev,
3793 * it means the device is lost, possibly due to the xHC restore error and
3794 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3795 * re-allocate the device.
3797 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3798 struct usb_device *udev)
3801 unsigned long flags;
3802 struct xhci_hcd *xhci;
3803 unsigned int slot_id;
3804 struct xhci_virt_device *virt_dev;
3805 struct xhci_command *reset_device_cmd;
3806 struct xhci_slot_ctx *slot_ctx;
3807 int old_active_eps = 0;
3809 	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
3812 xhci = hcd_to_xhci(hcd);
3813 slot_id = udev->slot_id;
3814 	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
3816 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3817 "not exist. Re-allocate the device\n", slot_id);
3818 ret = xhci_alloc_dev(hcd, udev);
3825 if (virt_dev->tt_info)
3826 old_active_eps = virt_dev->tt_info->active_eps;
3828 if (virt_dev->udev != udev) {
3829 		/* If the virt_dev and the udev do not match, this virt_dev
3830 * may belong to another udev.
3831 * Re-allocate the device.
3833 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3834 "not match the udev. Re-allocate the device\n",
3836 ret = xhci_alloc_dev(hcd, udev);
3843 /* If device is not setup, there is no point in resetting it */
3844 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3845 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3846 SLOT_STATE_DISABLED)
3849 trace_xhci_discover_or_reset_device(slot_ctx);
3851 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3852 /* Allocate the command structure that holds the struct completion.
3853 * Assume we're in process context, since the normal device reset
3854 * process has to wait for the device anyway. Storage devices are
3855 * reset as part of error handling, so use GFP_NOIO instead of GFP_KERNEL.
3858 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3859 if (!reset_device_cmd) {
3860 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}
3864 /* Attempt to submit the Reset Device command to the command ring */
3865 spin_lock_irqsave(&xhci->lock, flags);
3867 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3869 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3870 spin_unlock_irqrestore(&xhci->lock, flags);
3871 goto command_cleanup;
3873 xhci_ring_cmd_db(xhci);
3874 spin_unlock_irqrestore(&xhci->lock, flags);
3876 /* Wait for the Reset Device command to finish */
3877 wait_for_completion(reset_device_cmd->completion);
3879 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3880 * unless we tried to reset a slot ID that wasn't enabled,
3881 * or the device wasn't in the addressed or configured state.
3883 	ret = reset_device_cmd->status;
	switch (ret) {
3885 case COMP_COMMAND_ABORTED:
3886 case COMP_COMMAND_RING_STOPPED:
3887 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3889 goto command_cleanup;
3890 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3891 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3892 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3894 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3895 xhci_dbg(xhci, "Not freeing device rings.\n");
3896 /* Don't treat this as an error. May change my mind later. */
3898 goto command_cleanup;
3900 xhci_dbg(xhci, "Successful reset device command.\n");
3903 		if (xhci_is_vendor_info_code(xhci, ret))
			break;
3905 xhci_warn(xhci, "Unknown completion code %u for "
3906 "reset device command.\n", ret);
3908 goto command_cleanup;
3911 /* Free up host controller endpoint resources */
3912 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3913 spin_lock_irqsave(&xhci->lock, flags);
3914 /* Don't delete the default control endpoint resources */
3915 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3916 spin_unlock_irqrestore(&xhci->lock, flags);
3919 /* Everything but endpoint 0 is disabled, so free the rings. */
3920 for (i = 1; i < 31; i++) {
3921 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3923 if (ep->ep_state & EP_HAS_STREAMS) {
3924 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3925 xhci_get_endpoint_address(i));
3926 xhci_free_stream_info(xhci, ep->stream_info);
3927 ep->stream_info = NULL;
3928 ep->ep_state &= ~EP_HAS_STREAMS;
3932 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3933 xhci_free_endpoint_ring(xhci, virt_dev, i);
3935 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3936 xhci_drop_ep_from_interval_table(xhci,
3937 &virt_dev->eps[i].bw_info,
3942 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3944 /* If necessary, update the number of active TTs on this root port */
3945 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3946 virt_dev->flags = 0;
3950 xhci_free_command(xhci, reset_device_cmd);
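/*
 * The flow above is the standard xHCI command pattern used throughout
 * this file: allocate a command, queue it under xhci->lock, ring the
 * doorbell, wait for its completion, inspect ->status, then free it.
 * A minimal sketch of the same sequence, assuming a valid xhci and an
 * enabled slot_id (illustrative only, not a replacement for
 * xhci_discover_or_reset_device()):
 *
 *	struct xhci_command *cmd;
 *	unsigned long flags;
 *
 *	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
 *	if (!cmd)
 *		return -ENOMEM;
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	if (xhci_queue_reset_device(xhci, cmd, slot_id)) {
 *		spin_unlock_irqrestore(&xhci->lock, flags);
 *		xhci_free_command(xhci, cmd);
 *		return -ENOMEM;		(command ring was full)
 *	}
 *	xhci_ring_cmd_db(xhci);
 *	spin_unlock_irqrestore(&xhci->lock, flags);
 *	wait_for_completion(cmd->completion);
 *	... inspect cmd->status (COMP_SUCCESS etc.) ...
 *	xhci_free_command(xhci, cmd);
 */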
3955 * At this point, the struct usb_device is about to go away, the device has
3956 * disconnected, and all traffic has been stopped and the endpoints have been
3957 * disabled. Free any HC data structures associated with that device.
3959 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3961 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3962 struct xhci_virt_device *virt_dev;
3963 struct xhci_slot_ctx *slot_ctx;
3967 * We called pm_runtime_get_noresume when the device was attached.
3968 * Decrement the counter here to allow the controller to runtime suspend
3969 * if no devices remain.
3971 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3972 pm_runtime_put_noidle(hcd->self.controller);
3974 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3975 /* If the host is halted due to driver unload, we still need to free the
3976 * device.
3977 */
3978 if (ret <= 0 && ret != -ENODEV)
3981 virt_dev = xhci->devs[udev->slot_id];
3982 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3983 trace_xhci_free_dev(slot_ctx);
3985 /* Stop any wayward timer functions (which may grab the lock) */
3986 for (i = 0; i < 31; i++)
3987 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3988 virt_dev->udev = NULL;
3989 xhci_disable_slot(xhci, udev->slot_id);
3990 xhci_free_virt_device(xhci, udev->slot_id);
3993 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3995 struct xhci_command *command;
3996 unsigned long flags;
4000 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4004 xhci_debugfs_remove_slot(xhci, slot_id);
4006 spin_lock_irqsave(&xhci->lock, flags);
4007 /* Don't disable the slot if the host controller is dead. */
4008 state = readl(&xhci->op_regs->status);
4009 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
4010 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4011 spin_unlock_irqrestore(&xhci->lock, flags);
4016 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
4019 spin_unlock_irqrestore(&xhci->lock, flags);
4023 xhci_ring_cmd_db(xhci);
4024 spin_unlock_irqrestore(&xhci->lock, flags);
4026 wait_for_completion(command->completion);
4028 if (command->status != COMP_SUCCESS)
4029 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
4030 slot_id, command->status);
4032 xhci_free_command(xhci, command);
4038 * Checks if we have enough host controller resources for the default control
4039 * endpoint.
4041 * Must be called with xhci->lock held.
4043 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4045 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4046 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4047 "Not enough ep ctxs: "
4048 "%u active, need to add 1, limit is %u.",
4049 xhci->num_active_eps, xhci->limit_active_eps);
4052 xhci->num_active_eps += 1;
4053 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4054 "Adding 1 ep ctx, %u now active.",
4055 xhci->num_active_eps);
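/*
 * Worked example (values assumed): on hosts with XHCI_EP_LIMIT_QUIRK the
 * limit is typically 64 endpoint contexts. With num_active_eps == 63 the
 * reservation above succeeds and bumps the count to 64; with
 * num_active_eps == 64 it fails, since 64 + 1 > 64, and the caller
 * gets -ENOMEM.
 */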
4061 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
4062 * timed out, or allocating memory failed. Returns 1 on success.
4064 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4066 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4067 struct xhci_virt_device *vdev;
4068 struct xhci_slot_ctx *slot_ctx;
4069 unsigned long flags;
4071 struct xhci_command *command;
4073 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4077 spin_lock_irqsave(&xhci->lock, flags);
4078 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4080 spin_unlock_irqrestore(&xhci->lock, flags);
4081 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4082 xhci_free_command(xhci, command);
4085 xhci_ring_cmd_db(xhci);
4086 spin_unlock_irqrestore(&xhci->lock, flags);
4088 wait_for_completion(command->completion);
4089 slot_id = command->slot_id;
4091 if (!slot_id || command->status != COMP_SUCCESS) {
4092 xhci_err(xhci, "Error while assigning device slot ID\n");
4093 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4095 readl(&xhci->cap_regs->hcs_params1)));
4096 xhci_free_command(xhci, command);
4100 xhci_free_command(xhci, command);
4102 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4103 spin_lock_irqsave(&xhci->lock, flags);
4104 ret = xhci_reserve_host_control_ep_resources(xhci);
4106 spin_unlock_irqrestore(&xhci->lock, flags);
4107 xhci_warn(xhci, "Not enough host resources, "
4108 "active endpoint contexts = %u\n",
4109 xhci->num_active_eps);
4112 spin_unlock_irqrestore(&xhci->lock, flags);
4114 /* Use GFP_NOIO, since this function can be called from
4115 * xhci_discover_or_reset_device(), which may be called as part of
4116 * mass storage driver error handling.
4118 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4119 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4122 vdev = xhci->devs[slot_id];
4123 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4124 trace_xhci_alloc_dev(slot_ctx);
4126 udev->slot_id = slot_id;
4128 xhci_debugfs_create_slot(xhci, slot_id);
4131 * If resetting upon resume, we can't put the controller into runtime
4132 * suspend if there is a device attached.
4134 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4135 pm_runtime_get_noresume(hcd->self.controller);
4137 /* Is this a LS or FS device under a HS hub? */
4138 /* Hub or peripheral? */
4142 xhci_disable_slot(xhci, udev->slot_id);
4143 xhci_free_virt_device(xhci, udev->slot_id);
4149 * Issue an Address Device command and optionally send a corresponding
4150 * SetAddress request to the device.
4152 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4153 enum xhci_setup_dev setup)
4155 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4156 unsigned long flags;
4157 struct xhci_virt_device *virt_dev;
4159 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4160 struct xhci_slot_ctx *slot_ctx;
4161 struct xhci_input_control_ctx *ctrl_ctx;
4163 struct xhci_command *command = NULL;
4165 mutex_lock(&xhci->mutex);
4167 if (xhci->xhc_state) { /* dying, removing or halted */
4172 if (!udev->slot_id) {
4173 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4174 "Bad Slot ID %d", udev->slot_id);
4179 virt_dev = xhci->devs[udev->slot_id];
4181 if (WARN_ON(!virt_dev)) {
4183 * In plug/unplug torture test with an NEC controller,
4184 * a zero-dereference was observed once due to virt_dev = 0.
4185 * Print useful debug rather than crash if it is observed again!
4187 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4192 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4193 trace_xhci_setup_device_slot(slot_ctx);
4195 if (setup == SETUP_CONTEXT_ONLY) {
4196 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4197 SLOT_STATE_DEFAULT) {
4198 xhci_dbg(xhci, "Slot already in default state\n");
4203 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4209 command->in_ctx = virt_dev->in_ctx;
4211 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4212 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4214 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4220 * If this is the first Set Address since device plug-in or
4221 * virt_device reallocation after a resume with an xHCI power loss,
4222 * then set up the slot context.
4224 if (!slot_ctx->dev_info)
4225 xhci_setup_addressable_virt_dev(xhci, udev);
4226 /* Otherwise, update the control endpoint ring enqueue pointer. */
4228 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4229 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4230 ctrl_ctx->drop_flags = 0;
4232 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4233 le32_to_cpu(slot_ctx->dev_info) >> 27);
4235 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4236 spin_lock_irqsave(&xhci->lock, flags);
4237 trace_xhci_setup_device(virt_dev);
4238 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4239 udev->slot_id, setup);
4241 spin_unlock_irqrestore(&xhci->lock, flags);
4242 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4243 "FIXME: allocate a command ring segment");
4246 xhci_ring_cmd_db(xhci);
4247 spin_unlock_irqrestore(&xhci->lock, flags);
4249 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4250 wait_for_completion(command->completion);
4252 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4253 * the SetAddress() "recovery interval" required by USB and aborting the
4254 * command on a timeout.
4256 switch (command->status) {
4257 case COMP_COMMAND_ABORTED:
4258 case COMP_COMMAND_RING_STOPPED:
4259 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4262 case COMP_CONTEXT_STATE_ERROR:
4263 case COMP_SLOT_NOT_ENABLED_ERROR:
4264 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4265 act, udev->slot_id);
4268 case COMP_USB_TRANSACTION_ERROR:
4269 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4271 mutex_unlock(&xhci->mutex);
4272 ret = xhci_disable_slot(xhci, udev->slot_id);
4273 xhci_free_virt_device(xhci, udev->slot_id);
4275 xhci_alloc_dev(hcd, udev);
4276 kfree(command->completion);
4279 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4280 dev_warn(&udev->dev,
4281 "ERROR: Incompatible device for setup %s command\n", act);
4285 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4286 "Successful setup %s command", act);
4290 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4291 act, command->status);
4292 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4298 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4299 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4300 "Op regs DCBAA ptr = %#016llx", temp_64);
4301 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4302 "Slot ID %d dcbaa entry @%p = %#016llx",
4304 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4305 (unsigned long long)
4306 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4307 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4308 "Output Context DMA address = %#08llx",
4309 (unsigned long long)virt_dev->out_ctx->dma);
4310 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4311 le32_to_cpu(slot_ctx->dev_info) >> 27);
4313 * USB core uses address 1 for the roothubs, so we add one to the
4314 * address given back to us by the HC.
4316 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4317 le32_to_cpu(slot_ctx->dev_info) >> 27);
4318 /* Zero the input context control for later use */
4319 ctrl_ctx->add_flags = 0;
4320 ctrl_ctx->drop_flags = 0;
4321 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4322 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4324 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4325 "Internal device address = %d",
4326 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4328 mutex_unlock(&xhci->mutex);
4330 kfree(command->completion);
4336 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4338 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4341 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4343 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4347 * Translate the logical port index into the real index in the HW port
4348 * status registers. Calculate the offset between the port's PORTSC
4349 * register and the port status base, then divide by the number of
4350 * per-port registers to get the real index. Raw port numbers are 1-based.
4352 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4354 struct xhci_hub *rhub;
4356 rhub = xhci_get_rhub(hcd);
4357 return rhub->ports[port1 - 1]->hw_portnum + 1;
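/*
 * Example: on a host whose PORTSC registers interleave USB2 and USB3
 * ports, logical port1 == 2 on a roothub looks up rhub->ports[1]; its
 * hw_portnum is the 0-based index into the combined register array, so
 * adding 1 yields the 1-based raw port number.
 */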
4361 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4362 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4364 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4365 struct usb_device *udev, u16 max_exit_latency)
4367 struct xhci_virt_device *virt_dev;
4368 struct xhci_command *command;
4369 struct xhci_input_control_ctx *ctrl_ctx;
4370 struct xhci_slot_ctx *slot_ctx;
4371 unsigned long flags;
4374 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4378 spin_lock_irqsave(&xhci->lock, flags);
4380 virt_dev = xhci->devs[udev->slot_id];
4383 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4384 * xHC was re-initialized. Exit latency will be set later after
4385 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4388 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4389 spin_unlock_irqrestore(&xhci->lock, flags);
4393 /* Attempt to issue an Evaluate Context command to change the MEL. */
4394 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4396 spin_unlock_irqrestore(&xhci->lock, flags);
4397 xhci_free_command(xhci, command);
4398 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4403 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4404 spin_unlock_irqrestore(&xhci->lock, flags);
4406 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4407 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4408 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4409 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4410 slot_ctx->dev_state = 0;
4412 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4413 "Set up evaluate context for LPM MEL change.");
4415 /* Issue and wait for the evaluate context command. */
4416 ret = xhci_configure_endpoint(xhci, udev, command,
4420 spin_lock_irqsave(&xhci->lock, flags);
4421 virt_dev->current_mel = max_exit_latency;
4422 spin_unlock_irqrestore(&xhci->lock, flags);
4425 xhci_free_command(xhci, command);
4432 /* BESL to HIRD Encoding array for USB2 LPM */
4433 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4434 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4436 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4437 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4438 struct usb_device *udev)
4440 int u2del, besl, besl_host;
4441 int besl_device = 0;
4444 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4445 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4447 if (field & USB_BESL_SUPPORT) {
4448 for (besl_host = 0; besl_host < 16; besl_host++) {
4449 if (xhci_besl_encoding[besl_host] >= u2del)
4452 /* Use baseline BESL value as default */
4453 if (field & USB_BESL_BASELINE_VALID)
4454 besl_device = USB_GET_BESL_BASELINE(field);
4455 else if (field & USB_BESL_DEEP_VALID)
4456 besl_device = USB_GET_BESL_DEEP(field);
4461 besl_host = (u2del - 51) / 75 + 1;
4464 besl = besl_host + besl_device;
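/*
 * Worked example (values assumed): with u2del == 400 and a BESL-capable
 * device, the loop picks besl_host == 4, the first index where
 * xhci_besl_encoding[besl_host] >= u2del (encoding[4] == 400). A
 * device-reported BESL of 2 then gives besl == 4 + 2 == 6. Without BESL
 * support, the HIRD formula is used instead:
 *	besl_host = (400 - 51) / 75 + 1 == 5
 * (The sum is clamped to the 4-bit maximum of 15 before returning.)
 */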
4471 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4472 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4479 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4481 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4482 l1 = udev->l1_params.timeout / 256;
4484 /* device has preferred BESLD */
4485 if (field & USB_BESL_DEEP_VALID) {
4486 besld = USB_GET_BESL_DEEP(field);
4490 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
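/*
 * Worked example (values assumed): l1_params.timeout == 512us yields
 * l1 == 512 / 256 == 2, i.e. an L1 timeout of two 256us ticks. If the
 * device also reports a valid deep BESL, that value lands in the BESLD
 * field and HIRDM selects BESL interpretation; the return value is the
 * image to be written to PORTHLPMC.
 */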
4493 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4494 struct usb_device *udev, int enable)
4496 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4497 struct xhci_port **ports;
4498 __le32 __iomem *pm_addr, *hlpm_addr;
4499 u32 pm_val, hlpm_val, field;
4500 unsigned int port_num;
4501 unsigned long flags;
4502 int hird, exit_latency;
4505 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4508 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4512 if (!udev->parent || udev->parent->parent ||
4513 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4516 if (udev->usb2_hw_lpm_capable != 1)
4519 spin_lock_irqsave(&xhci->lock, flags);
4521 ports = xhci->usb2_rhub.ports;
4522 port_num = udev->portnum - 1;
4523 pm_addr = ports[port_num]->addr + PORTPMSC;
4524 pm_val = readl(pm_addr);
4525 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4527 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4528 enable ? "enable" : "disable", port_num + 1);
4531 /* Host supports BESL timeout instead of HIRD */
4532 if (udev->usb2_hw_lpm_besl_capable) {
4533 /* If the device doesn't have a preferred BESL value, use a
4534 * default one that works with mixed HIRD and BESL
4535 * systems. See the XHCI_DEFAULT_BESL definition in xhci.h.
4537 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4538 if ((field & USB_BESL_SUPPORT) &&
4539 (field & USB_BESL_BASELINE_VALID))
4540 hird = USB_GET_BESL_BASELINE(field);
4542 hird = udev->l1_params.besl;
4544 exit_latency = xhci_besl_encoding[hird];
4545 spin_unlock_irqrestore(&xhci->lock, flags);
4547 ret = xhci_change_max_exit_latency(xhci, udev,
4551 spin_lock_irqsave(&xhci->lock, flags);
4553 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4554 writel(hlpm_val, hlpm_addr);
4558 hird = xhci_calculate_hird_besl(xhci, udev);
4561 pm_val &= ~PORT_HIRD_MASK;
4562 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4563 writel(pm_val, pm_addr);
4564 pm_val = readl(pm_addr);
4566 writel(pm_val, pm_addr);
4570 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4571 writel(pm_val, pm_addr);
4574 if (udev->usb2_hw_lpm_besl_capable) {
4575 spin_unlock_irqrestore(&xhci->lock, flags);
4576 xhci_change_max_exit_latency(xhci, udev, 0);
4577 readl_poll_timeout(ports[port_num]->addr, pm_val,
4578 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4584 spin_unlock_irqrestore(&xhci->lock, flags);
4588 /* Check whether a USB2 port supports a given extended capability protocol.
4589 * Only USB2 ports' extended protocol capability values are cached.
4590 * Return 1 if the capability is supported.
4592 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4593 unsigned capability)
4595 u32 port_offset, port_count;
4598 for (i = 0; i < xhci->num_ext_caps; i++) {
4599 if (xhci->ext_caps[i] & capability) {
4600 /* port offsets start at 1 */
4601 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4602 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4603 if (port >= port_offset &&
4604 port < port_offset + port_count)
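/*
 * Worked example: an extended capability entry with a 1-based port
 * offset of 1 and a port count of 4 covers 0-based ports 0..3, so a
 * query for port 2 matches and the capability is reported as supported.
 */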
4611 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4613 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4614 int portnum = udev->portnum - 1;
4616 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4619 /* So far we only support LPM for non-hub devices connected to the root hub */
4620 if (!udev->parent || udev->parent->parent ||
4621 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4624 if (xhci->hw_lpm_support == 1 &&
4625 xhci_check_usb2_port_capability(
4626 xhci, portnum, XHCI_HLC)) {
4627 udev->usb2_hw_lpm_capable = 1;
4628 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4629 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4630 if (xhci_check_usb2_port_capability(xhci, portnum,
4632 udev->usb2_hw_lpm_besl_capable = 1;
4638 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4640 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4641 static unsigned long long xhci_service_interval_to_ns(
4642 struct usb_endpoint_descriptor *desc)
4644 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
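/*
 * Worked example: bInterval == 4 gives
 *	(1ULL << 3) * 125 * 1000 == 1000000ns,
 * i.e. a 1ms service interval, matching the formula above.
 */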
4647 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4648 enum usb3_link_state state)
4650 unsigned long long sel;
4651 unsigned long long pel;
4652 unsigned int max_sel_pel;
4657 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4658 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4659 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4660 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4664 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4665 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4666 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4670 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4672 return USB3_LPM_DISABLED;
4675 if (sel <= max_sel_pel && pel <= max_sel_pel)
4676 return USB3_LPM_DEVICE_INITIATED;
4678 if (sel > max_sel_pel)
4679 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4680 "due to long SEL %llu ms\n",
4683 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4684 "due to long PEL %llu ms\n",
4686 return USB3_LPM_DISABLED;
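/*
 * Note: the conversions above turn the SEL/PEL values that the USB core
 * stores in nanoseconds into microseconds, the unit of the
 * USB3_LPM_MAX_U1_SEL_PEL / USB3_LPM_MAX_U2_SEL_PEL limits;
 * device-initiated LPM is only allowed when both fit.
 */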
4689 /* The U1 timeout should be the maximum of the following values:
4690 * - For control endpoints, U1 system exit latency (SEL) * 3
4691 * - For bulk endpoints, U1 SEL * 5
4692 * - For interrupt endpoints:
4693 * - Notification EPs, U1 SEL * 3
4694 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4695 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4697 static unsigned long long xhci_calculate_intel_u1_timeout(
4698 struct usb_device *udev,
4699 struct usb_endpoint_descriptor *desc)
4701 unsigned long long timeout_ns;
4705 ep_type = usb_endpoint_type(desc);
4707 case USB_ENDPOINT_XFER_CONTROL:
4708 timeout_ns = udev->u1_params.sel * 3;
4710 case USB_ENDPOINT_XFER_BULK:
4711 timeout_ns = udev->u1_params.sel * 5;
4713 case USB_ENDPOINT_XFER_INT:
4714 intr_type = usb_endpoint_interrupt_type(desc);
4715 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4716 timeout_ns = udev->u1_params.sel * 3;
4719 /* Otherwise the calculation is the same as isoc eps */
4721 case USB_ENDPOINT_XFER_ISOC:
4722 timeout_ns = xhci_service_interval_to_ns(desc);
4723 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4724 if (timeout_ns < udev->u1_params.sel * 2)
4725 timeout_ns = udev->u1_params.sel * 2;
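/*
 * Worked example for the Intel U1 rules above (values assumed): an
 * isochronous endpoint with bInterval == 4 has a 1ms service interval,
 * so timeout_ns == max(1050000, sel * 2). With u1_params.sel == 100000ns
 * the 105%-of-interval term wins and the U1 timeout is 1.05ms.
 */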
4734 /* Returns the hub-encoded U1 timeout value. */
4735 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4736 struct usb_device *udev,
4737 struct usb_endpoint_descriptor *desc)
4739 unsigned long long timeout_ns;
4741 /* Prevent U1 if service interval is shorter than U1 exit latency */
4742 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4743 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4744 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4745 return USB3_LPM_DISABLED;
4749 if (xhci->quirks & XHCI_INTEL_HOST)
4750 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4752 timeout_ns = udev->u1_params.sel;
4754 /* The U1 timeout is encoded in 1us intervals.
4755 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4757 if (timeout_ns == USB3_LPM_DISABLED)
4760 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4762 /* If the necessary timeout value is bigger than what we can set in the
4763 * USB 3.0 hub, we have to disable hub-initiated U1.
4765 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4767 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4768 "due to long timeout %llu ms\n", timeout_ns);
4769 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
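/*
 * Worked example (values assumed): on a non-Intel host with
 * u1_params.sel == 4500ns, the 1us encoding above gives
 * DIV_ROUND_UP_ULL(4500, 1000) == 5, so the hub is asked to initiate U1
 * after 5us of idleness, well under USB3_LPM_U1_MAX_TIMEOUT.
 */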
4772 /* The U2 timeout should be the maximum of:
4773 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4774 * - largest bInterval of any active periodic endpoint (to avoid going
4775 * into lower power link states between intervals).
4776 * - the U2 Exit Latency of the device
4778 static unsigned long long xhci_calculate_intel_u2_timeout(
4779 struct usb_device *udev,
4780 struct usb_endpoint_descriptor *desc)
4782 unsigned long long timeout_ns;
4783 unsigned long long u2_del_ns;
4785 timeout_ns = 10 * 1000 * 1000;
4787 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4788 (xhci_service_interval_to_ns(desc) > timeout_ns))
4789 timeout_ns = xhci_service_interval_to_ns(desc);
4791 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4792 if (u2_del_ns > timeout_ns)
4793 timeout_ns = u2_del_ns;
4798 /* Returns the hub-encoded U2 timeout value. */
4799 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4800 struct usb_device *udev,
4801 struct usb_endpoint_descriptor *desc)
4803 unsigned long long timeout_ns;
4805 /* Prevent U2 if service interval is shorter than U2 exit latency */
4806 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4807 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4808 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4809 return USB3_LPM_DISABLED;
4813 if (xhci->quirks & XHCI_INTEL_HOST)
4814 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4816 timeout_ns = udev->u2_params.sel;
4818 /* The U2 timeout is encoded in 256us intervals */
4819 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4820 /* If the necessary timeout value is bigger than what we can set in the
4821 * USB 3.0 hub, we have to disable hub-initiated U2.
4823 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4825 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4826 "due to long timeout %llu ms\n", timeout_ns);
4827 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
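/*
 * Worked example: the Intel U2 floor of 10ms encodes as
 *	DIV_ROUND_UP_ULL(10 * 1000 * 1000, 256 * 1000) == 40,
 * i.e. the hub waits 40 ticks of 256us (~10.24ms) before initiating U2.
 */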
4830 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4831 struct usb_device *udev,
4832 struct usb_endpoint_descriptor *desc,
4833 enum usb3_link_state state,
4836 if (state == USB3_LPM_U1)
4837 return xhci_calculate_u1_timeout(xhci, udev, desc);
4838 else if (state == USB3_LPM_U2)
4839 return xhci_calculate_u2_timeout(xhci, udev, desc);
4841 return USB3_LPM_DISABLED;
4844 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4845 struct usb_device *udev,
4846 struct usb_endpoint_descriptor *desc,
4847 enum usb3_link_state state,
4852 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4853 desc, state, timeout);
4855 /* If we found we can't enable hub-initiated LPM, and
4856 * the U1 or U2 exit latency was too high to allow
4857 * device-initiated LPM as well, then we will disable LPM
4858 * for this device, so stop searching any further.
4860 if (alt_timeout == USB3_LPM_DISABLED) {
4861 *timeout = alt_timeout;
4864 if (alt_timeout > *timeout)
4865 *timeout = alt_timeout;
4869 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4870 struct usb_device *udev,
4871 struct usb_host_interface *alt,
4872 enum usb3_link_state state,
4877 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4878 if (xhci_update_timeout_for_endpoint(xhci, udev,
4879 &alt->endpoint[j].desc, state, timeout))
4885 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4886 enum usb3_link_state state)
4888 struct usb_device *parent;
4889 unsigned int num_hubs;
4891 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4892 for (parent = udev->parent, num_hubs = 0; parent->parent;
4893 parent = parent->parent)
4899 dev_dbg(&udev->dev, "Disabling U1/U2 link state for device"
4900 " below second-tier hub.\n");
4901 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4902 "to decrease power consumption.\n");
4906 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4907 struct usb_device *udev,
4908 enum usb3_link_state state)
4910 if (xhci->quirks & XHCI_INTEL_HOST)
4911 return xhci_check_intel_tier_policy(udev, state);
4916 /* Returns the U1 or U2 timeout that should be enabled.
4917 * If the tier check or timeout setting functions return with a non-zero exit
4918 * code, that means the timeout value has been finalized and we shouldn't look
4919 * at any more endpoints.
4921 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4922 struct usb_device *udev, enum usb3_link_state state)
4924 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4925 struct usb_host_config *config;
4928 u16 timeout = USB3_LPM_DISABLED;
4930 if (state == USB3_LPM_U1)
4932 else if (state == USB3_LPM_U2)
4935 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4940 /* Gather some information about the currently installed configuration
4941 * and alternate interface settings.
4943 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4947 config = udev->actconfig;
4951 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4952 struct usb_driver *driver;
4953 struct usb_interface *intf = config->interface[i];
4958 /* Check if any currently bound drivers want hub-initiated LPM
4959 * disabled.
4960 */
4961 if (intf->dev.driver) {
4962 driver = to_usb_driver(intf->dev.driver);
4963 if (driver && driver->disable_hub_initiated_lpm) {
4964 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4965 state_name, driver->name);
4966 timeout = xhci_get_timeout_no_hub_lpm(udev,
4968 if (timeout == USB3_LPM_DISABLED)
4973 /* Not sure how this could happen... */
4974 if (!intf->cur_altsetting)
4977 if (xhci_update_timeout_for_interface(xhci, udev,
4978 intf->cur_altsetting,
4985 static int calculate_max_exit_latency(struct usb_device *udev,
4986 enum usb3_link_state state_changed,
4987 u16 hub_encoded_timeout)
4989 unsigned long long u1_mel_us = 0;
4990 unsigned long long u2_mel_us = 0;
4991 unsigned long long mel_us = 0;
4997 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4998 hub_encoded_timeout == USB3_LPM_DISABLED);
4999 disabling_u2 = (state_changed == USB3_LPM_U2 &&
5000 hub_encoded_timeout == USB3_LPM_DISABLED);
5002 enabling_u1 = (state_changed == USB3_LPM_U1 &&
5003 hub_encoded_timeout != USB3_LPM_DISABLED);
5004 enabling_u2 = (state_changed == USB3_LPM_U2 &&
5005 hub_encoded_timeout != USB3_LPM_DISABLED);
5007 /* If U1 was already enabled and we're not disabling it,
5008 * or we're going to enable U1, account for the U1 max exit latency.
5010 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
5012 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
5013 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
5015 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
5017 mel_us = max(u1_mel_us, u2_mel_us);
5019 /* xHCI host controller max exit latency field is only 16 bits wide. */
5020 if (mel_us > MAX_EXIT) {
5021 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
5022 "is too big.\n", mel_us);
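/*
 * Worked example (values assumed): with U1 enabled at mel == 3000ns and
 * U2 enabled at mel == 256000ns, the rounded values are 3us and 256us,
 * so mel_us == 256 becomes the slot's Max Exit Latency; the MAX_EXIT
 * check above guards the 16-bit field.
 */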
5028 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
5029 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5030 struct usb_device *udev, enum usb3_link_state state)
5032 struct xhci_hcd *xhci;
5033 u16 hub_encoded_timeout;
5037 xhci = hcd_to_xhci(hcd);
5038 /* The LPM timeout values are pretty host-controller specific, so don't
5039 * enable hub-initiated timeouts unless the vendor has provided
5040 * information about their timeout algorithm.
5042 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5043 !xhci->devs[udev->slot_id])
5044 return USB3_LPM_DISABLED;
5046 if (xhci_check_tier_policy(xhci, udev, state) < 0)
5047 return USB3_LPM_DISABLED;
5049 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
5050 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
5052 /* Max Exit Latency is too big, disable LPM. */
5053 hub_encoded_timeout = USB3_LPM_DISABLED;
5057 ret = xhci_change_max_exit_latency(xhci, udev, mel);
5060 return hub_encoded_timeout;
5063 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5064 struct usb_device *udev, enum usb3_link_state state)
5066 struct xhci_hcd *xhci;
5069 xhci = hcd_to_xhci(hcd);
5070 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5071 !xhci->devs[udev->slot_id])
5074 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5075 return xhci_change_max_exit_latency(xhci, udev, mel);
5077 #else /* CONFIG_PM */
5079 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5080 struct usb_device *udev, int enable)
5085 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5090 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5091 struct usb_device *udev, enum usb3_link_state state)
5093 return USB3_LPM_DISABLED;
5096 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5097 struct usb_device *udev, enum usb3_link_state state)
5101 #endif /* CONFIG_PM */
5103 /*-------------------------------------------------------------------------*/
5105 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5106 * internal data structures for the device.
5108 static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5109 struct usb_tt *tt, gfp_t mem_flags)
5111 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5112 struct xhci_virt_device *vdev;
5113 struct xhci_command *config_cmd;
5114 struct xhci_input_control_ctx *ctrl_ctx;
5115 struct xhci_slot_ctx *slot_ctx;
5116 unsigned long flags;
5117 unsigned think_time;
5120 /* Ignore root hubs */
5124 vdev = xhci->devs[hdev->slot_id];
5126 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5130 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5134 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5136 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5138 xhci_free_command(xhci, config_cmd);
5142 spin_lock_irqsave(&xhci->lock, flags);
5143 if (hdev->speed == USB_SPEED_HIGH &&
5144 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5145 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5146 xhci_free_command(xhci, config_cmd);
5147 spin_unlock_irqrestore(&xhci->lock, flags);
5151 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5152 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5153 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5154 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5156 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
5157 * but it may already be set to 1 when setting up an xHCI virtual
5158 * device, so clear it anyway.
5161 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5162 else if (hdev->speed == USB_SPEED_FULL)
5163 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5165 if (xhci->hci_version > 0x95) {
5166 xhci_dbg(xhci, "xHCI version %x needs hub "
5167 "TT think time and number of ports\n",
5168 (unsigned int) xhci->hci_version);
5169 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5170 /* Set TT think time - convert from ns to FS bit times.
5171 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5172 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5174 * xHCI 1.0: this field shall be 0 if the device is not a
5175 * high-speed hub (and thus has MTT set for multi-TT hubs).
5176 */
5177 think_time = tt->think_time;
5178 if (think_time != 0)
5179 think_time = (think_time / 666) - 1;
5180 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5181 slot_ctx->tt_info |=
5182 cpu_to_le32(TT_THINK_TIME(think_time));
5184 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5185 "TT think time or number of ports\n",
5186 (unsigned int) xhci->hci_version);
5188 slot_ctx->dev_state = 0;
5189 spin_unlock_irqrestore(&xhci->lock, flags);
5191 xhci_dbg(xhci, "Set up %s for hub device.\n",
5192 (xhci->hci_version > 0x95) ?
5193 "configure endpoint" : "evaluate context");
5195 /* Issue and wait for the configure endpoint or
5196 * evaluate context command.
5198 if (xhci->hci_version > 0x95)
5199 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5202 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5205 xhci_free_command(xhci, config_cmd);
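/*
 * Worked example for the TT think time conversion above: one FS bit
 * time is ~83ns, so 8 bit times are ~666ns. A tt->think_time of 666ns
 * gives (666 / 666) - 1 == 0 ("8 FS bit times") and 2664ns gives
 * (2664 / 666) - 1 == 3 ("32 FS bit times"), matching the 0..3 encoding
 * described above.
 */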
5209 static int xhci_get_frame(struct usb_hcd *hcd)
5211 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5212 /* EHCI mods by the periodic size. Why? */
5213 return readl(&xhci->run_regs->microframe_index) >> 3;
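/*
 * MFINDEX counts 125us microframes; shifting right by 3 divides by 8 to
 * yield the 1ms frame number. E.g. a raw MFINDEX of 80 corresponds to
 * frame 10.
 */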
5216 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5218 xhci->usb2_rhub.hcd = hcd;
5219 hcd->speed = HCD_USB2;
5220 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5222 * A USB 2.0 roothub under xHCI has an integrated TT
5223 * (rate matching hub), as opposed to having an OHCI/UHCI
5224 * companion controller.
5229 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5231 unsigned int minor_rev;
5234 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
5235 * should return 0x31 for sbrn, or that the minor revision
5236 * is a two digit BCD containing minor and sub-minor numbers.
5237 * This was later clarified in xHCI 1.2.
5239 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5240 * minor revision set to 0x1 instead of 0x10.
5242 if (xhci->usb3_rhub.min_rev == 0x1)
5245 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
5247 switch (minor_rev) {
5249 hcd->speed = HCD_USB32;
5250 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5251 hcd->self.root_hub->rx_lanes = 2;
5252 hcd->self.root_hub->tx_lanes = 2;
5253 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5256 hcd->speed = HCD_USB31;
5257 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5258 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5261 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5262 minor_rev, minor_rev ? "Enhanced " : "");
5264 xhci->usb3_rhub.hcd = hcd;
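/*
 * Worked example: a roothub reporting min_rev 0x20 decodes to
 * minor_rev 2 (USB 3.2, Gen 2x2, two lanes each way); min_rev 0x10
 * decodes to minor_rev 1 (USB 3.1, Gen 2x1). The quirky hosts noted
 * above report min_rev 0x1 and are treated the same as 0x10.
 */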
5267 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5269 struct xhci_hcd *xhci;
5272 * TODO: Check with DWC3 clients for sysdev according to
5273 * quirks.
5274 struct device *dev = hcd->self.sysdev;
5277 /* Accept arbitrarily long scatter-gather lists */
5278 hcd->self.sg_tablesize = ~0;
5281 /* support building packets from discontinuous buffers */
5281 hcd->self.no_sg_constraint = 1;
5283 /* XHCI controllers don't stop the ep queue on short packets :| */
5284 hcd->self.no_stop_on_short = 1;
5286 xhci = hcd_to_xhci(hcd);
5288 if (!usb_hcd_is_primary_hcd(hcd)) {
5289 xhci_hcd_init_usb3_data(xhci, hcd);
5293 mutex_init(&xhci->mutex);
5294 xhci->main_hcd = hcd;
5295 xhci->cap_regs = hcd->regs;
5296 xhci->op_regs = hcd->regs +
5297 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5298 xhci->run_regs = hcd->regs +
5299 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5300 /* Cache read-only capability registers */
5301 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5302 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5303 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5304 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5305 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5306 if (xhci->hci_version > 0x100)
5307 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5309 xhci->quirks |= quirks;
5311 get_quirks(dev, xhci);
5313 /* Controllers that follow the xHCI 1.0 spec may give a spurious
5314 * success event after a short transfer. This quirk ignores such
5315 * spurious events.
5316 */
5317 if (xhci->hci_version > 0x96)
5318 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5320 /* Make sure the HC is halted. */
5321 retval = xhci_halt(xhci);
5325 xhci_zero_64b_regs(xhci);
5327 xhci_dbg(xhci, "Resetting HCD\n");
5328 /* Reset the internal HC memory state and registers. */
5329 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5332 xhci_dbg(xhci, "Reset complete\n");
5335 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5336 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
5337 * support 64-bit address memory pointers. So this driver clears the
5338 * AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
5339 * DMA_BIT_MASK(32)) is called in this xhci_gen_setup().
5341 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5342 xhci->hcc_params &= ~BIT(0);
5344 /* Set dma_mask and coherent_dma_mask to 64-bits,
5345 * if xHC supports 64-bit addressing */
5346 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5347 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5348 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5349 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5352 * This is to avoid errors in cases where a 32-bit USB
5353 * controller is used on a 64-bit capable system.
5355 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5358 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5359 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5362 xhci_dbg(xhci, "Calling HCD init\n");
5363 /* Initialize HCD and host controller data structures. */
5364 retval = xhci_init(hcd);
5367 xhci_dbg(xhci, "Called HCD init\n");
5369 if (xhci_hcd_is_usb3(hcd))
5370 xhci_hcd_init_usb3_data(xhci, hcd);
5372 xhci_hcd_init_usb2_data(xhci, hcd);
5374 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5375 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5379 EXPORT_SYMBOL_GPL(xhci_gen_setup);
5381 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5382 struct usb_host_endpoint *ep)
5384 struct xhci_hcd *xhci;
5385 struct usb_device *udev;
5386 unsigned int slot_id;
5387 unsigned int ep_index;
5388 unsigned long flags;
5390 xhci = hcd_to_xhci(hcd);
5392 spin_lock_irqsave(&xhci->lock, flags);
5393 udev = (struct usb_device *)ep->hcpriv;
5394 slot_id = udev->slot_id;
5395 ep_index = xhci_get_endpoint_index(&ep->desc);
5397 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5398 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5399 spin_unlock_irqrestore(&xhci->lock, flags);
5402 static const struct hc_driver xhci_hc_driver = {
5403 .description = "xhci-hcd",
5404 .product_desc = "xHCI Host Controller",
5405 .hcd_priv_size = sizeof(struct xhci_hcd),
5408 * generic hardware linkage
5411 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5415 * basic lifecycle operations
5417 .reset = NULL, /* set in xhci_init_driver() */
5420 .shutdown = xhci_shutdown,
5423 * managing i/o requests and associated device resources
5425 .map_urb_for_dma = xhci_map_urb_for_dma,
5426 .unmap_urb_for_dma = xhci_unmap_urb_for_dma,
5427 .urb_enqueue = xhci_urb_enqueue,
5428 .urb_dequeue = xhci_urb_dequeue,
5429 .alloc_dev = xhci_alloc_dev,
5430 .free_dev = xhci_free_dev,
5431 .alloc_streams = xhci_alloc_streams,
5432 .free_streams = xhci_free_streams,
5433 .add_endpoint = xhci_add_endpoint,
5434 .drop_endpoint = xhci_drop_endpoint,
5435 .endpoint_disable = xhci_endpoint_disable,
5436 .endpoint_reset = xhci_endpoint_reset,
5437 .check_bandwidth = xhci_check_bandwidth,
5438 .reset_bandwidth = xhci_reset_bandwidth,
5439 .address_device = xhci_address_device,
5440 .enable_device = xhci_enable_device,
5441 .update_hub_device = xhci_update_hub_device,
5442 .reset_device = xhci_discover_or_reset_device,
5445 * scheduling support
5447 .get_frame_number = xhci_get_frame,
5452 .hub_control = xhci_hub_control,
5453 .hub_status_data = xhci_hub_status_data,
5454 .bus_suspend = xhci_bus_suspend,
5455 .bus_resume = xhci_bus_resume,
5456 .get_resuming_ports = xhci_get_resuming_ports,
5459 * call back when device connected and addressed
5461 .update_device = xhci_update_device,
5462 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5463 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5464 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5465 .find_raw_port_number = xhci_find_raw_port_number,
5466 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5469 void xhci_init_driver(struct hc_driver *drv,
5470 const struct xhci_driver_overrides *over)
5474 /* Copy the generic table to drv then apply the overrides */
5475 *drv = xhci_hc_driver;
5478 drv->hcd_priv_size += over->extra_priv_size;
5480 drv->reset = over->reset;
5482 drv->start = over->start;
5483 if (over->add_endpoint)
5484 drv->add_endpoint = over->add_endpoint;
5485 if (over->drop_endpoint)
5486 drv->drop_endpoint = over->drop_endpoint;
5487 if (over->check_bandwidth)
5488 drv->check_bandwidth = over->check_bandwidth;
5489 if (over->reset_bandwidth)
5490 drv->reset_bandwidth = over->reset_bandwidth;
5493 EXPORT_SYMBOL_GPL(xhci_init_driver);
5495 MODULE_DESCRIPTION(DRIVER_DESC);
5496 MODULE_AUTHOR(DRIVER_AUTHOR);
5497 MODULE_LICENSE("GPL");
5499 static int __init xhci_hcd_init(void)
5502 * Check the compiler-generated sizes of structures that must be laid
5503 * out in specific ways for hardware access.
5505 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5506 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5507 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5508 /* xhci_device_control has eight fields, and also
5509 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5511 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5512 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5513 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5514 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5515 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5516 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5517 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
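/*
 * The size arithmetic counts 32-bit registers and converts to bytes:
 * the doorbell array is 256 * 32 / 8 == 1024 bytes, and the run regs
 * are (8 + 8 * 128) * 32 / 8 == 4128 bytes (8 words plus 128
 * interrupter register sets of 8 words each).
 */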
5522 xhci_debugfs_create_root();
5529 * If an init function is provided, an exit function must also be provided
5530 * to allow module unload.
5532 static void __exit xhci_hcd_fini(void)
5534 xhci_debugfs_remove_root();
5538 module_init(xhci_hcd_init);
5539 module_exit(xhci_hcd_fini);