// SPDX-License-Identifier: GPL-2.0
* xHCI host controller driver
* Copyright (C) 2008 Intel Corp.
* Some code borrowed from the Linux EHCI driver.
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
struct xhci_segment *seg = ring->first_seg;
if (!td || !td->start_seg)
if (seg == td->start_seg)
} while (seg && seg != ring->first_seg);
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @timeout_us: timeout in microseconds
* Returns negative errno, or zero on success
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "timeout_us" microseconds have
* passed (major hardware flakeout), or the register reads as all-ones
* (hardware removed).
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
ret = readl_poll_timeout_atomic(ptr, result,
(result & mask) == done ||
if (result == U32_MAX) /* card removed */
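/*
 * Illustrative sketch of a typical xhci_handshake() call site, assuming the
 * op_regs layout used throughout this file: wait up to one second for the
 * "controller not ready" bit to clear before touching other registers. The
 * all-ones check above suggests removed hardware is reported as a distinct
 * error from a plain timeout.
 *
 *	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0,
 *			     1 * 1000 * 1000);
 *	if (ret)
 *		xhci_warn(xhci, "controller not ready: %d\n", ret);
 */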
* xhci_handshake_check_state - same as xhci_handshake but takes an additional
* exit_state parameter, and bails out with an error immediately when xhc_state
* has exit_state flag set.
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
u32 mask, u32 done, int usec, unsigned int exit_state)
ret = readl_poll_timeout_atomic(ptr, result,
(result & mask) == done ||
xhci->xhc_state & exit_state,
if (result == U32_MAX || xhci->xhc_state & exit_state)
* Disable interrupts and begin the xHCI halting process.
void xhci_quiesce(struct xhci_hcd *xhci)
halted = readl(&xhci->op_regs->status) & STS_HALT;
cmd = readl(&xhci->op_regs->command);
writel(cmd, &xhci->op_regs->command);
* Force HC into halt state.
* Disable any IRQs and clear the run/stop bit.
* HC will complete any current and actively pipelined transactions, and
* should halt within 16 ms of the run/stop bit being cleared.
* Read HC Halted bit in the status register to see when the HC is finished.
int xhci_halt(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
xhci_warn(xhci, "Host halt failed, %d\n", ret);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
* Set the run bit and wait for the host to be running.
int xhci_start(struct xhci_hcd *xhci)
temp = readl(&xhci->op_regs->command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
writel(temp, &xhci->op_regs->command);
* Wait for the HCHalted Status bit to be 0 to indicate the host is
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, 0, XHCI_MAX_HALT_USEC);
if (ret == -ETIMEDOUT)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
/* Clear state flags, including dying, halted, and removing. */
xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
* This resets pipelines, timers, counters, state machines, etc.
* Transactions will be terminated immediately, and operational registers
* will be set to their defaults.
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
state = readl(&xhci->op_regs->status);
if (state == ~(u32)0) {
xhci_warn(xhci, "Host not accessible, reset failed.\n");
if ((state & STS_HALT) == 0) {
xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = readl(&xhci->op_regs->command);
command |= CMD_RESET;
writel(command, &xhci->op_regs->command);
/* Existing Intel xHCI controllers require a delay of 1 ms
* after setting the CMD_RESET bit, and before accessing any
* HC registers. This allows the HC to complete the
* reset operation and be ready for HC register access.
* Without this delay, the subsequent HC register access
* may very rarely result in a system hang.
if (xhci->quirks & XHCI_INTEL_HOST)
ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wait for controller to be ready for doorbell rings");
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
xhci->usb2_rhub.bus_state.port_c_suspend = 0;
xhci->usb2_rhub.bus_state.suspended_ports = 0;
xhci->usb2_rhub.bus_state.resuming_ports = 0;
xhci->usb3_rhub.bus_state.port_c_suspend = 0;
xhci->usb3_rhub.bus_state.suspended_ports = 0;
xhci->usb3_rhub.bus_state.resuming_ports = 0;
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct iommu_domain *domain;
* Some Renesas controllers get into a weird state if they are
* reset while programmed with 64-bit addresses (they will preserve
* the top half of the address in internal, non-visible
* registers). You end up with half the address coming from the
* kernel, and the other half coming from the firmware. Also,
* changing the programming leads to extra accesses even if the
* controller is supposed to be halted. The controller ends up with
* a fatal fault, and is then ripe for being properly reset.
* Special care is taken to only apply this if the device is behind
* an iommu. Doing anything when there is no iommu is definitely
* unsafe.
domain = iommu_get_domain_for_dev(dev);
if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
domain->type == IOMMU_DOMAIN_IDENTITY)
xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
/* Clear HSEIE so that faults do not get signaled */
val = readl(&xhci->op_regs->command);
writel(val, &xhci->op_regs->command);
/* Clear HSE (aka FATAL) */
val = readl(&xhci->op_regs->status);
writel(val, &xhci->op_regs->status);
/* Now zero the registers, and brace for impact */
val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
ARRAY_SIZE(xhci->run_regs->ir_set));
for (i = 0; i < intrs; i++) {
struct xhci_intr_reg __iomem *ir;
ir = &xhci->run_regs->ir_set[i];
val = xhci_read_64(xhci, &ir->erst_base);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_base);
val = xhci_read_64(xhci, &ir->erst_dequeue);
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &ir->erst_dequeue);
/* Wait for the fault to appear. It will be cleared on reset */
err = xhci_handshake(&xhci->op_regs->status,
STS_FATAL, STS_FATAL,
xhci_info(xhci, "Fault detected\n");
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
if (!ir || !ir->ir_set)
iman = readl(&ir->ir_set->irq_pending);
writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
static int xhci_disable_interrupter(struct xhci_interrupter *ir)
if (!ir || !ir->ir_set)
iman = readl(&ir->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
static void compliance_mode_recovery(struct timer_list *t)
struct xhci_hcd *xhci;
struct xhci_hub *rhub;
xhci = from_timer(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
for (i = 0; i < rhub->num_ports; i++) {
temp = readl(rhub->ports[i]->addr);
if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
* Compliance Mode Detected. Letting USB Core
* handle the Warm Reset
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode detected->port %d",
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Attempting compliance mode recovery");
if (hcd->state == HC_STATE_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
usb_hcd_poll_rh_status(hcd);
if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
mod_timer(&xhci->comp_mode_recovery_timer,
jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
* Quirk to work around an issue generated by the SN65LVPE502CP USB3.0 re-driver
* that sometimes causes ports behind that hardware to enter compliance mode.
* The quirk creates a timer that polls the link state of each host controller
* port every 2 seconds and recovers the port by issuing a warm reset if
* Compliance mode is detected; otherwise the port becomes "dead" (no device
* connections or disconnections will be detected anymore). Because no status
* event is generated when entering compliance mode (per xhci spec), this
* quirk is needed on systems that have the failing hardware installed.
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
xhci->port_status_u0 = 0;
timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
* This function identifies the systems that have installed the SN65LVPE502CP
* USB3.0 re-driver and that need the Compliance Mode Quirk.
* Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
const char *dmi_product_name, *dmi_sys_vendor;
dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
if (!dmi_product_name || !dmi_sys_vendor)
if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
if (strstr(dmi_product_name, "Z420") ||
strstr(dmi_product_name, "Z620") ||
strstr(dmi_product_name, "Z820") ||
strstr(dmi_product_name, "Z1 Workstation"))
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
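/*
 * Worked example for the mask above, assuming a root hub with 4 USB3 ports:
 * (1 << 4) - 1 == 0b1111, so port_status_u0 must collect one bit per port
 * before the compliance mode recovery timer is allowed to stop rearming.
 */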
* Initialize memory for HCD and xHC (one-time init).
* Program the PAGESIZE register, initialize the device context array, create
* device contexts (?), set up a command ring segment (or two?), create event
* ring (one for now).
static int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
if (xhci->hci_version == 0x95 && link_quirk) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI doesn't need link TRB QUIRK");
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
xhci->quirks |= XHCI_COMP_MODE_QUIRK;
compliance_mode_recovery_timer_init(xhci);
/*-------------------------------------------------------------------------*/
static int xhci_run_finished(struct xhci_hcd *xhci)
struct xhci_interrupter *ir = xhci->interrupters[0];
* Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
* Protect the short window before the host is running with a lock.
spin_lock_irqsave(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
temp = readl(&xhci->op_regs->command);
writel(temp, &xhci->op_regs->command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
xhci_enable_interrupter(ir);
if (xhci_start(xhci)) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
* Start the HC after it was halted.
* This function is called by the USB core when the HC driver is added.
* Its opposite is xhci_stop().
* xhci_init() must be called once before this function can be called.
* Reset the HC, enable device slot contexts, program DCBAAP, and
* set command ring pointer and event ring pointer.
* Setup MSI-X vectors and enable interrupts.
int xhci_run(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupters[0];
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is registered. */
hcd->uses_new_polling = 1;
if (hcd->msi_enabled)
ir->ip_autoclear = true;
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp_64 &= ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (unsigned long) temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
temp = readl(&ir->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(temp, &ir->ir_set->irq_control);
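/*
 * Worked example for the interrupt moderation setup above, assuming this
 * driver's usual default imod_interval of 40000 ns: the IMOD field counts
 * in 250 ns increments, so 40000 / 250 == 160 is the value that actually
 * lands in the register.
 *
 *	temp |= (40000 / 250) & ER_IRQ_INTERVAL_MASK;	// == 160
 */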
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
command = xhci_alloc_command(xhci, false, GFP_KERNEL);
ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
xhci_free_command(xhci, command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished %s for main hcd", __func__);
xhci_create_dbc_dev(xhci);
xhci_debugfs_init(xhci);
if (xhci_has_one_roothub(xhci))
return xhci_run_finished(xhci);
set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
EXPORT_SYMBOL_GPL(xhci_run);
* This function is called by the USB core when the HC driver is removed.
* Its opposite is xhci_run().
* Disable device contexts, disable IRQs, and quiesce the HC.
* Reset the HC, finish any completed transactions, and cleanup memory.
void xhci_stop(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupters[0];
mutex_lock(&xhci->mutex);
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
mutex_unlock(&xhci->mutex);
xhci_remove_dbc_dev(xhci);
spin_lock_irq(&xhci->lock);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
spin_unlock_irq(&xhci->lock);
/* Deleting Compliance Mode Recovery Timer */
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
if (xhci->quirks & XHCI_AMD_PLL_FIX)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
xhci_disable_interrupter(ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
xhci_debugfs_exit(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
readl(&xhci->op_regs->status));
mutex_unlock(&xhci->mutex);
EXPORT_SYMBOL_GPL(xhci_stop);
* Shutdown HC (not bus-specific)
* This is called when the machine is rebooting or halting. We assume that the
* machine will be powered off, and the HC's internal state will be reset.
* Don't bother to free memory.
* This will only ever be called with the main usb_hcd (the USB3 roothub).
void xhci_shutdown(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
/* Don't poll the roothubs after shutdown. */
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
del_timer_sync(&xhci->shared_hcd->rh_timer);
spin_lock_irq(&xhci->lock);
* Workaround for spurious wakeups at shutdown with HSW, and for boot
* firmware delay in ADL-P PCH if ports are left in U3 at shutdown
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
xhci->quirks & XHCI_RESET_TO_DEFAULT)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
spin_unlock_irq(&xhci->lock);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_shutdown completed - status = %x",
readl(&xhci->op_regs->status));
EXPORT_SYMBOL_GPL(xhci_shutdown);
static void xhci_save_registers(struct xhci_hcd *xhci)
struct xhci_interrupter *ir;
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
/* save both primary and all secondary interrupters */
/* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
for (i = 0; i < xhci->max_interrupters; i++) {
ir = xhci->interrupters[i];
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
ir->s3_irq_control = readl(&ir->ir_set->irq_control);
static void xhci_restore_registers(struct xhci_hcd *xhci)
struct xhci_interrupter *ir;
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
/* FIXME: should we lock to protect against freeing of interrupters? */
for (i = 0; i < xhci->max_interrupters; i++) {
ir = xhci->interrupters[i];
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
writel(ir->s3_irq_control, &ir->ir_set->irq_control);
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
/* step 2: initialize command ring buffer */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
(u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
(unsigned long long) val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
* The whole command ring must be cleared to zero when we suspend the host.
* The host doesn't reliably save the command ring pointer over suspend, so we
* need to re-program it on resume. Unfortunately, the pointer must be 64-byte
* aligned, because of the reserved bits in the command ring dequeue pointer
* register. Therefore, we can't just set the dequeue pointer back in the
* middle of the ring (TRBs are 16-byte aligned).
771 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
773 struct xhci_ring *ring;
774 struct xhci_segment *seg;
776 ring = xhci->cmd_ring;
780 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
781 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
782 cpu_to_le32(~TRB_CYCLE);
784 } while (seg != ring->deq_seg);
786 /* Reset the software enqueue and dequeue pointers */
787 ring->deq_seg = ring->first_seg;
788 ring->dequeue = ring->first_seg->trbs;
789 ring->enq_seg = ring->deq_seg;
790 ring->enqueue = ring->dequeue;
792 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
794 * Ring is now zeroed, so the HW should look for change of ownership
795 * when the cycle bit is set to 1.
797 ring->cycle_state = 1;
800 * Reset the hardware dequeue pointer.
801 * Yes, this will need to be re-written after resume, but we're paranoid
802 * and want to make sure the hardware doesn't access bogus memory
803 * because, say, the BIOS or an SMI started the host without changing
804 * the command ring pointers.
806 xhci_set_cmd_ring_deq(xhci);
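/*
 * Worked example of why the dequeue pointer must be 64-byte aligned: in
 * xhci_set_cmd_ring_deq() above, CMD_RING_RSVD_BITS masks the low six bits
 * of the register (they carry the cycle state and control flags instead of
 * address bits). With an assumed dequeue DMA address of 0x12345678 and
 * cycle state 1, only the 64-byte-aligned part of the pointer survives:
 *
 *	val_64 = (0x12345678 & ~(u64) CMD_RING_RSVD_BITS) | 1;	// 0x12345641
 */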
* Disable port wake bits if do_wakeup is not set.
* Also clear a possible internal port wake state left hanging for ports that
* detected termination but never successfully enumerated (trained to 0U).
* Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
* done at enumeration clears this wake; force one here as well for
* unconnected ports.
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
struct xhci_hub *rhub,
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < rhub->num_ports; i++) {
portsc = readl(rhub->ports[i]->addr);
t1 = xhci_port_state_to_neutral(portsc);
/* clear wake bits if do_wakeup is not set */
t2 &= ~PORT_WAKE_BITS;
/* Don't touch csc bit if connected or connect change is set */
if (!(portsc & (PORT_CSC | PORT_CONNECT)))
writel(t2, rhub->ports[i]->addr);
xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
rhub->hcd->self.busnum, i + 1, portsc, t2);
spin_unlock_irqrestore(&xhci->lock, flags);
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
struct xhci_port **ports;
status = readl(&xhci->op_regs->status);
if (status & STS_EINT)
* Checking STS_EINT is not enough as there is a lag between a change
* bit being set and the Port Status Change Event that it generated
* being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
port_index = xhci->usb2_rhub.num_ports;
ports = xhci->usb2_rhub.ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
if (portsc & PORT_CHANGE_MASK ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
port_index = xhci->usb3_rhub.num_ports;
ports = xhci->usb3_rhub.ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
* Stop HC (not bus-specific)
* This is called when the machine transitions into S3/S4 mode.
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
unsigned int delay = XHCI_MAX_HALT_USEC * 2;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
if (hcd->state != HC_STATE_SUSPENDED ||
(xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
/* Clear root port wake on bits if wakeup not allowed. */
xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
if (!HCD_HW_ACCESSIBLE(hcd))
xhci_dbc_suspend(xhci);
/* Don't poll the roothubs on bus suspend. */
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
del_timer_sync(&xhci->shared_hcd->rh_timer);
if (xhci->quirks & XHCI_SUSPEND_DELAY)
usleep_range(1000, 1500);
spin_lock_irq(&xhci->lock);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (xhci->shared_hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
/* step 1: stop endpoint */
/* skipped, assuming port suspend has already been done */
/* step 2: clear Run/Stop bit */
command = readl(&xhci->op_regs->command);
writel(command, &xhci->op_regs->command);
/* Some chips from Fresco Logic need an extraordinary delay */
delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
if (xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, delay)) {
xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
spin_unlock_irq(&xhci->lock);
xhci_clear_command_ring(xhci);
/* step 3: save registers */
xhci_save_registers(xhci);
/* step 4: set CSS flag */
command = readl(&xhci->op_regs->command);
writel(command, &xhci->op_regs->command);
xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
STS_SAVE, 0, 20 * 1000)) {
* AMD SNPS xHC 3.0 occasionally does not clear the
* SSS bit of USBSTS, so when the driver polls for
* BIT(8) to clear it never happens, and the driver
* assumes the controller is not responding and times
* out. To work around this, check that the SRE and
* HCE bits are not set (as per xhci Section 5.4.2)
* and bypass the timeout.
res = readl(&xhci->op_regs->status);
if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
(((res & STS_SRE) == 0) &&
((res & STS_HCE) == 0))) {
xhci->broken_suspend = 1;
xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
spin_unlock_irq(&xhci->lock);
* Deleting Compliance Mode Recovery Timer because the xHCI Host
* is about to be suspended.
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
EXPORT_SYMBOL_GPL(xhci_suspend);
* Start xHC (not bus-specific)
* This is called when the machine transitions out of S3/S4 mode.
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
bool comp_timer_running = false;
bool pending_portevent = false;
bool suspended_usb3_devs = false;
bool reinit_xhc = false;
/* Wait a bit if either of the roothubs needs to settle from the
* transition into bus suspend.
if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
if (xhci->shared_hcd)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
retval = xhci_handshake(&xhci->op_regs->status,
STS_CNR, 0, 10 * 1000 * 1000);
xhci_warn(xhci, "Controller not ready at resume %d\n",
spin_unlock_irq(&xhci->lock);
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
xhci_set_cmd_ring_deq(xhci);
/* step 3: restore state and start state */
/* step 3: set CRS flag */
command = readl(&xhci->op_regs->command);
writel(command, &xhci->op_regs->command);
* Some controllers take up to 55+ ms to complete the controller
* restore, so set the timeout to 100 ms. The xHCI specification
* doesn't mention any timeout value.
if (xhci_handshake(&xhci->op_regs->status,
STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
temp = readl(&xhci->op_regs->status);
/* re-initialize the HC on Restore Error, or Host Controller Error */
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
if (!xhci->broken_suspend)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance Mode Recovery Timer deleted!");
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
if (xhci->shared_hcd)
usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_zero_64b_regs(xhci);
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
spin_unlock_irq(&xhci->lock);
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
xhci_disable_interrupter(xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
xhci_debugfs_exit(xhci);
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
readl(&xhci->op_regs->status));
/* USB core calls the PCI reinit and start functions twice:
* first with the primary HCD, and then with the secondary HCD.
* If we don't do the same, the host will never be started.
xhci_dbg(xhci, "Initialize the xhci_hcd\n");
retval = xhci_init(hcd);
comp_timer_running = true;
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd);
if (!retval && xhci->shared_hcd) {
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(xhci->shared_hcd);
hcd->state = HC_STATE_SUSPENDED;
if (xhci->shared_hcd)
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
/* step 4: set Run/Stop bit */
command = readl(&xhci->op_regs->command);
writel(command, &xhci->op_regs->command);
xhci_handshake(&xhci->op_regs->status, STS_HALT,
/* step 5: walk topology and initialize portsc,
* portpmsc and portli
/* this is done in bus_resume */
/* step 6: restart each of the previously
* running endpoints by ringing their doorbells
spin_unlock_irq(&xhci->lock);
xhci_dbc_resume(xhci);
* Resume roothubs only if there are pending events.
* USB 3 devices resend U3 LFPS wake after a 100ms delay if
* the first wake signalling failed; give them that chance if
* there are suspended USB 3 devices.
if (xhci->usb3_rhub.bus_state.suspended_ports ||
xhci->usb3_rhub.bus_state.bus_suspended)
suspended_usb3_devs = true;
pending_portevent = xhci_pending_portevent(xhci);
if (suspended_usb3_devs && !pending_portevent &&
msg.event == PM_EVENT_AUTO_RESUME) {
pending_portevent = xhci_pending_portevent(xhci);
if (pending_portevent) {
if (xhci->shared_hcd)
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
* If the system is subject to the quirk, the Compliance Mode Timer must
* always be re-initialized after a system resume: the ports may suffer
* the Compliance Mode issue again, regardless of whether they entered U0
* before the system was suspended.
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
__func__, hcd->self.busnum);
if (xhci->shared_hcd) {
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
unsigned int buf_len;
enum dma_data_direction dir;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
buf_len = urb->transfer_buffer_length;
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
urb->transfer_buffer = temp;
urb->transfer_dma = dma_map_single(hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma)) {
urb->transfer_flags |= URB_DMA_MAP_SINGLE;
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
unsigned int len = 0;
unsigned int trb_size;
unsigned int max_pkt;
struct scatterlist *sg;
struct scatterlist *tail_sg;
max_pkt = usb_endpoint_maxp(&urb->ep->desc);
if (urb->dev->speed >= USB_SPEED_SUPER)
trb_size = TRB_CACHE_SIZE_SS;
trb_size = TRB_CACHE_SIZE_HS;
if (urb->transfer_buffer_length != 0 &&
!(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
len = len + sg->length;
if (i > trb_size - 2) {
len = len - tail_sg->length;
if (len < max_pkt) {
tail_sg = sg_next(tail_sg);
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
unsigned int buf_len;
enum dma_data_direction dir;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
buf_len = urb->transfer_buffer_length;
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SINGLE))
dma_unmap_single(hcd->self.sysdev,
urb->transfer_buffer_length,
if (usb_urb_dir_in(urb)) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
urb->transfer_buffer,
if (len != buf_len) {
xhci_dbg(hcd_to_xhci(hcd),
"Copy from tmp buf to urb sg list failed\n");
urb->actual_length = len;
urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
* Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
* we'll copy the actual data into the TRB address register. This is limited to
* transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
* >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(hcd);
if (xhci_urb_suitable_for_idt(urb))
if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
if (xhci_urb_temp_buffer_required(hcd, urb))
return xhci_map_temp_buffer(hcd, urb);
return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
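/*
 * Sketch of the IDT suitability test referenced above. The real check,
 * xhci_urb_suitable_for_idt(), lives elsewhere in the driver; this is only
 * the shape implied by the comment (OUT direction, wMaxPacketSize >= 8,
 * payload of at most 8 bytes):
 *
 *	if (usb_endpoint_dir_out(&urb->ep->desc) &&
 *	    usb_endpoint_maxp(&urb->ep->desc) >= 8 &&
 *	    urb->transfer_buffer_length <= 8)
 *		; // data rides in the TRB itself, no DMA mapping needed
 */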
static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
struct xhci_hcd *xhci;
bool unmap_temp_buf = false;
xhci = hcd_to_xhci(hcd);
if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
unmap_temp_buf = true;
if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
xhci_unmap_temp_buf(hcd, urb);
usb_hcd_unmap_urb_for_dma(hcd, urb);
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. To build the
* bitmask, shift 1 left by the returned index (plus one for the slot context).
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
* For control endpoints, the IN index is used (OUT index is unused), so
* index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
if (usb_endpoint_xfer_control(desc))
index = (unsigned int) (usb_endpoint_num(desc)*2);
index = (unsigned int) (usb_endpoint_num(desc)*2) +
(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
* address from the XHCI endpoint index.
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
unsigned int number = DIV_ROUND_UP(ep_index, 2);
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
return direction | number;
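/*
 * Round-trip example for the two helpers above: "ep 1 IN" has endpoint
 * address 0x81, so the index is (1 * 2) + 1 - 1 = 2, and converting index 2
 * back gives USB_DIR_IN | 1 == 0x81 again.
 *
 *	unsigned int index = xhci_get_endpoint_index(&ep->desc);   // == 2
 *	unsigned int addr = xhci_get_endpoint_address(index);      // == 0x81
 */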
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
return 1 << (xhci_get_endpoint_index(desc) + 1);
/* Compute the last valid endpoint context index. Basically, this is the
* endpoint index plus one. For slot contexts with more than one valid endpoint,
* we find the most significant bit set in the added contexts flags.
* e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
* fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
return fls(added_ctxs) - 1;
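/*
 * Continuing the ep 1 IN example: endpoint index 2 gives the control-context
 * flag 1 << (2 + 1) == 0b1000, and fls(0b1000) - 1 == 3 is the last valid
 * endpoint context index, matching the comment above.
 */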
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
if (!hcd || (check_ep && !ep) || !udev) {
pr_debug("xHCI %s called with invalid args\n", func);
if (!udev->parent) {
pr_debug("xHCI %s called for root hub\n", func);
xhci = hcd_to_xhci(hcd);
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
virt_dev = xhci->devs[udev->slot_id];
if (virt_dev->udev != udev) {
xhci_dbg(xhci, "xHCI %s called with udev and "
"virt_dev that do not match\n", func);
if (xhci->xhc_state & XHCI_STATE_HALTED)
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_command *command,
bool ctx_change, bool must_succeed);
* Full speed devices may have a max packet size greater than 8 bytes, but the
* USB core doesn't know that until it reads the first 8 bytes of the
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
int max_packet_size;
int hw_max_packet_size;
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);
if (hw_max_packet_size == max_packet_size)
switch (max_packet_size) {
case 8: case 16: case 32: case 64: case 9:
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max Packet Size for ep 0 changed.");
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max packet size in usb_device = %d",
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max packet size in xHCI HW = %d",
hw_max_packet_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Issuing evaluate context command.");
command = xhci_alloc_command(xhci, true, GFP_KERNEL);
command->in_ctx = vdev->in_ctx;
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
/* Set up the modified control endpoint 0 */
xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK); /* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
ctrl_ctx->drop_flags = 0;
ret = xhci_configure_endpoint(xhci, vdev->udev, command,
/* Clean up the input context for later use by bandwidth functions */
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
kfree(command->completion);
* non-error returns are a promise to giveback() the urb later;
* we drop ownership so the next owner (or urb unlink) can get it
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
unsigned int slot_id, ep_index;
unsigned int *ep_state;
struct urb_priv *urb_priv;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
urb->transfer_buffer_length > 0 &&
urb->transfer_flags & URB_ZERO_PACKET &&
!(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
urb_priv->num_tds = num_tds;
urb_priv->num_tds_done = 0;
urb->hcpriv = urb_priv;
trace_xhci_urb_enqueue(urb);
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_check_args(hcd, urb->dev, urb->ep,
true, true, __func__);
ret = ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
if (!HCD_HW_ACCESSIBLE(hcd)) {
if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
switch (usb_endpoint_type(&urb->ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
case USB_ENDPOINT_XFER_BULK:
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
case USB_ENDPOINT_XFER_INT:
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
case USB_ENDPOINT_XFER_ISOC:
ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
xhci_urb_free_priv(urb_priv);
spin_unlock_irqrestore(&xhci->lock, flags);
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
* should pick up where it left off in the TD, unless a Set Transfer Ring
* Dequeue Pointer is issued.
* The TRBs that make up the buffers for the canceled URB will be "removed" from
* the ring. Since the ring is a contiguous structure, they can't be physically
* removed. Instead, there are a few cases:
* 1) If the HC is in the middle of processing the URB to be canceled, we
* simply move the ring's dequeue pointer past those TRBs using the Set
* Transfer Ring Dequeue Pointer command. This will be the common case,
* when drivers timeout on the last submitted URB and attempt to cancel.
* 2) If the HC is in the middle of a different TD, we turn the TRBs into a
* series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
* HC will need to invalidate any TRBs it has cached after the stop
* endpoint command, as noted in the xHCI 0.95 errata.
* 3) The TD may have completed by the time the Stop Endpoint Command
* completes, so software needs to handle that case too.
* This function should protect against the TD enqueueing code ringing the
* doorbell while this code is waiting for a Stop Endpoint command to complete.
* It also needs to account for multiple cancellations happening at the same
* time for the same endpoint.
* Note that this function can be called in any context, or so says
* usb_hcd_unlink_urb()
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unsigned long flags;
struct xhci_hcd *xhci;
struct urb_priv *urb_priv;
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct xhci_command *command;
struct xhci_virt_device *vdev;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
trace_xhci_urb_dequeue(urb);
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
/* give back URB now if we can't queue it for cancel */
vdev = xhci->devs[urb->dev->slot_id];
urb_priv = urb->hcpriv;
if (!vdev || !urb_priv)
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &vdev->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep || !ep_ring)
/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
temp = readl(&xhci->op_regs->status);
if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
* Check that the ring has not been re-allocated since the URB was enqueued.
* If it has, make sure none of the ring-related pointers in this URB's
* private data are touched, such as td_list; otherwise we overwrite freed data.
if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
xhci_err(xhci, "Canceled URB td not found on endpoint ring");
for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
if (xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HC halted, freeing TD manually.");
for (i = urb_priv->num_tds_done;
i < urb_priv->num_tds;
td = &urb_priv->td[i];
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i].start_seg,
urb_priv->td[i].first_trb));
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
/* TD can already be on cancelled list if ep halted on it */
if (list_empty(&td->cancelled_td_list)) {
td->cancel_status = TD_DIRTY;
list_add_tail(&td->cancelled_td_list,
&ep->cancelled_td_list);
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
ep->ep_state |= EP_STOP_CMD_PENDING;
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
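/*
 * Summary sketch of the cancellation flow implemented above, from the
 * submitter's side (names as used in this file; not additional driver code):
 *
 *	usb_unlink_urb(urb);	// USB core routes this to xhci_urb_dequeue()
 *	// -> each pending TD is moved to ep->cancelled_td_list
 *	// -> one Stop Endpoint command is queued per endpoint
 *	//    (guarded by EP_STOP_CMD_PENDING)
 *	// -> its completion handler later issues Set TR Dequeue or turns
 *	//    the cancelled TRBs into no-ops
 */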
/* Drop an endpoint from a new bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
* The USB core will not allow URBs to be queued to an endpoint that is being
* disabled, so there's no need for mutual exclusion to protect
* the xhci->devs[slot_id] structure.
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx, *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 new_add_flags, new_drop_flags;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
xhci = hcd_to_xhci(hcd);
if (xhci->xhc_state & XHCI_STATE_DYING)
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
__func__, drop_flag);
in_ctx = xhci->devs[udev->slot_id]->in_ctx;
out_ctx = xhci->devs[udev->slot_id]->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
/* Do not warn when called after a usb_device_reset */
if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags);
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
/* Add an endpoint to a new possible bandwidth configuration for this device.
* Only one call to this function is allowed per endpoint before
* check_bandwidth() or reset_bandwidth() must be called.
* A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
* add the endpoint to the schedule with possibly new parameters denoted by a
* different endpoint descriptor in usb_host_endpoint.
* A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
* not allowed.
* The USB core will not allow URBs to be queued to an endpoint until the
* configuration or alt setting is installed in the device, so there's no need
* for mutual exclusion to protect the xhci->devs[slot_id] structure.
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx;
unsigned int ep_index;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
u32 new_add_flags, new_drop_flags;
struct xhci_virt_device *virt_dev;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
/* So we won't queue a reset ep command for a root hub */
xhci = hcd_to_xhci(hcd);
if (xhci->xhc_state & XHCI_STATE_DYING)
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
/* FIXME when we have to issue an evaluate endpoint command to
* deal with ep0 max packet size changing once we get the
* descriptors */
xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
__func__, added_ctxs);
virt_dev = xhci->devs[udev->slot_id];
in_ctx = virt_dev->in_ctx;
ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
ep_index = xhci_get_endpoint_index(&ep->desc);
/* If this endpoint is already in use, and the upper layers are trying
* to add it again without dropping it, reject the addition.
if (virt_dev->eps[ep_index].ring &&
!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
xhci_warn(xhci, "Trying to add endpoint 0x%x "
"without dropping it.\n",
(unsigned int) ep->desc.bEndpointAddress);
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
* Configuration and alternate setting changes must be done in
* process context, not interrupt context (or so the documentation
* for usb_set_interface() and usb_set_configuration() claims).
if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
/* If xhci_endpoint_disable() was called for this endpoint, but the
* xHC hasn't been notified yet through the check_bandwidth() call,
* this re-adds a new state for the endpoint from the new endpoint
* descriptors. We must drop and re-add this endpoint, so we leave the
* drop flags alone. */
new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
/* Store the usb_device pointer for later use */
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
trace_xhci_add_endpoint(ep_ctx);
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags);
EXPORT_SYMBOL_GPL(xhci_add_endpoint);
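/*
 * Usage sketch for the two hooks above, as the USB core drives them when an
 * interface switches altsetting (hypothetical endpoint "ep"; the real
 * callers live in the USB core, not here):
 *
 *	xhci_drop_endpoint(hcd, udev, ep);	// stage removal in drop_flags
 *	xhci_add_endpoint(hcd, udev, ep);	// stage re-add with the new descriptor
 *	// ...a later check_bandwidth() call then issues a single
 *	// Configure Endpoint command covering both staged changes.
 */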
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
/* When a device's add flag and drop flag are zero, any subsequent
* configure endpoint command will leave that endpoint's state
* untouched. Make sure we don't leave any old state in the input
* endpoint contexts.
ctrl_ctx->drop_flags = 0;
ctrl_ctx->add_flags = 0;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
/* Endpoint 0 is always valid */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
for (i = 1; i < 31; i++) {
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->tx_info = 0;
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
struct usb_device *udev, u32 *cmd_status)
switch (*cmd_status) {
case COMP_COMMAND_ABORTED:
case COMP_COMMAND_RING_STOPPED:
xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
case COMP_RESOURCE_ERROR:
dev_warn(&udev->dev,
"Not enough host controller resources for new device state.\n");
/* FIXME: can we allocate more resources for the HC? */
case COMP_BANDWIDTH_ERROR:
case COMP_SECONDARY_BANDWIDTH_ERROR:
dev_warn(&udev->dev,
"Not enough bandwidth for new device state.\n");
/* FIXME: can we go back to the old state? */
case COMP_TRB_ERROR:
/* the HCD set up something wrong */
2046 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2047 "add flag = 1, "
2048 "and endpoint is not disabled.\n");
2051 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2052 dev_warn(&udev->dev,
2053 "ERROR: Incompatible device for endpoint configure command.\n");
2057 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2058 "Successful Endpoint Configure command");
2062 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2070 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2071 struct usb_device *udev, u32 *cmd_status)
2075 switch (*cmd_status) {
2076 case COMP_COMMAND_ABORTED:
2077 case COMP_COMMAND_RING_STOPPED:
2078 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2081 case COMP_PARAMETER_ERROR:
2082 dev_warn(&udev->dev,
2083 "WARN: xHCI driver setup invalid evaluate context command.\n");
2086 case COMP_SLOT_NOT_ENABLED_ERROR:
2087 dev_warn(&udev->dev,
2088 "WARN: slot not enabled for evaluate context command.\n");
2091 case COMP_CONTEXT_STATE_ERROR:
2092 dev_warn(&udev->dev,
2093 "WARN: invalid context state for evaluate context command.\n");
2096 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2097 dev_warn(&udev->dev,
2098 "ERROR: Incompatible device for evaluate context command.\n");
2101 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2102 /* Max Exit Latency too large error */
2103 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2107 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2108 "Successful evaluate context command");
2112 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2120 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2121 struct xhci_input_control_ctx *ctrl_ctx)
2123 u32 valid_add_flags;
2124 u32 valid_drop_flags;
2126 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2127 * (bit 1). The default control endpoint is added during the Address
2128 * Device command and is never removed until the slot is disabled.
2130 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2131 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2133 /* Use hweight32 to count the number of ones in the add flags, or
2134 * number of endpoints added. Don't count endpoints that are changed
2135 * (both added and dropped).
2137 return hweight32(valid_add_flags) -
2138 hweight32(valid_add_flags & valid_drop_flags);
2139 }
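/*
 * Worked example (illustrative, not from the original source): with
 * add_flags = 0x1c (ctx entries 2, 3 and 4 added) and drop_flags = 0xc
 * (entries 2 and 3 also dropped, i.e. changed), the shifts give
 * valid_add_flags = 0x7 and valid_drop_flags = 0x3, so the result is
 * hweight32(0x7) - hweight32(0x7 & 0x3) = 3 - 2 = 1 genuinely new endpoint.
 */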
2141 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2142 struct xhci_input_control_ctx *ctrl_ctx)
2144 u32 valid_add_flags;
2145 u32 valid_drop_flags;
2147 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2148 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2150 return hweight32(valid_drop_flags) -
2151 hweight32(valid_add_flags & valid_drop_flags);
2155 * We need to reserve the new number of endpoints before the configure endpoint
2156 * command completes. We can't subtract the dropped endpoints from the number
2157 * of active endpoints until the command completes because we can oversubscribe
2158 * the host in this case:
2160 * - the first configure endpoint command drops more endpoints than it adds
2161 * - a second configure endpoint command that adds more endpoints is queued
2162 * - the first configure endpoint command fails, so the config is unchanged
2163 * - the second command may succeed, even though there aren't enough resources
2165 * Must be called with xhci->lock held.
2167 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2168 struct xhci_input_control_ctx *ctrl_ctx)
2172 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2173 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2174 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2175 "Not enough ep ctxs: "
2176 "%u active, need to add %u, limit is %u.",
2177 xhci->num_active_eps, added_eps,
2178 xhci->limit_active_eps);
2181 xhci->num_active_eps += added_eps;
2182 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2183 "Adding %u ep ctxs, %u now active.", added_eps,
2184 xhci->num_active_eps);
2185 return 0;
2186 }
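/*
 * Worked example (illustrative numbers): with limit_active_eps = 64 and
 * num_active_eps = 62, a command that adds 3 new endpoints is rejected
 * here (62 + 3 > 64) even if it also drops 5, because dropped endpoints
 * are only credited back in xhci_finish_resource_reservation() once the
 * command has actually succeeded.
 */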
2189 * The xHC failed the configure endpoint command for some other reason, so we
2190 * need to revert the resources that the failed configuration would have used.
2191 *
2192 * Must be called with xhci->lock held.
2194 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2195 struct xhci_input_control_ctx *ctrl_ctx)
2199 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2200 xhci->num_active_eps -= num_failed_eps;
2201 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2202 "Removing %u failed ep ctxs, %u now active.",
2204 xhci->num_active_eps);
2208 * Now that the command has completed, clean up the active endpoint count by
2209 * subtracting out the endpoints that were dropped (but not changed).
2211 * Must be called with xhci->lock held.
2213 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2214 struct xhci_input_control_ctx *ctrl_ctx)
2216 u32 num_dropped_eps;
2218 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2219 xhci->num_active_eps -= num_dropped_eps;
2220 if (num_dropped_eps)
2221 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2222 "Removing %u dropped ep ctxs, %u now active.",
2224 xhci->num_active_eps);
2227 static unsigned int xhci_get_block_size(struct usb_device *udev)
2229 switch (udev->speed) {
2230 case USB_SPEED_LOW:
2231 case USB_SPEED_FULL:
2232 return FS_BLOCK;
2233 case USB_SPEED_HIGH:
2234 return HS_BLOCK;
2235 case USB_SPEED_SUPER:
2236 case USB_SPEED_SUPER_PLUS:
2237 return SS_BLOCK;
2238 case USB_SPEED_UNKNOWN:
2239 default:
2240 /* Should never happen */
2241 return 1;
2242 }
2245 static unsigned int
2246 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2247 {
2248 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2249 return interval_bw->overhead[LS_OVERHEAD_TYPE];
2250 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2251 return interval_bw->overhead[FS_OVERHEAD_TYPE];
2252 return interval_bw->overhead[HS_OVERHEAD_TYPE];
2253 }
2255 /* If we are changing a LS/FS device under a HS hub,
2256 * make sure (if we are activating a new TT) that the HS bus has enough
2257 * bandwidth for this new TT.
2259 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2260 struct xhci_virt_device *virt_dev,
2261 int old_active_eps)
2262 {
2263 struct xhci_interval_bw_table *bw_table;
2264 struct xhci_tt_bw_info *tt_info;
2266 /* Find the bandwidth table for the root port this TT is attached to. */
2267 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2268 tt_info = virt_dev->tt_info;
2269 /* If this TT already had active endpoints, the bandwidth for this TT
2270 * has already been added. Removing all periodic endpoints (and thus
2271 * making the TT inactive) will only decrease the bandwidth used.
2275 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2276 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2280 /* Not sure why we would have no new active endpoints...
2282 * Maybe because of an Evaluate Context change for a hub update or a
2283 * control endpoint 0 max packet size change?
2284 * FIXME: skip the bandwidth calculation in that case.
2289 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2290 struct xhci_virt_device *virt_dev)
2292 unsigned int bw_reserved;
2294 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100);
2295 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2296 return -ENOMEM;
2298 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_OUT, 100);
2299 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2300 return -ENOMEM;
2301 return 0;
2302 }
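/*
 * Worked example (illustrative, assuming SS_BW_RESERVED is expressed in
 * percent): bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN,
 * 100) holds back that percentage of the IN budget, so with a 10 percent
 * reservation any ss_bw_in total above 90 percent of SS_BW_LIMIT_IN
 * makes this function fail with -ENOMEM.
 */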
2306 * This algorithm is a very conservative estimate of the worst-case scheduling
2307 * scenario for any one interval. The hardware dynamically schedules the
2308 * packets, so we can't tell which microframe could be the limiting factor in
2309 * the bandwidth scheduling. This only takes into account periodic endpoints.
2311 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2312 * case scenario. Instead, we come up with an estimate that is no less than
2313 * the worst case bandwidth used for any one microframe, but may be an
2314 * overestimate.
2315 *
2316 * We walk the requirements for each endpoint by interval, starting with the
2317 * smallest interval, and place packets in the schedule where there is only one
2318 * possible way to schedule packets for that interval. In order to simplify
2319 * this algorithm, we record the largest max packet size for each interval, and
2320 * assume all packets will be that size.
2322 * For interval 0, we obviously must schedule all packets in every scheduling opportunity.
2323 * The bandwidth for interval 0 is just the amount of data to be transmitted
2324 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2325 * the number of packets).
2327 * For interval 1, we have two possible microframes to schedule those packets
2328 * in. For this algorithm, if we can schedule the same number of packets for
2329 * each possible scheduling opportunity (each microframe), we will do so. The
2330 * remaining number of packets will be saved to be transmitted in the gaps in
2331 * the next interval's scheduling sequence.
2333 * As we move those remaining packets to be scheduled with interval 2 packets,
2334 * we have to double the number of remaining packets to transmit. This is
2335 * because the intervals are actually powers of 2, and we would be transmitting
2336 * the previous interval's packets twice in this interval. We also have to be
2337 * sure that when we look at the largest max packet size for this interval, we
2338 * also look at the largest max packet size for the remaining packets and take
2339 * the greater of the two.
2341 * The algorithm continues to evenly distribute packets in each scheduling
2342 * opportunity, and push the remaining packets out, until we get to the last
2343 * interval. Then those packets and their associated overhead are just added
2344 * to the bandwidth used.
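 *
 * Worked example of the loop below (illustrative numbers, not from the
 * original source): suppose interval 1 contributes 6 packets and nothing
 * was left over from interval 0. Then packets_remaining = 2 * 0 + 6,
 * packets_transmitted = 6 >> 2 = 1 for each of the 1 << 2 = 4 scheduling
 * opportunities, bw_added = 1 * (overhead + packet_size) is charged
 * against the budget, and 6 % 4 = 2 packets carry over to interval 2,
 * where they are doubled to 4 before interval 2's own packets are added.
 */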
2346 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2347 struct xhci_virt_device *virt_dev,
2348 int old_active_eps)
2349 {
2350 unsigned int bw_reserved;
2351 unsigned int max_bandwidth;
2352 unsigned int bw_used;
2353 unsigned int block_size;
2354 struct xhci_interval_bw_table *bw_table;
2355 unsigned int packet_size = 0;
2356 unsigned int overhead = 0;
2357 unsigned int packets_transmitted = 0;
2358 unsigned int packets_remaining = 0;
2361 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2362 return xhci_check_ss_bw(xhci, virt_dev);
2364 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2365 max_bandwidth = HS_BW_LIMIT;
2366 /* Convert percent of bus BW reserved to blocks reserved */
2367 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2369 max_bandwidth = FS_BW_LIMIT;
2370 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2373 bw_table = virt_dev->bw_table;
2374 /* We need to translate the max packet size and max ESIT payloads into
2375 * the units the hardware uses.
2377 block_size = xhci_get_block_size(virt_dev->udev);
2379 /* If we are manipulating a LS/FS device under a HS hub, double check
2380 * that the HS bus has enough bandwidth if we are activating a new TT.
2382 if (virt_dev->tt_info) {
2383 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2384 "Recalculating BW for rootport %u",
2385 virt_dev->real_port);
2386 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2387 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2388 "newly activated TT.\n");
2391 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2392 "Recalculating BW for TT slot %u port %u",
2393 virt_dev->tt_info->slot_id,
2394 virt_dev->tt_info->ttport);
2396 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2397 "Recalculating BW for rootport %u",
2398 virt_dev->real_port);
2401 /* Add in how much bandwidth will be used for interval zero, or the
2402 * rounded max ESIT payload + number of packets * largest overhead.
2404 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2405 bw_table->interval_bw[0].num_packets *
2406 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2408 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2409 unsigned int bw_added;
2410 unsigned int largest_mps;
2411 unsigned int interval_overhead;
2414 * How many packets could we transmit in this interval?
2415 * If packets didn't fit in the previous interval, we will need
2416 * to transmit that many packets twice within this interval.
2418 packets_remaining = 2 * packets_remaining +
2419 bw_table->interval_bw[i].num_packets;
2422 /* Find the largest max packet size of this or the previous
2423 * interval. */
2424 if (list_empty(&bw_table->interval_bw[i].endpoints))
2425 largest_mps = 0;
2426 else {
2427 struct xhci_virt_ep *virt_ep;
2428 struct list_head *ep_entry;
2430 ep_entry = bw_table->interval_bw[i].endpoints.next;
2431 virt_ep = list_entry(ep_entry,
2432 struct xhci_virt_ep, bw_endpoint_list);
2433 /* Convert to blocks, rounding up */
2434 largest_mps = DIV_ROUND_UP(
2435 virt_ep->bw_info.max_packet_size,
2436 block_size);
2437 }
2438 if (largest_mps > packet_size)
2439 packet_size = largest_mps;
2441 /* Use the larger overhead of this or the previous interval. */
2442 interval_overhead = xhci_get_largest_overhead(
2443 &bw_table->interval_bw[i]);
2444 if (interval_overhead > overhead)
2445 overhead = interval_overhead;
2447 /* How many packets can we evenly distribute across
2448 * (1 << (i + 1)) possible scheduling opportunities?
2450 packets_transmitted = packets_remaining >> (i + 1);
2452 /* Add in the bandwidth used for those scheduled packets */
2453 bw_added = packets_transmitted * (overhead + packet_size);
2455 /* How many packets do we have remaining to transmit? */
2456 packets_remaining = packets_remaining % (1 << (i + 1));
2458 /* What largest max packet size should those packets have? */
2459 /* If we've transmitted all packets, don't carry over the
2460 * largest packet size.
2462 if (packets_remaining == 0) {
2463 packet_size = 0;
2464 overhead = 0;
2465 } else if (packets_transmitted > 0) {
2466 /* Otherwise if we do have remaining packets, and we've
2467 * scheduled some packets in this interval, take the
2468 * largest max packet size from endpoints with this
2469 * interval.
2470 */
2471 packet_size = largest_mps;
2472 overhead = interval_overhead;
2474 /* Otherwise carry over packet_size and overhead from the last
2475 * time we had a remainder.
2477 bw_used += bw_added;
2478 if (bw_used > max_bandwidth) {
2479 xhci_warn(xhci, "Not enough bandwidth. "
2480 "Proposed: %u, Max: %u\n",
2481 bw_used, max_bandwidth);
2482 return -ENOMEM;
2483 }
2484 }
2485 /*
2486 * Ok, we know we have some packets left over after even-handedly
2487 * scheduling interval 15. We don't know which microframes they will
2488 * fit into, so we over-schedule and say they will be scheduled every
2489 * microframe.
2490 */
2491 if (packets_remaining > 0)
2492 bw_used += overhead + packet_size;
2494 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2495 unsigned int port_index = virt_dev->real_port - 1;
2497 /* OK, we're manipulating a HS device attached to a
2498 * root port bandwidth domain. Include the number of active TTs
2499 * in the bandwidth used.
2501 bw_used += TT_HS_OVERHEAD *
2502 xhci->rh_bw[port_index].num_active_tts;
2505 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2506 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2507 "Available: %u percent",
2508 bw_used, max_bandwidth, bw_reserved,
2509 (max_bandwidth - bw_used - bw_reserved) * 100 /
2510 max_bandwidth);
2512 bw_used += bw_reserved;
2513 if (bw_used > max_bandwidth) {
2514 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2515 bw_used, max_bandwidth);
2516 return -ENOMEM;
2517 }
2519 bw_table->bw_used = bw_used;
2520 return 0;
2521 }
2523 static bool xhci_is_async_ep(unsigned int ep_type)
2525 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2526 ep_type != ISOC_IN_EP &&
2527 ep_type != INT_IN_EP);
2530 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2532 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2535 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2537 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2539 if (ep_bw->ep_interval == 0)
2540 return SS_OVERHEAD_BURST +
2541 (ep_bw->mult * ep_bw->num_packets *
2542 (SS_OVERHEAD + mps));
2543 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2544 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2545 1 << ep_bw->ep_interval);
2546 }
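/*
 * Worked example (illustrative values): for a periodic IN endpoint with
 * mult = 1, num_packets = 2 and ep_interval = 3, the cost above is
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 1 << 3),
 * i.e. the per-service-interval cost averaged over 8 microframes; only
 * an ep_interval of 0 pays SS_OVERHEAD_BURST once up front instead.
 */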
2549 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2550 struct xhci_bw_info *ep_bw,
2551 struct xhci_interval_bw_table *bw_table,
2552 struct usb_device *udev,
2553 struct xhci_virt_ep *virt_ep,
2554 struct xhci_tt_bw_info *tt_info)
2556 struct xhci_interval_bw *interval_bw;
2557 int normalized_interval;
2559 if (xhci_is_async_ep(ep_bw->type))
2562 if (udev->speed >= USB_SPEED_SUPER) {
2563 if (xhci_is_sync_in_ep(ep_bw->type))
2564 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2565 xhci_get_ss_bw_consumed(ep_bw);
2566 else
2567 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2568 xhci_get_ss_bw_consumed(ep_bw);
2569 return;
2570 }
2572 /* SuperSpeed endpoints never get added to intervals in the table, so
2573 * this check is only valid for HS/FS/LS devices.
2575 if (list_empty(&virt_ep->bw_endpoint_list))
2576 return;
2577 /* For LS/FS devices, we need to translate the interval expressed in
2578 * microframes to frames.
2580 if (udev->speed == USB_SPEED_HIGH)
2581 normalized_interval = ep_bw->ep_interval;
2582 else
2583 normalized_interval = ep_bw->ep_interval - 3;
2585 if (normalized_interval == 0)
2586 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2587 interval_bw = &bw_table->interval_bw[normalized_interval];
2588 interval_bw->num_packets -= ep_bw->num_packets;
2589 switch (udev->speed) {
2590 case USB_SPEED_LOW:
2591 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2592 break;
2593 case USB_SPEED_FULL:
2594 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2595 break;
2596 case USB_SPEED_HIGH:
2597 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2598 break;
2599 default:
2600 /* Should never happen because only LS/FS/HS endpoints will get
2601 * added to the endpoint list.
2602 */
2603 return;
2604 }
2605 if (tt_info)
2606 tt_info->active_eps -= 1;
2607 list_del_init(&virt_ep->bw_endpoint_list);
2608 }
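/*
 * Interval normalization example (illustrative): a full-speed interrupt
 * endpoint with ep_interval = 6 (2^6 = 64 microframes, i.e. 8 frames)
 * lands in interval_bw[6 - 3] = interval_bw[3] above, since the LS/FS
 * table is kept in frames while the endpoint context interval is in
 * microframes.
 */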
2610 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2611 struct xhci_bw_info *ep_bw,
2612 struct xhci_interval_bw_table *bw_table,
2613 struct usb_device *udev,
2614 struct xhci_virt_ep *virt_ep,
2615 struct xhci_tt_bw_info *tt_info)
2617 struct xhci_interval_bw *interval_bw;
2618 struct xhci_virt_ep *smaller_ep;
2619 int normalized_interval;
2621 if (xhci_is_async_ep(ep_bw->type))
2624 if (udev->speed >= USB_SPEED_SUPER) {
2625 if (xhci_is_sync_in_ep(ep_bw->type))
2626 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2627 xhci_get_ss_bw_consumed(ep_bw);
2628 else
2629 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2630 xhci_get_ss_bw_consumed(ep_bw);
2631 return;
2632 }
2634 /* For LS/FS devices, we need to translate the interval expressed in
2635 * microframes to frames.
2637 if (udev->speed == USB_SPEED_HIGH)
2638 normalized_interval = ep_bw->ep_interval;
2639 else
2640 normalized_interval = ep_bw->ep_interval - 3;
2642 if (normalized_interval == 0)
2643 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2644 interval_bw = &bw_table->interval_bw[normalized_interval];
2645 interval_bw->num_packets += ep_bw->num_packets;
2646 switch (udev->speed) {
2647 case USB_SPEED_LOW:
2648 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2649 break;
2650 case USB_SPEED_FULL:
2651 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2652 break;
2653 case USB_SPEED_HIGH:
2654 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2655 break;
2656 default:
2657 /* Should never happen because only LS/FS/HS endpoints will get
2658 * added to the endpoint list.
2659 */
2660 return;
2661 }
2662
2663 if (tt_info)
2664 tt_info->active_eps += 1;
2665 /* Insert the endpoint into the list, largest max packet size first. */
2666 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2667 bw_endpoint_list) {
2668 if (ep_bw->max_packet_size >=
2669 smaller_ep->bw_info.max_packet_size) {
2670 /* Add the new ep before the smaller endpoint */
2671 list_add_tail(&virt_ep->bw_endpoint_list,
2672 &smaller_ep->bw_endpoint_list);
2673 return;
2674 }
2675 }
2676 /* Add the new endpoint at the end of the list. */
2677 list_add_tail(&virt_ep->bw_endpoint_list,
2678 &interval_bw->endpoints);
2681 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2682 struct xhci_virt_device *virt_dev,
2683 int old_active_eps)
2684 {
2685 struct xhci_root_port_bw_info *rh_bw_info;
2686 if (!virt_dev->tt_info)
2687 return;
2689 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2690 if (old_active_eps == 0 &&
2691 virt_dev->tt_info->active_eps != 0) {
2692 rh_bw_info->num_active_tts += 1;
2693 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2694 } else if (old_active_eps != 0 &&
2695 virt_dev->tt_info->active_eps == 0) {
2696 rh_bw_info->num_active_tts -= 1;
2697 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2698 }
2699 }
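/*
 * Illustrative example: when the first periodic endpoint behind a TT
 * becomes active (old_active_eps == 0, active_eps now != 0), the root
 * port gains one active TT and TT_HS_OVERHEAD is charged to the shared
 * high-speed budget; dropping the last such endpoint reverses both.
 */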
2701 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2702 struct xhci_virt_device *virt_dev,
2703 struct xhci_container_ctx *in_ctx)
2705 struct xhci_bw_info ep_bw_info[31];
2707 struct xhci_input_control_ctx *ctrl_ctx;
2708 int old_active_eps = 0;
2710 if (virt_dev->tt_info)
2711 old_active_eps = virt_dev->tt_info->active_eps;
2713 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2715 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2720 for (i = 0; i < 31; i++) {
2721 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2722 continue;
2724 /* Make a copy of the BW info in case we need to revert this */
2725 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2726 sizeof(ep_bw_info[i]));
2727 /* Drop the endpoint from the interval table if the endpoint is
2728 * being dropped or changed.
2730 if (EP_IS_DROPPED(ctrl_ctx, i))
2731 xhci_drop_ep_from_interval_table(xhci,
2732 &virt_dev->eps[i].bw_info,
2733 virt_dev->bw_table,
2734 virt_dev->udev,
2735 &virt_dev->eps[i],
2736 virt_dev->tt_info);
2737 }
2738 /* Overwrite the information stored in the endpoints' bw_info */
2739 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2740 for (i = 0; i < 31; i++) {
2741 /* Add any changed or added endpoints to the interval table */
2742 if (EP_IS_ADDED(ctrl_ctx, i))
2743 xhci_add_ep_to_interval_table(xhci,
2744 &virt_dev->eps[i].bw_info,
2745 virt_dev->bw_table,
2746 virt_dev->udev,
2747 &virt_dev->eps[i],
2748 virt_dev->tt_info);
2749 }
2751 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2752 /* Ok, this fits in the bandwidth we have.
2753 * Update the number of active TTs.
2755 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2756 return 0;
2757 }
2759 /* We don't have enough bandwidth for this, revert the stored info. */
2760 for (i = 0; i < 31; i++) {
2761 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2762 continue;
2764 /* Drop the new copies of any added or changed endpoints from
2765 * the interval table.
2767 if (EP_IS_ADDED(ctrl_ctx, i)) {
2768 xhci_drop_ep_from_interval_table(xhci,
2769 &virt_dev->eps[i].bw_info,
2770 virt_dev->bw_table,
2771 virt_dev->udev,
2772 &virt_dev->eps[i],
2773 virt_dev->tt_info);
2774 }
2775 /* Revert the endpoint back to its old information */
2776 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2777 sizeof(ep_bw_info[i]));
2778 /* Add any changed or dropped endpoints back into the table */
2779 if (EP_IS_DROPPED(ctrl_ctx, i))
2780 xhci_add_ep_to_interval_table(xhci,
2781 &virt_dev->eps[i].bw_info,
2782 virt_dev->bw_table,
2783 virt_dev->udev,
2784 &virt_dev->eps[i],
2785 virt_dev->tt_info);
2786 }
2787 return -ENOMEM;
2788 }
2791 /* Issue a configure endpoint command or evaluate context command
2792 * and wait for it to finish.
2794 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2795 struct usb_device *udev,
2796 struct xhci_command *command,
2797 bool ctx_change, bool must_succeed)
2800 unsigned long flags;
2801 struct xhci_input_control_ctx *ctrl_ctx;
2802 struct xhci_virt_device *virt_dev;
2803 struct xhci_slot_ctx *slot_ctx;
2808 spin_lock_irqsave(&xhci->lock, flags);
2810 if (xhci->xhc_state & XHCI_STATE_DYING) {
2811 spin_unlock_irqrestore(&xhci->lock, flags);
2812 return -ESHUTDOWN;
2813 }
2815 virt_dev = xhci->devs[udev->slot_id];
2817 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2818 if (!ctrl_ctx) {
2819 spin_unlock_irqrestore(&xhci->lock, flags);
2820 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2825 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2826 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2827 spin_unlock_irqrestore(&xhci->lock, flags);
2828 xhci_warn(xhci, "Not enough host resources, "
2829 "active endpoint contexts = %u\n",
2830 xhci->num_active_eps);
2831 return -ENOMEM;
2832 }
2833 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2834 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2835 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2836 xhci_free_host_resources(xhci, ctrl_ctx);
2837 spin_unlock_irqrestore(&xhci->lock, flags);
2838 xhci_warn(xhci, "Not enough bandwidth\n");
2839 return -ENOMEM;
2840 }
2842 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2844 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2845 trace_xhci_configure_endpoint(slot_ctx);
2847 if (!ctx_change)
2848 ret = xhci_queue_configure_endpoint(xhci, command,
2849 command->in_ctx->dma,
2850 udev->slot_id, must_succeed);
2851 else
2852 ret = xhci_queue_evaluate_context(xhci, command,
2853 command->in_ctx->dma,
2854 udev->slot_id, must_succeed);
2855 if (ret < 0) {
2856 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2857 xhci_free_host_resources(xhci, ctrl_ctx);
2858 spin_unlock_irqrestore(&xhci->lock, flags);
2859 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2860 "FIXME allocate a new ring segment");
2861 return -ENOMEM;
2862 }
2863 xhci_ring_cmd_db(xhci);
2864 spin_unlock_irqrestore(&xhci->lock, flags);
2866 /* Wait for the configure endpoint command to complete */
2867 wait_for_completion(command->completion);
2869 if (!ctx_change)
2870 ret = xhci_configure_endpoint_result(xhci, udev,
2871 &command->status);
2872 else
2873 ret = xhci_evaluate_context_result(xhci, udev,
2874 &command->status);
2876 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2877 spin_lock_irqsave(&xhci->lock, flags);
2878 /* If the command failed, remove the reserved resources.
2879 * Otherwise, clean up the estimate to include dropped eps.
2880 */
2881 if (ret)
2882 xhci_free_host_resources(xhci, ctrl_ctx);
2883 else
2884 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2885 spin_unlock_irqrestore(&xhci->lock, flags);
2890 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2891 struct xhci_virt_device *vdev, int i)
2893 struct xhci_virt_ep *ep = &vdev->eps[i];
2895 if (ep->ep_state & EP_HAS_STREAMS) {
2896 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2897 xhci_get_endpoint_address(i));
2898 xhci_free_stream_info(xhci, ep->stream_info);
2899 ep->stream_info = NULL;
2900 ep->ep_state &= ~EP_HAS_STREAMS;
2904 /* Called after one or more calls to xhci_add_endpoint() or
2905 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2906 * to call xhci_reset_bandwidth().
2908 * Since we are in the middle of changing either configuration or
2909 * installing a new alt setting, the USB core won't allow URBs to be
2910 * enqueued for any endpoint on the old config or interface. Nothing
2911 * else should be touching the xhci->devs[slot_id] structure, so we
2912 * don't need to take the xhci->lock for manipulating that.
2914 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2918 struct xhci_hcd *xhci;
2919 struct xhci_virt_device *virt_dev;
2920 struct xhci_input_control_ctx *ctrl_ctx;
2921 struct xhci_slot_ctx *slot_ctx;
2922 struct xhci_command *command;
2924 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2927 xhci = hcd_to_xhci(hcd);
2928 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2929 (xhci->xhc_state & XHCI_STATE_REMOVING))
2930 return -ENODEV;
2932 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2933 virt_dev = xhci->devs[udev->slot_id];
2935 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2939 command->in_ctx = virt_dev->in_ctx;
2941 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2942 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2943 if (!ctrl_ctx) {
2944 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2945 __func__);
2946 ret = -ENOMEM;
2947 goto command_cleanup;
2948 }
2949 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2950 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2951 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2953 /* Don't issue the command if there are no endpoints to update. */
2954 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2955 ctrl_ctx->drop_flags == 0) {
2956 ret = 0;
2957 goto command_cleanup;
2958 }
2959 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2960 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2961 for (i = 31; i >= 1; i--) {
2962 __le32 le32 = cpu_to_le32(BIT(i));
2964 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2965 || (ctrl_ctx->add_flags & le32) || i == 1) {
2966 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2967 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2968 break;
2969 }
2970 }
2972 ret = xhci_configure_endpoint(xhci, udev, command,
2973 false, false);
2974 if (ret)
2975 /* Callee should call reset_bandwidth() */
2976 goto command_cleanup;
2978 /* Free any rings that were dropped, but not changed. */
2979 for (i = 1; i < 31; i++) {
2980 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2981 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2982 xhci_free_endpoint_ring(xhci, virt_dev, i);
2983 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2986 xhci_zero_in_ctx(xhci, virt_dev);
2988 * Install any rings for completely new endpoints or changed endpoints,
2989 * and free any old rings from changed endpoints.
2991 for (i = 1; i < 31; i++) {
2992 if (!virt_dev->eps[i].new_ring)
2993 continue;
2994 /* Only free the old ring if it exists.
2995 * It may not if this is the first add of an endpoint.
2997 if (virt_dev->eps[i].ring) {
2998 xhci_free_endpoint_ring(xhci, virt_dev, i);
2999 }
3000 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3001 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3002 virt_dev->eps[i].new_ring = NULL;
3003 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3004 }
3005 command_cleanup:
3006 kfree(command->completion);
3007 kfree(command);
3008 return ret;
3009 }
3011 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3013 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3015 struct xhci_hcd *xhci;
3016 struct xhci_virt_device *virt_dev;
3019 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3022 xhci = hcd_to_xhci(hcd);
3024 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3025 virt_dev = xhci->devs[udev->slot_id];
3026 /* Free any rings allocated for added endpoints */
3027 for (i = 0; i < 31; i++) {
3028 if (virt_dev->eps[i].new_ring) {
3029 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3030 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3031 virt_dev->eps[i].new_ring = NULL;
3034 xhci_zero_in_ctx(xhci, virt_dev);
3036 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3038 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3039 struct xhci_container_ctx *in_ctx,
3040 struct xhci_container_ctx *out_ctx,
3041 struct xhci_input_control_ctx *ctrl_ctx,
3042 u32 add_flags, u32 drop_flags)
3044 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3045 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3046 xhci_slot_copy(xhci, in_ctx, out_ctx);
3047 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3050 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3051 struct usb_host_endpoint *host_ep)
3053 struct xhci_hcd *xhci;
3054 struct xhci_virt_device *vdev;
3055 struct xhci_virt_ep *ep;
3056 struct usb_device *udev;
3057 unsigned long flags;
3058 unsigned int ep_index;
3060 xhci = hcd_to_xhci(hcd);
3061 rescan:
3062 spin_lock_irqsave(&xhci->lock, flags);
3064 udev = (struct usb_device *)host_ep->hcpriv;
3065 if (!udev || !udev->slot_id)
3066 goto done;
3067
3068 vdev = xhci->devs[udev->slot_id];
3069 if (!vdev)
3070 goto done;
3072 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3073 ep = &vdev->eps[ep_index];
3075 /* wait for hub_tt_work to finish clearing hub TT */
3076 if (ep->ep_state & EP_CLEARING_TT) {
3077 spin_unlock_irqrestore(&xhci->lock, flags);
3078 schedule_timeout_uninterruptible(1);
3079 goto rescan;
3080 }
3081
3082 if (ep->ep_state)
3083 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3084 ep->ep_state);
3085 done:
3086 host_ep->hcpriv = NULL;
3087 spin_unlock_irqrestore(&xhci->lock, flags);
3091 * Called after usb core issues a clear halt control message.
3092 * The host side of the halt should already be cleared by a reset endpoint
3093 * command issued when the STALL event was received.
3095 * The reset endpoint command may only be issued to endpoints in the halted
3096 * state. For software that wishes to reset the data toggle or sequence number
3097 * of an endpoint that isn't in the halted state this function will issue a
3098 * configure endpoint command with the Drop and Add bits set for the target
3099 * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3101 * vdev may be lost due to xHC restore error and re-initialization during S3/S4
3102 * resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
3105 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3106 struct usb_host_endpoint *host_ep)
3108 struct xhci_hcd *xhci;
3109 struct usb_device *udev;
3110 struct xhci_virt_device *vdev;
3111 struct xhci_virt_ep *ep;
3112 struct xhci_input_control_ctx *ctrl_ctx;
3113 struct xhci_command *stop_cmd, *cfg_cmd;
3114 unsigned int ep_index;
3115 unsigned long flags;
3119 xhci = hcd_to_xhci(hcd);
3120 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3123 * Usb core assumes a max packet value for ep0 on FS devices until the
3124 * real value is read from the descriptor. Core resets Ep0 if values
3125 * mismatch. Reconfigure the xhci ep0 endpoint context here in that case
3127 if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
3129 udev = container_of(host_ep, struct usb_device, ep0);
3130 if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
3131 return;
3133 vdev = xhci->devs[udev->slot_id];
3134 if (!vdev || vdev->udev != udev)
3135 return;
3137 xhci_check_ep0_maxpacket(xhci, vdev);
3139 /* Nothing else should be done here for ep0 during ep reset */
3140 return;
3141 }
3143 if (!host_ep->hcpriv)
3144 return;
3145 udev = (struct usb_device *) host_ep->hcpriv;
3146 vdev = xhci->devs[udev->slot_id];
3148 if (!udev->slot_id || !vdev)
3149 return;
3151 ep = &vdev->eps[ep_index];
3153 /* Bail out if toggle is already being cleared by an endpoint reset */
3154 spin_lock_irqsave(&xhci->lock, flags);
3155 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3156 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3157 spin_unlock_irqrestore(&xhci->lock, flags);
3160 spin_unlock_irqrestore(&xhci->lock, flags);
3161 /* Only interrupt and bulk eps use data toggle, USB2 spec 5.5.4 */
3162 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3163 usb_endpoint_xfer_isoc(&host_ep->desc))
3164 return;
3166 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3168 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3169 return;
3171 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3172 if (!stop_cmd)
3173 return;
3175 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3176 if (!cfg_cmd)
3177 goto cleanup;
3179 spin_lock_irqsave(&xhci->lock, flags);
3181 /* block queuing new trbs and ringing ep doorbell */
3182 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3185 * Make sure endpoint ring is empty before resetting the toggle/seq.
3186 * Driver is required to synchronously cancel all transfer requests.
3187 * Stop the endpoint to force xHC to update the output context
3190 if (!list_empty(&ep->ring->td_list)) {
3191 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3192 spin_unlock_irqrestore(&xhci->lock, flags);
3193 xhci_free_command(xhci, cfg_cmd);
3194 goto cleanup;
3195 }
3197 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3198 ep_index, 0);
3199 if (err < 0) {
3200 spin_unlock_irqrestore(&xhci->lock, flags);
3201 xhci_free_command(xhci, cfg_cmd);
3202 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
3203 __func__, err);
3204 goto cleanup;
3205 }
3207 xhci_ring_cmd_db(xhci);
3208 spin_unlock_irqrestore(&xhci->lock, flags);
3210 wait_for_completion(stop_cmd->completion);
3212 spin_lock_irqsave(&xhci->lock, flags);
3214 /* config ep command clears toggle if add and drop ep flags are set */
3215 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3216 if (!ctrl_ctx) {
3217 spin_unlock_irqrestore(&xhci->lock, flags);
3218 xhci_free_command(xhci, cfg_cmd);
3219 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3220 __func__);
3221 goto cleanup;
3222 }
3224 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3225 ctrl_ctx, ep_flag, ep_flag);
3226 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3228 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3229 udev->slot_id, false);
3230 if (err < 0) {
3231 spin_unlock_irqrestore(&xhci->lock, flags);
3232 xhci_free_command(xhci, cfg_cmd);
3233 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
3234 __func__, err);
3235 goto cleanup;
3236 }
3238 xhci_ring_cmd_db(xhci);
3239 spin_unlock_irqrestore(&xhci->lock, flags);
3241 wait_for_completion(cfg_cmd->completion);
3243 xhci_free_command(xhci, cfg_cmd);
3244 cleanup:
3245 xhci_free_command(xhci, stop_cmd);
3246 spin_lock_irqsave(&xhci->lock, flags);
3247 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3248 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3249 spin_unlock_irqrestore(&xhci->lock, flags);
3252 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3253 struct usb_device *udev, struct usb_host_endpoint *ep,
3254 unsigned int slot_id)
3257 unsigned int ep_index;
3258 unsigned int ep_state;
3262 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3263 if (ret <= 0)
3264 return ret ? ret : -EINVAL;
3265 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3266 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3267 " descriptor for ep 0x%x does not support streams\n",
3268 ep->desc.bEndpointAddress);
3272 ep_index = xhci_get_endpoint_index(&ep->desc);
3273 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3274 if (ep_state & EP_HAS_STREAMS ||
3275 ep_state & EP_GETTING_STREAMS) {
3276 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3277 "already has streams set up.\n",
3278 ep->desc.bEndpointAddress);
3279 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3280 "dynamic stream context array reallocation.\n");
3283 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3284 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3285 "endpoint 0x%x; URBs are pending.\n",
3286 ep->desc.bEndpointAddress);
3292 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3293 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3295 unsigned int max_streams;
3297 /* The stream context array size must be a power of two */
3298 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3300 * Find out how many primary stream array entries the host controller
3301 * supports. Later we may use secondary stream arrays (similar to 2nd
3302 * level page entries), but that's an optional feature for xHCI host
3303 * controllers. xHCs must support at least 4 stream IDs.
3305 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3306 if (*num_stream_ctxs > max_streams) {
3307 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3308 max_streams);
3309 *num_stream_ctxs = max_streams;
3310 *num_streams = max_streams;
3311 }
3312 }
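/*
 * Worked example (illustrative): a request for 30 usable streams arrives
 * here as 31 including stream 0, which rounds up to *num_stream_ctxs = 32;
 * if HCC_MAX_PSA() only allows 16 entries, both values are clamped to 16
 * and the caller ends up with 15 usable stream IDs.
 */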
3314 /* Returns an error code if one of the endpoints already has streams.
3315 * This does not change any data structures, it only checks and gathers
3316 * information.
3317 */
3318 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3319 struct usb_device *udev,
3320 struct usb_host_endpoint **eps, unsigned int num_eps,
3321 unsigned int *num_streams, u32 *changed_ep_bitmask)
3323 unsigned int max_streams;
3324 unsigned int endpoint_flag;
3328 for (i = 0; i < num_eps; i++) {
3329 ret = xhci_check_streams_endpoint(xhci, udev,
3330 eps[i], udev->slot_id);
3334 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3335 if (max_streams < (*num_streams - 1)) {
3336 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3337 eps[i]->desc.bEndpointAddress,
3338 max_streams);
3339 *num_streams = max_streams + 1;
3340 }
3342 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3343 if (*changed_ep_bitmask & endpoint_flag)
3344 return -EINVAL;
3345 *changed_ep_bitmask |= endpoint_flag;
3346 }
3347 return 0;
3348 }
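/*
 * Illustrative example: if a driver asks for 16 streams, *num_streams
 * arrives here as 17 (stream 0 included); an endpoint whose companion
 * descriptor only supports 8 stream IDs trips the check above and
 * lowers *num_streams to 8 + 1 = 9.
 */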
3350 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3351 struct usb_device *udev,
3352 struct usb_host_endpoint **eps, unsigned int num_eps)
3354 u32 changed_ep_bitmask = 0;
3355 unsigned int slot_id;
3356 unsigned int ep_index;
3357 unsigned int ep_state;
3360 slot_id = udev->slot_id;
3361 if (!xhci->devs[slot_id])
3362 return 0;
3364 for (i = 0; i < num_eps; i++) {
3365 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3366 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3367 /* Are streams already being freed for the endpoint? */
3368 if (ep_state & EP_GETTING_NO_STREAMS) {
3369 xhci_warn(xhci, "WARN Can't disable streams for "
3371 "streams are being disabled already\n",
3372 eps[i]->desc.bEndpointAddress);
3373 return 0;
3374 }
3375 /* Are there actually any streams to free? */
3376 if (!(ep_state & EP_HAS_STREAMS) &&
3377 !(ep_state & EP_GETTING_STREAMS)) {
3378 xhci_warn(xhci, "WARN Can't disable streams for "
3380 "streams are already disabled!\n",
3381 eps[i]->desc.bEndpointAddress);
3382 xhci_warn(xhci, "WARN xhci_free_streams() called "
3383 "with non-streams endpoint\n");
3384 return 0;
3385 }
3386 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3388 return changed_ep_bitmask;
3392 * The USB device drivers use this function (through the HCD interface in USB
3393 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3394 * coordinate mass storage command queueing across multiple endpoints (basically
3395 * a stream ID == a task ID).
3397 * Setting up streams involves allocating the same size stream context array
3398 * for each endpoint and issuing a configure endpoint command for all endpoints.
3400 * Don't allow the call to succeed if one endpoint only supports one stream
3401 * (which means it doesn't support streams at all).
3403 * Drivers may get fewer stream IDs than they asked for, if the host controller
3404 * hardware or endpoints claim they can't support the number of requested
3405 * streams.
3406 */
3407 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3408 struct usb_host_endpoint **eps, unsigned int num_eps,
3409 unsigned int num_streams, gfp_t mem_flags)
3412 struct xhci_hcd *xhci;
3413 struct xhci_virt_device *vdev;
3414 struct xhci_command *config_cmd;
3415 struct xhci_input_control_ctx *ctrl_ctx;
3416 unsigned int ep_index;
3417 unsigned int num_stream_ctxs;
3418 unsigned int max_packet;
3419 unsigned long flags;
3420 u32 changed_ep_bitmask = 0;
3425 /* Add one to the number of streams requested to account for
3426 * stream 0 that is reserved for xHCI usage.
3427 */
3428 num_streams += 1;
3429 xhci = hcd_to_xhci(hcd);
3430 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3433 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3434 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3435 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3436 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3440 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3444 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3445 if (!ctrl_ctx) {
3446 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3447 __func__);
3448 xhci_free_command(xhci, config_cmd);
3449 return -ENOMEM;
3450 }
3452 /* Check to make sure all endpoints are not already configured for
3453 * streams. While we're at it, find the maximum number of streams that
3454 * all the endpoints will support and check for duplicate endpoints.
3456 spin_lock_irqsave(&xhci->lock, flags);
3457 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3458 num_eps, &num_streams, &changed_ep_bitmask);
3459 if (ret < 0) {
3460 xhci_free_command(xhci, config_cmd);
3461 spin_unlock_irqrestore(&xhci->lock, flags);
3462 return ret;
3463 }
3464 if (num_streams <= 1) {
3465 xhci_warn(xhci, "WARN: endpoints can't handle "
3466 "more than one stream.\n");
3467 xhci_free_command(xhci, config_cmd);
3468 spin_unlock_irqrestore(&xhci->lock, flags);
3471 vdev = xhci->devs[udev->slot_id];
3472 /* Mark each endpoint as being in transition, so
3473 * xhci_urb_enqueue() will reject all URBs.
3475 for (i = 0; i < num_eps; i++) {
3476 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3477 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3479 spin_unlock_irqrestore(&xhci->lock, flags);
3481 /* Setup internal data structures and allocate HW data structures for
3482 * streams (but don't install the HW structures in the input context
3483 * until we're sure all memory allocation succeeded).
3485 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3486 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3487 num_stream_ctxs, num_streams);
3489 for (i = 0; i < num_eps; i++) {
3490 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3491 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3492 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3493 num_stream_ctxs,
3494 num_streams,
3495 max_packet, mem_flags);
3496 if (!vdev->eps[ep_index].stream_info)
3497 goto cleanup;
3498 /* Set maxPstreams in endpoint context and update deq ptr to
3499 * point to stream context array. FIXME
3503 /* Set up the input context for a configure endpoint command. */
3504 for (i = 0; i < num_eps; i++) {
3505 struct xhci_ep_ctx *ep_ctx;
3507 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3508 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3510 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3511 vdev->out_ctx, ep_index);
3512 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3513 vdev->eps[ep_index].stream_info);
3515 /* Tell the HW to drop its old copy of the endpoint context info
3516 * and add the updated copy from the input context.
3518 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3519 vdev->out_ctx, ctrl_ctx,
3520 changed_ep_bitmask, changed_ep_bitmask);
3522 /* Issue and wait for the configure endpoint command */
3523 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3524 false, false);
3526 /* xHC rejected the configure endpoint command for some reason, so we
3527 * leave the old ring intact and free our internal streams data
3528 * structure.
3529 */
3530 if (ret < 0)
3531 goto cleanup;
3533 spin_lock_irqsave(&xhci->lock, flags);
3534 for (i = 0; i < num_eps; i++) {
3535 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3536 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3537 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3538 udev->slot_id, ep_index);
3539 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3541 xhci_free_command(xhci, config_cmd);
3542 spin_unlock_irqrestore(&xhci->lock, flags);
3544 for (i = 0; i < num_eps; i++) {
3545 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3546 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3548 /* Subtract 1 for stream 0, which drivers can't use */
3549 return num_streams - 1;
3551 cleanup:
3552 /* If it didn't work, free the streams! */
3553 for (i = 0; i < num_eps; i++) {
3554 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3555 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3556 vdev->eps[ep_index].stream_info = NULL;
3557 /* FIXME Unset maxPstreams in endpoint context and
3558 * update deq ptr to point to normal ring.
3560 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3561 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3562 xhci_endpoint_zero(xhci, vdev, eps[i]);
3564 xhci_free_command(xhci, config_cmd);
3565 return -ENOMEM;
3566 }
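/*
 * Usage sketch (illustrative, not from this file): a class driver such
 * as uas reaches this code through the USB core wrapper, e.g.
 *
 * streams = usb_alloc_streams(intf, eps, num_eps, 256, GFP_KERNEL);
 *
 * and must treat a positive return value as the number of usable stream
 * IDs, which may be fewer than requested.
 */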
3568 /* Transition the endpoint from using streams to being a "normal" endpoint
3569 * without streams.
3570 *
3571 * Modify the endpoint context state, submit a configure endpoint command,
3572 * and free all endpoint rings for streams if that completes successfully.
3574 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3575 struct usb_host_endpoint **eps, unsigned int num_eps,
3576 gfp_t mem_flags)
3577 {
3579 struct xhci_hcd *xhci;
3580 struct xhci_virt_device *vdev;
3581 struct xhci_command *command;
3582 struct xhci_input_control_ctx *ctrl_ctx;
3583 unsigned int ep_index;
3584 unsigned long flags;
3585 u32 changed_ep_bitmask;
3587 xhci = hcd_to_xhci(hcd);
3588 vdev = xhci->devs[udev->slot_id];
3590 /* Set up a configure endpoint command to remove the streams rings */
3591 spin_lock_irqsave(&xhci->lock, flags);
3592 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3593 udev, eps, num_eps);
3594 if (changed_ep_bitmask == 0) {
3595 spin_unlock_irqrestore(&xhci->lock, flags);
3599 /* Use the xhci_command structure from the first endpoint. We may have
3600 * allocated too many, but the driver may call xhci_free_streams() for
3601 * each endpoint it grouped into one call to xhci_alloc_streams().
3603 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3604 command = vdev->eps[ep_index].stream_info->free_streams_command;
3605 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3606 if (!ctrl_ctx) {
3607 spin_unlock_irqrestore(&xhci->lock, flags);
3608 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3609 __func__);
3610 return -EINVAL;
3611 }
3613 for (i = 0; i < num_eps; i++) {
3614 struct xhci_ep_ctx *ep_ctx;
3616 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3617 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3618 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3619 EP_GETTING_NO_STREAMS;
3621 xhci_endpoint_copy(xhci, command->in_ctx,
3622 vdev->out_ctx, ep_index);
3623 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3624 &vdev->eps[ep_index]);
3626 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3627 vdev->out_ctx, ctrl_ctx,
3628 changed_ep_bitmask, changed_ep_bitmask);
3629 spin_unlock_irqrestore(&xhci->lock, flags);
3631 /* Issue and wait for the configure endpoint command,
3632 * which must succeed.
3634 ret = xhci_configure_endpoint(xhci, udev, command,
3635 false, true);
3637 /* xHC rejected the configure endpoint command for some reason, so we
3638 * leave the streams rings intact.
3639 */
3640 if (ret < 0)
3641 return ret;
3643 spin_lock_irqsave(&xhci->lock, flags);
3644 for (i = 0; i < num_eps; i++) {
3645 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3646 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3647 vdev->eps[ep_index].stream_info = NULL;
3648 /* FIXME Unset maxPstreams in endpoint context and
3649 * update deq ptr to point to normal ring.
3651 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3652 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3654 spin_unlock_irqrestore(&xhci->lock, flags);
3660 * Deletes endpoint resources for endpoints that were active before a Reset
3661 * Device command, or a Disable Slot command. The Reset Device command leaves
3662 * the control endpoint intact, whereas the Disable Slot command deletes it.
3664 * Must be called with xhci->lock held.
3666 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3667 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3670 unsigned int num_dropped_eps = 0;
3671 unsigned int drop_flags = 0;
3673 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3674 if (virt_dev->eps[i].ring) {
3675 drop_flags |= 1 << i;
3676 num_dropped_eps++;
3677 }
3678 }
3679 xhci->num_active_eps -= num_dropped_eps;
3680 if (num_dropped_eps)
3681 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3682 "Dropped %u ep ctxs, flags = 0x%x, "
3683 "%u now active.",
3684 num_dropped_eps, drop_flags,
3685 xhci->num_active_eps);
3689 * This submits a Reset Device Command, which will set the device state to 0,
3690 * set the device address to 0, and disable all the endpoints except the default
3691 * control endpoint. The USB core should come back and call
3692 * xhci_address_device(), and then re-set up the configuration. If this is
3693 * called because of a usb_reset_and_verify_device(), then the old alternate
3694 * settings will be re-installed through the normal bandwidth allocation
3697 * Wait for the Reset Device command to finish. Remove all structures
3698 * associated with the endpoints that were disabled. Clear the input device
3699 * structure? Reset the control endpoint 0 max packet size?
3701 * If the virt_dev to be reset does not exist or does not match the udev,
3702 * it means the device is lost, possibly due to the xHC restore error and
3703 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3704 * re-allocate the device.
3706 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3707 struct usb_device *udev)
3710 unsigned long flags;
3711 struct xhci_hcd *xhci;
3712 unsigned int slot_id;
3713 struct xhci_virt_device *virt_dev;
3714 struct xhci_command *reset_device_cmd;
3715 struct xhci_slot_ctx *slot_ctx;
3716 int old_active_eps = 0;
3718 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3721 xhci = hcd_to_xhci(hcd);
3722 slot_id = udev->slot_id;
3723 virt_dev = xhci->devs[slot_id];
3724 if (!virt_dev) {
3725 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3726 "not exist. Re-allocate the device\n", slot_id);
3727 ret = xhci_alloc_dev(hcd, udev);
3728 if (ret == 1)
3729 return 0;
3730 else
3731 return -EINVAL;
3732 }
3734 if (virt_dev->tt_info)
3735 old_active_eps = virt_dev->tt_info->active_eps;
3737 if (virt_dev->udev != udev) {
3738 /* If the virt_dev and the udev does not match, this virt_dev
3739 * may belong to another udev.
3740 * Re-allocate the device.
3742 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3743 "not match the udev. Re-allocate the device\n",
3745 ret = xhci_alloc_dev(hcd, udev);
3746 if (ret == 1)
3747 return 0;
3748 else
3749 return -EINVAL;
3750 }
3752 /* If device is not setup, there is no point in resetting it */
3753 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3754 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3755 SLOT_STATE_DISABLED)
3756 return 0;
3758 trace_xhci_discover_or_reset_device(slot_ctx);
3760 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3761 /* Allocate the command structure that holds the struct completion.
3762 * Assume we're in process context, since the normal device reset
3763 * process has to wait for the device anyway. Storage devices are
3764 * reset as part of error handling, so use GFP_NOIO instead of
3765 * GFP_KERNEL.
3766 */
3767 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3768 if (!reset_device_cmd) {
3769 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3770 return -ENOMEM;
3771 }
3773 /* Attempt to submit the Reset Device command to the command ring */
3774 spin_lock_irqsave(&xhci->lock, flags);
3776 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3777 if (ret) {
3778 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3779 spin_unlock_irqrestore(&xhci->lock, flags);
3780 goto command_cleanup;
3782 xhci_ring_cmd_db(xhci);
3783 spin_unlock_irqrestore(&xhci->lock, flags);
3785 /* Wait for the Reset Device command to finish */
3786 wait_for_completion(reset_device_cmd->completion);
3788 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3789 * unless we tried to reset a slot ID that wasn't enabled,
3790 * or the device wasn't in the addressed or configured state.
3792 ret = reset_device_cmd->status;
3793 switch (ret) {
3794 case COMP_COMMAND_ABORTED:
3795 case COMP_COMMAND_RING_STOPPED:
3796 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3797 ret = -ETIME;
3798 goto command_cleanup;
3799 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3800 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3801 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3802 slot_id,
3803 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3804 xhci_dbg(xhci, "Not freeing device rings.\n");
3805 /* Don't treat this as an error. May change my mind later. */
3806 ret = 0;
3807 goto command_cleanup;
3808 case COMP_SUCCESS:
3809 xhci_dbg(xhci, "Successful reset device command.\n");
3810 break;
3811 default:
3812 if (xhci_is_vendor_info_code(xhci, ret))
3813 break;
3814 xhci_warn(xhci, "Unknown completion code %u for "
3815 "reset device command.\n", ret);
3816 ret = -EINVAL;
3817 goto command_cleanup;
3818 }
3820 /* Free up host controller endpoint resources */
3821 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3822 spin_lock_irqsave(&xhci->lock, flags);
3823 /* Don't delete the default control endpoint resources */
3824 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3825 spin_unlock_irqrestore(&xhci->lock, flags);
3828 /* Everything but endpoint 0 is disabled, so free the rings. */
3829 for (i = 1; i < 31; i++) {
3830 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3832 if (ep->ep_state & EP_HAS_STREAMS) {
3833 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3834 xhci_get_endpoint_address(i));
3835 xhci_free_stream_info(xhci, ep->stream_info);
3836 ep->stream_info = NULL;
3837 ep->ep_state &= ~EP_HAS_STREAMS;
3838 }
3839
3840 if (ep->ring) {
3841 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3842 xhci_free_endpoint_ring(xhci, virt_dev, i);
3843 }
3844 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3845 xhci_drop_ep_from_interval_table(xhci,
3846 &virt_dev->eps[i].bw_info,
3847 virt_dev->bw_table,
3848 udev,
3849 &virt_dev->eps[i],
3850 virt_dev->tt_info);
3851 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3853 /* If necessary, update the number of active TTs on this root port */
3854 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3855 virt_dev->flags = 0;
3856 ret = 0;
3857
3858 command_cleanup:
3859 xhci_free_command(xhci, reset_device_cmd);
3860 return ret;
3861 }
3864 * At this point, the struct usb_device is about to go away, the device has
3865 * disconnected, and all traffic has been stopped and the endpoints have been
3866 * disabled. Free any HC data structures associated with that device.
3868 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3870 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3871 struct xhci_virt_device *virt_dev;
3872 struct xhci_slot_ctx *slot_ctx;
3873 unsigned long flags;
3877 * We called pm_runtime_get_noresume when the device was attached.
3878 * Decrement the counter here to allow controller to runtime suspend
3879 * if no devices remain.
3881 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3882 pm_runtime_put_noidle(hcd->self.controller);
3884 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3885 /* If the host is halted due to driver unload, we still need to free the
3886 * device.
3887 */
3888 if (ret <= 0 && ret != -ENODEV)
3889 return;
3891 virt_dev = xhci->devs[udev->slot_id];
3892 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3893 trace_xhci_free_dev(slot_ctx);
3895 /* Stop any wayward timer functions (which may grab the lock) */
3896 for (i = 0; i < 31; i++)
3897 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3898 virt_dev->udev = NULL;
3899 xhci_disable_slot(xhci, udev->slot_id);
3901 spin_lock_irqsave(&xhci->lock, flags);
3902 xhci_free_virt_device(xhci, udev->slot_id);
3903 spin_unlock_irqrestore(&xhci->lock, flags);
3907 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3909 struct xhci_command *command;
3910 unsigned long flags;
3914 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3918 xhci_debugfs_remove_slot(xhci, slot_id);
3920 spin_lock_irqsave(&xhci->lock, flags);
3921 /* Don't disable the slot if the host controller is dead. */
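/* A status read of all ones means the controller hardware is gone
 * (e.g. PCI hot-remove), so there is nothing left to disable.
 */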
3922 state = readl(&xhci->op_regs->status);
3923 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3924 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3925 spin_unlock_irqrestore(&xhci->lock, flags);
3930 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3933 spin_unlock_irqrestore(&xhci->lock, flags);
3937 xhci_ring_cmd_db(xhci);
3938 spin_unlock_irqrestore(&xhci->lock, flags);
3940 wait_for_completion(command->completion);
3942 if (command->status != COMP_SUCCESS)
3943 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
3944 slot_id, command->status);
3946 xhci_free_command(xhci, command);
3952 * Checks if we have enough host controller resources for the default control
3953 * endpoint.
3955 * Must be called with xhci->lock held.
3957 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3959 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3960 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3961 "Not enough ep ctxs: %u active, need to add 1, limit is %u.",
3963 xhci->num_active_eps, xhci->limit_active_eps);
3966 xhci->num_active_eps += 1;
3967 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3968 "Adding 1 ep ctx, %u now active.",
3969 xhci->num_active_eps);
3975 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3976 * timed out, or allocating memory failed. Returns 1 on success.
3978 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3980 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3981 struct xhci_virt_device *vdev;
3982 struct xhci_slot_ctx *slot_ctx;
3983 unsigned long flags;
3985 struct xhci_command *command;
3987 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3991 spin_lock_irqsave(&xhci->lock, flags);
3992 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3994 spin_unlock_irqrestore(&xhci->lock, flags);
3995 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3996 xhci_free_command(xhci, command);
3999 xhci_ring_cmd_db(xhci);
4000 spin_unlock_irqrestore(&xhci->lock, flags);
4002 wait_for_completion(command->completion);
4003 slot_id = command->slot_id;
4005 if (!slot_id || command->status != COMP_SUCCESS) {
4006 xhci_err(xhci, "Error while assigning device slot ID: %s\n",
4007 xhci_trb_comp_code_string(command->status));
4008 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4010 readl(&xhci->cap_regs->hcs_params1)));
4011 xhci_free_command(xhci, command);
4015 xhci_free_command(xhci, command);
4017 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4018 spin_lock_irqsave(&xhci->lock, flags);
4019 ret = xhci_reserve_host_control_ep_resources(xhci);
4021 spin_unlock_irqrestore(&xhci->lock, flags);
4022 xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
4024 xhci->num_active_eps);
4027 spin_unlock_irqrestore(&xhci->lock, flags);
4029 /* Use GFP_NOIO, since this function can be called from
4030 * xhci_discover_or_reset_device(), which may be called as part of
4031 * mass storage driver error handling.
4033 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4034 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4037 vdev = xhci->devs[slot_id];
4038 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4039 trace_xhci_alloc_dev(slot_ctx);
4041 udev->slot_id = slot_id;
4043 xhci_debugfs_create_slot(xhci, slot_id);
4046 * If resetting upon resume, we can't put the controller into runtime
4047 * suspend if there is a device attached.
4049 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4050 pm_runtime_get_noresume(hcd->self.controller);
4052 /* Is this a LS or FS device under a HS hub? */
4053 /* Hub or peripheral? */
4057 xhci_disable_slot(xhci, udev->slot_id);
4058 xhci_free_virt_device(xhci, udev->slot_id);
4064 * xhci_setup_device - issues an Address Device command to assign a unique
4065 * USB bus address.
4066 * @hcd: USB host controller data structure.
4067 * @udev: USB dev structure representing the connected device.
4068 * @setup: Enum specifying setup mode: address only or with context.
4069 * @timeout_ms: Max wait time (ms) for the command operation to complete.
4071 * Return: 0 if successful; otherwise, negative error code.
4073 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4074 enum xhci_setup_dev setup, unsigned int timeout_ms)
4076 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4077 unsigned long flags;
4078 struct xhci_virt_device *virt_dev;
4080 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4081 struct xhci_slot_ctx *slot_ctx;
4082 struct xhci_input_control_ctx *ctrl_ctx;
4084 struct xhci_command *command = NULL;
4086 mutex_lock(&xhci->mutex);
4088 if (xhci->xhc_state) { /* dying, removing or halted */
4093 if (!udev->slot_id) {
4094 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4095 "Bad Slot ID %d", udev->slot_id);
4100 virt_dev = xhci->devs[udev->slot_id];
4102 if (WARN_ON(!virt_dev)) {
4104 * In plug/unplug torture test with an NEC controller,
4105 * a zero-dereference was observed once due to virt_dev = 0.
4106 * Print useful debug rather than crash if it is observed again!
4108 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4113 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4114 trace_xhci_setup_device_slot(slot_ctx);
4116 if (setup == SETUP_CONTEXT_ONLY) {
4117 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4118 SLOT_STATE_DEFAULT) {
4119 xhci_dbg(xhci, "Slot already in default state\n");
4124 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4130 command->in_ctx = virt_dev->in_ctx;
4131 command->timeout_ms = timeout_ms;
4133 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4134 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4136 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4142 * If this is the first Set Address since device plug-in or
4143 * virt_device reallocation after a resume with an xHCI power loss,
4144 * then set up the slot context.
4146 if (!slot_ctx->dev_info)
4147 xhci_setup_addressable_virt_dev(xhci, udev);
4148 /* Otherwise, update the control endpoint ring enqueue pointer. */
4150 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4151 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4152 ctrl_ctx->drop_flags = 0;
4154 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4155 le32_to_cpu(slot_ctx->dev_info) >> 27);
4157 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4158 spin_lock_irqsave(&xhci->lock, flags);
4159 trace_xhci_setup_device(virt_dev);
4160 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4161 udev->slot_id, setup);
4163 spin_unlock_irqrestore(&xhci->lock, flags);
4164 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4165 "FIXME: allocate a command ring segment");
4168 xhci_ring_cmd_db(xhci);
4169 spin_unlock_irqrestore(&xhci->lock, flags);
4171 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4172 wait_for_completion(command->completion);
4174 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4175 * the SetAddress() "recovery interval" required by USB and aborting the
4176 * command on a timeout.
4178 switch (command->status) {
4179 case COMP_COMMAND_ABORTED:
4180 case COMP_COMMAND_RING_STOPPED:
4181 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4184 case COMP_CONTEXT_STATE_ERROR:
4185 case COMP_SLOT_NOT_ENABLED_ERROR:
4186 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4187 act, udev->slot_id);
4190 case COMP_USB_TRANSACTION_ERROR:
4191 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4193 mutex_unlock(&xhci->mutex);
4194 ret = xhci_disable_slot(xhci, udev->slot_id);
4195 xhci_free_virt_device(xhci, udev->slot_id);
4197 xhci_alloc_dev(hcd, udev);
4198 kfree(command->completion);
4201 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4202 dev_warn(&udev->dev,
4203 "ERROR: Incompatible device for setup %s command\n", act);
4207 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4208 "Successful setup %s command", act);
4212 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4213 act, command->status);
4214 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4220 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4221 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4222 "Op regs DCBAA ptr = %#016llx", temp_64);
4223 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4224 "Slot ID %d dcbaa entry @%p = %#016llx",
4226 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4227 (unsigned long long)
4228 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4229 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4230 "Output Context DMA address = %#08llx",
4231 (unsigned long long)virt_dev->out_ctx->dma);
4232 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4233 le32_to_cpu(slot_ctx->dev_info) >> 27);
4235 * USB core uses address 1 for the roothubs, so we add one to the
4236 * address given back to us by the HC.
4238 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4239 le32_to_cpu(slot_ctx->dev_info) >> 27);
4240 /* Zero the input context control for later use */
4241 ctrl_ctx->add_flags = 0;
4242 ctrl_ctx->drop_flags = 0;
4243 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4244 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4246 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4247 "Internal device address = %d",
4248 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4250 mutex_unlock(&xhci->mutex);
4252 kfree(command->completion);
4258 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
4259 unsigned int timeout_ms)
4261 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4264 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
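/* SETUP_CONTEXT_ONLY issues Address Device with the BSR bit set: the slot
 * is moved to the Default state, but no USB address is assigned yet.
 */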
4266 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4267 XHCI_CMD_DEFAULT_TIMEOUT);
4271 * Translate the port index into the real index in the HW port status
4272 * registers. Calculate the offset between the port's PORTSC register
4273 * and the port status base, then divide by the number of per-port
4274 * registers to get the real index. The raw port number is 1-based.
4276 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4278 struct xhci_hub *rhub;
4280 rhub = xhci_get_rhub(hcd);
4281 return rhub->ports[port1 - 1]->hw_portnum + 1;
4285 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4286 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4288 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4289 struct usb_device *udev, u16 max_exit_latency)
4291 struct xhci_virt_device *virt_dev;
4292 struct xhci_command *command;
4293 struct xhci_input_control_ctx *ctrl_ctx;
4294 struct xhci_slot_ctx *slot_ctx;
4295 unsigned long flags;
4298 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4302 spin_lock_irqsave(&xhci->lock, flags);
4304 virt_dev = xhci->devs[udev->slot_id];
4307 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4308 * xHC was re-initialized. Exit latency will be set later after
4309 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4312 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4313 spin_unlock_irqrestore(&xhci->lock, flags);
4314 xhci_free_command(xhci, command);
4318 /* Attempt to issue an Evaluate Context command to change the MEL. */
4319 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4321 spin_unlock_irqrestore(&xhci->lock, flags);
4322 xhci_free_command(xhci, command);
4323 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4328 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4329 spin_unlock_irqrestore(&xhci->lock, flags);
4331 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4332 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4333 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4334 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4335 slot_ctx->dev_state = 0;
4337 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4338 "Set up evaluate context for LPM MEL change.");
4340 /* Issue and wait for the evaluate context command. */
4341 ret = xhci_configure_endpoint(xhci, udev, command,
4345 spin_lock_irqsave(&xhci->lock, flags);
4346 virt_dev->current_mel = max_exit_latency;
4347 spin_unlock_irqrestore(&xhci->lock, flags);
4350 xhci_free_command(xhci, command);
4357 /* BESL to HIRD Encoding array for USB2 LPM */
4358 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4359 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
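/* The index is the 4-bit BESL value; each entry is the corresponding exit
 * latency in microseconds, per the BESL table in the USB 2.0 LPM ECN.
 */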
4361 /* Calculate HIRD/BESL for USB2 PORTPMSC */
4362 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4363 struct usb_device *udev)
4365 int u2del, besl, besl_host;
4366 int besl_device = 0;
4369 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4370 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4372 if (field & USB_BESL_SUPPORT) {
4373 for (besl_host = 0; besl_host < 16; besl_host++) {
4374 if (xhci_besl_encoding[besl_host] >= u2del)
4377 /* Use baseline BESL value as default */
4378 if (field & USB_BESL_BASELINE_VALID)
4379 besl_device = USB_GET_BESL_BASELINE(field);
4380 else if (field & USB_BESL_DEEP_VALID)
4381 besl_device = USB_GET_BESL_DEEP(field);
4386 besl_host = (u2del - 51) / 75 + 1;
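/* No BESL support: HIRD encodes an exit latency of roughly 50 + HIRD * 75 us,
 * so the -51 / +1 rounding above picks the smallest HIRD covering U2DEL.
 */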
4389 besl = besl_host + besl_device;
4396 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4397 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4404 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4406 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4407 l1 = udev->l1_params.timeout / 256;
4409 /* device has preferred BESLD */
4410 if (field & USB_BESL_DEEP_VALID) {
4411 besld = USB_GET_BESL_DEEP(field);
4415 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4418 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4419 struct usb_device *udev, int enable)
4421 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4422 struct xhci_port **ports;
4423 __le32 __iomem *pm_addr, *hlpm_addr;
4424 u32 pm_val, hlpm_val, field;
4425 unsigned int port_num;
4426 unsigned long flags;
4427 int hird, exit_latency;
4430 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4433 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4437 if (!udev->parent || udev->parent->parent ||
4438 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4441 if (udev->usb2_hw_lpm_capable != 1)
4444 spin_lock_irqsave(&xhci->lock, flags);
4446 ports = xhci->usb2_rhub.ports;
4447 port_num = udev->portnum - 1;
4448 pm_addr = ports[port_num]->addr + PORTPMSC;
4449 pm_val = readl(pm_addr);
4450 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4452 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4453 enable ? "enable" : "disable", port_num + 1);
4456 /* Host supports BESL timeout instead of HIRD */
4457 if (udev->usb2_hw_lpm_besl_capable) {
4458 /* If the device doesn't have a preferred BESL value, use a
4459 * default one that works with mixed HIRD and BESL
4460 * systems. See the XHCI_DEFAULT_BESL definition in xhci.h
4462 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4463 if ((field & USB_BESL_SUPPORT) &&
4464 (field & USB_BESL_BASELINE_VALID))
4465 hird = USB_GET_BESL_BASELINE(field);
4467 hird = udev->l1_params.besl;
4469 exit_latency = xhci_besl_encoding[hird];
4470 spin_unlock_irqrestore(&xhci->lock, flags);
4472 ret = xhci_change_max_exit_latency(xhci, udev,
4476 spin_lock_irqsave(&xhci->lock, flags);
4478 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4479 writel(hlpm_val, hlpm_addr);
4483 hird = xhci_calculate_hird_besl(xhci, udev);
4486 pm_val &= ~PORT_HIRD_MASK;
4487 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4488 writel(pm_val, pm_addr);
4489 pm_val = readl(pm_addr);
4491 writel(pm_val, pm_addr);
4495 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4496 writel(pm_val, pm_addr);
4499 if (udev->usb2_hw_lpm_besl_capable) {
4500 spin_unlock_irqrestore(&xhci->lock, flags);
4501 xhci_change_max_exit_latency(xhci, udev, 0);
4502 readl_poll_timeout(ports[port_num]->addr, pm_val,
4503 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4509 spin_unlock_irqrestore(&xhci->lock, flags);
4513 /* Check whether a USB2 port supports a given extended capability protocol.
4514 * Only USB2 ports' extended protocol capability values are cached.
4515 * Return 1 if the capability is supported
4517 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4518 unsigned capability)
4520 u32 port_offset, port_count;
4523 for (i = 0; i < xhci->num_ext_caps; i++) {
4524 if (xhci->ext_caps[i] & capability) {
4525 /* port offsets start at 1 */
4526 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4527 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4528 if (port >= port_offset &&
4529 port < port_offset + port_count)
4536 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4538 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4539 int portnum = udev->portnum - 1;
4541 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4544 /* for now we only support LPM for non-hub devices connected to the root hub */
4545 if (!udev->parent || udev->parent->parent ||
4546 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4549 if (xhci->hw_lpm_support == 1 &&
4550 xhci_check_usb2_port_capability(
4551 xhci, portnum, XHCI_HLC)) {
4552 udev->usb2_hw_lpm_capable = 1;
4553 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4554 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4555 if (xhci_check_usb2_port_capability(xhci, portnum,
4557 udev->usb2_hw_lpm_besl_capable = 1;
4563 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4565 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4566 static unsigned long long xhci_service_interval_to_ns(
4567 struct usb_endpoint_descriptor *desc)
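/* e.g. bInterval = 4 gives 2^3 * 125 us = 1 ms = 1000000 ns */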
4569 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4572 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4573 enum usb3_link_state state)
4575 unsigned long long sel;
4576 unsigned long long pel;
4577 unsigned int max_sel_pel;
4582 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4583 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4584 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4585 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4589 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4590 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4591 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4595 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4597 return USB3_LPM_DISABLED;
4600 if (sel <= max_sel_pel && pel <= max_sel_pel)
4601 return USB3_LPM_DEVICE_INITIATED;
4603 if (sel > max_sel_pel)
4604 dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us\n",
4608 dev_dbg(&udev->dev, "Device-initiated %s disabled due to long PEL %llu us\n",
4611 return USB3_LPM_DISABLED;
4614 /* The U1 timeout should be the maximum of the following values:
4615 * - For control endpoints, U1 system exit latency (SEL) * 3
4616 * - For bulk endpoints, U1 SEL * 5
4617 * - For interrupt endpoints:
4618 * - Notification EPs, U1 SEL * 3
4619 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4620 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4622 static unsigned long long xhci_calculate_intel_u1_timeout(
4623 struct usb_device *udev,
4624 struct usb_endpoint_descriptor *desc)
4626 unsigned long long timeout_ns;
4630 ep_type = usb_endpoint_type(desc);
4632 case USB_ENDPOINT_XFER_CONTROL:
4633 timeout_ns = udev->u1_params.sel * 3;
4635 case USB_ENDPOINT_XFER_BULK:
4636 timeout_ns = udev->u1_params.sel * 5;
4638 case USB_ENDPOINT_XFER_INT:
4639 intr_type = usb_endpoint_interrupt_type(desc);
4640 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4641 timeout_ns = udev->u1_params.sel * 3;
4644 /* Otherwise the calculation is the same as isoc eps */
4646 case USB_ENDPOINT_XFER_ISOC:
4647 timeout_ns = xhci_service_interval_to_ns(desc);
4648 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4649 if (timeout_ns < udev->u1_params.sel * 2)
4650 timeout_ns = udev->u1_params.sel * 2;
4659 /* Returns the hub-encoded U1 timeout value. */
4660 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4661 struct usb_device *udev,
4662 struct usb_endpoint_descriptor *desc)
4664 unsigned long long timeout_ns;
4666 /* Prevent U1 if service interval is shorter than U1 exit latency */
4667 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4668 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4669 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4670 return USB3_LPM_DISABLED;
4674 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4675 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4677 timeout_ns = udev->u1_params.sel;
4679 /* The U1 timeout is encoded in 1us intervals.
4680 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4682 if (timeout_ns == USB3_LPM_DISABLED)
4685 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
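/* timeout_ns now holds the timeout in microseconds, the U1 field's units */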
4687 /* If the necessary timeout value is bigger than what we can set in the
4688 * USB 3.0 hub, we have to disable hub-initiated U1.
4690 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4692 dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %llu us\n",
4693 timeout_ns);
4694 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4697 /* The U2 timeout should be the maximum of:
4698 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4699 * - largest bInterval of any active periodic endpoint (to avoid going
4700 * into lower power link states between intervals).
4701 * - the U2 Exit Latency of the device
4703 static unsigned long long xhci_calculate_intel_u2_timeout(
4704 struct usb_device *udev,
4705 struct usb_endpoint_descriptor *desc)
4707 unsigned long long timeout_ns;
4708 unsigned long long u2_del_ns;
4710 timeout_ns = 10 * 1000 * 1000;
4712 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4713 (xhci_service_interval_to_ns(desc) > timeout_ns))
4714 timeout_ns = xhci_service_interval_to_ns(desc);
4716 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4717 if (u2_del_ns > timeout_ns)
4718 timeout_ns = u2_del_ns;
4723 /* Returns the hub-encoded U2 timeout value. */
4724 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4725 struct usb_device *udev,
4726 struct usb_endpoint_descriptor *desc)
4728 unsigned long long timeout_ns;
4730 /* Prevent U2 if service interval is shorter than U2 exit latency */
4731 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4732 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4733 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4734 return USB3_LPM_DISABLED;
4738 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4739 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4741 timeout_ns = udev->u2_params.sel;
4743 /* The U2 timeout is encoded in 256us intervals */
4744 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
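/* timeout_ns now holds the timeout in 256 us units, the U2 field's units */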
4745 /* If the necessary timeout value is bigger than what we can set in the
4746 * USB 3.0 hub, we have to disable hub-initiated U2.
4748 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4750 dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout of %llu (256 us units)\n",
4751 timeout_ns);
4752 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4755 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4756 struct usb_device *udev,
4757 struct usb_endpoint_descriptor *desc,
4758 enum usb3_link_state state,
4761 if (state == USB3_LPM_U1)
4762 return xhci_calculate_u1_timeout(xhci, udev, desc);
4763 else if (state == USB3_LPM_U2)
4764 return xhci_calculate_u2_timeout(xhci, udev, desc);
4766 return USB3_LPM_DISABLED;
4769 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4770 struct usb_device *udev,
4771 struct usb_endpoint_descriptor *desc,
4772 enum usb3_link_state state,
4777 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4778 desc, state, timeout);
4780 /* If we found we can't enable hub-initiated LPM, and
4781 * the U1 or U2 exit latency was too high to allow
4782 * device-initiated LPM as well, then we will disable LPM
4783 * for this device, so stop searching any further.
4785 if (alt_timeout == USB3_LPM_DISABLED) {
4786 *timeout = alt_timeout;
4789 if (alt_timeout > *timeout)
4790 *timeout = alt_timeout;
4794 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4795 struct usb_device *udev,
4796 struct usb_host_interface *alt,
4797 enum usb3_link_state state,
4802 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4803 if (xhci_update_timeout_for_endpoint(xhci, udev,
4804 &alt->endpoint[j].desc, state, timeout))
4810 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4811 struct usb_device *udev,
4812 enum usb3_link_state state)
4814 struct usb_device *parent = udev->parent;
4815 int tier = 1; /* roothub is tier1 */
4817 while (parent) {
4818 parent = parent->parent;
4819 tier++;
4820 }
4822 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4824 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4829 dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
4834 /* Returns the U1 or U2 timeout that should be enabled.
4835 * If the tier check or timeout setting functions return with a non-zero exit
4836 * code, that means the timeout value has been finalized and we shouldn't look
4837 * at any more endpoints.
4839 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4840 struct usb_device *udev, enum usb3_link_state state)
4842 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4843 struct usb_host_config *config;
4846 u16 timeout = USB3_LPM_DISABLED;
4848 if (state == USB3_LPM_U1)
4850 else if (state == USB3_LPM_U2)
4853 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4858 /* Gather some information about the currently installed configuration
4859 * and alternate interface settings.
4861 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4865 config = udev->actconfig;
4869 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4870 struct usb_driver *driver;
4871 struct usb_interface *intf = config->interface[i];
4876 /* Check if any currently bound drivers want hub-initiated LPM
4877 * disabled?
4878 */
4879 if (intf->dev.driver) {
4880 driver = to_usb_driver(intf->dev.driver);
4881 if (driver && driver->disable_hub_initiated_lpm) {
4882 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4883 state_name, driver->name);
4884 timeout = xhci_get_timeout_no_hub_lpm(udev,
4886 if (timeout == USB3_LPM_DISABLED)
4891 /* Not sure how this could happen... */
4892 if (!intf->cur_altsetting)
4895 if (xhci_update_timeout_for_interface(xhci, udev,
4896 intf->cur_altsetting,
4903 static int calculate_max_exit_latency(struct usb_device *udev,
4904 enum usb3_link_state state_changed,
4905 u16 hub_encoded_timeout)
4907 unsigned long long u1_mel_us = 0;
4908 unsigned long long u2_mel_us = 0;
4909 unsigned long long mel_us = 0;
4915 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4916 hub_encoded_timeout == USB3_LPM_DISABLED);
4917 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4918 hub_encoded_timeout == USB3_LPM_DISABLED);
4920 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4921 hub_encoded_timeout != USB3_LPM_DISABLED);
4922 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4923 hub_encoded_timeout != USB3_LPM_DISABLED);
4925 /* If U1 was already enabled and we're not disabling it,
4926 * or we're going to enable U1, account for the U1 max exit latency.
4928 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4930 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4931 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4933 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4935 mel_us = max(u1_mel_us, u2_mel_us);
4937 /* xHCI host controller max exit latency field is only 16 bits wide. */
4938 if (mel_us > MAX_EXIT) {
4939 dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
4940 mel_us);
4946 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4947 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4948 struct usb_device *udev, enum usb3_link_state state)
4950 struct xhci_hcd *xhci;
4951 struct xhci_port *port;
4952 u16 hub_encoded_timeout;
4956 xhci = hcd_to_xhci(hcd);
4957 /* The LPM timeout values are pretty host-controller specific, so don't
4958 * enable hub-initiated timeouts unless the vendor has provided
4959 * information about their timeout algorithm.
4961 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4962 !xhci->devs[udev->slot_id])
4963 return USB3_LPM_DISABLED;
4965 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4966 return USB3_LPM_DISABLED;
4968 /* If connected to a root port, check whether the port can handle LPM */
4969 if (udev->parent && !udev->parent->parent) {
4970 port = xhci->usb3_rhub.ports[udev->portnum - 1];
4971 if (port->lpm_incapable)
4972 return USB3_LPM_DISABLED;
4975 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4976 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4978 /* Max Exit Latency is too big, disable LPM. */
4979 hub_encoded_timeout = USB3_LPM_DISABLED;
4983 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4986 return hub_encoded_timeout;
4989 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4990 struct usb_device *udev, enum usb3_link_state state)
4992 struct xhci_hcd *xhci;
4995 xhci = hcd_to_xhci(hcd);
4996 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4997 !xhci->devs[udev->slot_id])
5000 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5001 return xhci_change_max_exit_latency(xhci, udev, mel);
5003 #else /* CONFIG_PM */
5005 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5006 struct usb_device *udev, int enable)
5011 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5016 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5017 struct usb_device *udev, enum usb3_link_state state)
5019 return USB3_LPM_DISABLED;
5022 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5023 struct usb_device *udev, enum usb3_link_state state)
5027 #endif /* CONFIG_PM */
5029 /*-------------------------------------------------------------------------*/
5031 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
5032 * internal data structures for the device.
5034 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5035 struct usb_tt *tt, gfp_t mem_flags)
5037 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5038 struct xhci_virt_device *vdev;
5039 struct xhci_command *config_cmd;
5040 struct xhci_input_control_ctx *ctrl_ctx;
5041 struct xhci_slot_ctx *slot_ctx;
5042 unsigned long flags;
5043 unsigned think_time;
5046 /* Ignore root hubs */
5050 vdev = xhci->devs[hdev->slot_id];
5052 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5056 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5060 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5062 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5064 xhci_free_command(xhci, config_cmd);
5068 spin_lock_irqsave(&xhci->lock, flags);
5069 if (hdev->speed == USB_SPEED_HIGH &&
5070 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5071 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5072 xhci_free_command(xhci, config_cmd);
5073 spin_unlock_irqrestore(&xhci->lock, flags);
5077 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5078 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5079 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5080 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5082 * refer to section 6.2.2: MTT should be 0 for a full-speed hub,
5083 * but it may already be set to 1 when setting up an xHCI virtual
5084 * device, so clear it anyway.
5087 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5088 else if (hdev->speed == USB_SPEED_FULL)
5089 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5091 if (xhci->hci_version > 0x95) {
5092 xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
5094 (unsigned int) xhci->hci_version);
5095 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5096 /* Set TT think time - convert from ns to FS bit times.
5097 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5098 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5100 * xHCI 1.0: this field shall be 0 if the device is not a
5101 * high-speed hub.
5103 think_time = tt->think_time;
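/* One FS bit time is ~83 ns, so 8 bit times is ~666 ns; the field encodes
 * (number of 8-bit-time units) - 1.
 */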
5104 if (think_time != 0)
5105 think_time = (think_time / 666) - 1;
5106 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5107 slot_ctx->tt_info |=
5108 cpu_to_le32(TT_THINK_TIME(think_time));
5110 xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
5112 (unsigned int) xhci->hci_version);
5114 slot_ctx->dev_state = 0;
5115 spin_unlock_irqrestore(&xhci->lock, flags);
5117 xhci_dbg(xhci, "Set up %s for hub device.\n",
5118 (xhci->hci_version > 0x95) ?
5119 "configure endpoint" : "evaluate context");
5121 /* Issue and wait for the configure endpoint or
5122 * evaluate context command.
5124 if (xhci->hci_version > 0x95)
5125 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5128 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5131 xhci_free_command(xhci, config_cmd);
5134 EXPORT_SYMBOL_GPL(xhci_update_hub_device);
5136 static int xhci_get_frame(struct usb_hcd *hcd)
5138 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5139 /* EHCI mods by the periodic size. Why? */
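/* MFINDEX counts 125 us microframes; shifting right by 3 yields the 1 ms frame number. */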
5140 return readl(&xhci->run_regs->microframe_index) >> 3;
5143 static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5145 xhci->usb2_rhub.hcd = hcd;
5146 hcd->speed = HCD_USB2;
5147 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5149 * A USB 2.0 roothub under xHCI has an integrated TT
5150 * (rate matching hub), as opposed to having an OHCI/UHCI
5151 * companion controller.
5156 static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
5158 unsigned int minor_rev;
5161 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
5162 * should return 0x31 for sbrn, or that the minor revision
5163 * is a two-digit BCD containing minor and sub-minor numbers.
5164 * This was later clarified in xHCI 1.2.
5166 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5167 * minor revision set to 0x1 instead of 0x10.
5169 if (xhci->usb3_rhub.min_rev == 0x1)
5172 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
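/* The minor revision is BCD; the high nibble is the minor digit,
 * e.g. 0x10 -> USB 3.1, 0x20 -> USB 3.2.
 */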
5174 switch (minor_rev) {
5176 hcd->speed = HCD_USB32;
5177 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5178 hcd->self.root_hub->rx_lanes = 2;
5179 hcd->self.root_hub->tx_lanes = 2;
5180 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5183 hcd->speed = HCD_USB31;
5184 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5185 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5188 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5189 minor_rev, minor_rev ? "Enhanced " : "");
5191 xhci->usb3_rhub.hcd = hcd;
5194 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5196 struct xhci_hcd *xhci;
5198 * TODO: Check with DWC3 clients for sysdev according to
5199 * quirks
5201 struct device *dev = hcd->self.sysdev;
5204 /* Accept arbitrarily long scatter-gather lists */
5205 hcd->self.sg_tablesize = ~0;
5207 /* support to build packet from discontinuous buffers */
5208 hcd->self.no_sg_constraint = 1;
5210 /* XHCI controllers don't stop the ep queue on short packets :| */
5211 hcd->self.no_stop_on_short = 1;
5213 xhci = hcd_to_xhci(hcd);
5215 if (!usb_hcd_is_primary_hcd(hcd)) {
5216 xhci_hcd_init_usb3_data(xhci, hcd);
5220 mutex_init(&xhci->mutex);
5221 xhci->main_hcd = hcd;
5222 xhci->cap_regs = hcd->regs;
5223 xhci->op_regs = hcd->regs +
5224 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5225 xhci->run_regs = hcd->regs +
5226 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5227 /* Cache read-only capability registers */
5228 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5229 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5230 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
5231 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
5232 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5233 if (xhci->hci_version > 0x100)
5234 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5236 /* xhci-plat or xhci-pci might have set max_interrupters already */
5237 if ((!xhci->max_interrupters) ||
5238 xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
5239 xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
5241 xhci->quirks |= quirks;
5244 get_quirks(dev, xhci);
5246 /* xHCI controllers that follow the xHCI 1.0 spec may give a spurious
5247 * success event after a short transfer. This quirk ignores such
5248 * spurious events.
5249 */
5250 if (xhci->hci_version > 0x96)
5251 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5253 /* Make sure the HC is halted. */
5254 retval = xhci_halt(xhci);
5258 xhci_zero_64b_regs(xhci);
5260 xhci_dbg(xhci, "Resetting HCD\n");
5261 /* Reset the internal HC memory state and registers. */
5262 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
5265 xhci_dbg(xhci, "Reset complete\n");
5268 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5269 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
5270 * address memory pointers actually. So, this driver clears the AC64
5271 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
5272 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
5274 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5275 xhci->hcc_params &= ~BIT(0);
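/* AC64 is bit 0 of HCCPARAMS1; clearing the cached copy forces the
 * 32-bit DMA mask fallback below.
 */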
5277 /* Set dma_mask and coherent_dma_mask to 64-bits,
5278 * if xHC supports 64-bit addressing */
5279 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5280 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5281 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5282 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5285 * This is to avoid errors in cases where a 32-bit USB
5286 * controller is used on a 64-bit capable system.
5288 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5291 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5292 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5295 xhci_dbg(xhci, "Calling HCD init\n");
5296 /* Initialize HCD and host controller data structures. */
5297 retval = xhci_init(hcd);
5300 xhci_dbg(xhci, "Called HCD init\n");
5302 if (xhci_hcd_is_usb3(hcd))
5303 xhci_hcd_init_usb3_data(xhci, hcd);
5305 xhci_hcd_init_usb2_data(xhci, hcd);
5307 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5308 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5312 EXPORT_SYMBOL_GPL(xhci_gen_setup);
5314 static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5315 struct usb_host_endpoint *ep)
5317 struct xhci_hcd *xhci;
5318 struct usb_device *udev;
5319 unsigned int slot_id;
5320 unsigned int ep_index;
5321 unsigned long flags;
5323 xhci = hcd_to_xhci(hcd);
5325 spin_lock_irqsave(&xhci->lock, flags);
5326 udev = (struct usb_device *)ep->hcpriv;
5327 slot_id = udev->slot_id;
5328 ep_index = xhci_get_endpoint_index(&ep->desc);
5330 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5331 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5332 spin_unlock_irqrestore(&xhci->lock, flags);
5335 static const struct hc_driver xhci_hc_driver = {
5336 .description = "xhci-hcd",
5337 .product_desc = "xHCI Host Controller",
5338 .hcd_priv_size = sizeof(struct xhci_hcd),
5341 * generic hardware linkage
5344 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5348 * basic lifecycle operations
5350 .reset = NULL, /* set in xhci_init_driver() */
5353 .shutdown = xhci_shutdown,
5356 * managing i/o requests and associated device resources
5358 .map_urb_for_dma = xhci_map_urb_for_dma,
5359 .unmap_urb_for_dma = xhci_unmap_urb_for_dma,
5360 .urb_enqueue = xhci_urb_enqueue,
5361 .urb_dequeue = xhci_urb_dequeue,
5362 .alloc_dev = xhci_alloc_dev,
5363 .free_dev = xhci_free_dev,
5364 .alloc_streams = xhci_alloc_streams,
5365 .free_streams = xhci_free_streams,
5366 .add_endpoint = xhci_add_endpoint,
5367 .drop_endpoint = xhci_drop_endpoint,
5368 .endpoint_disable = xhci_endpoint_disable,
5369 .endpoint_reset = xhci_endpoint_reset,
5370 .check_bandwidth = xhci_check_bandwidth,
5371 .reset_bandwidth = xhci_reset_bandwidth,
5372 .address_device = xhci_address_device,
5373 .enable_device = xhci_enable_device,
5374 .update_hub_device = xhci_update_hub_device,
5375 .reset_device = xhci_discover_or_reset_device,
5378 * scheduling support
5380 .get_frame_number = xhci_get_frame,
5385 .hub_control = xhci_hub_control,
5386 .hub_status_data = xhci_hub_status_data,
5387 .bus_suspend = xhci_bus_suspend,
5388 .bus_resume = xhci_bus_resume,
5389 .get_resuming_ports = xhci_get_resuming_ports,
5392 * call back when device connected and addressed
5394 .update_device = xhci_update_device,
5395 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5396 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5397 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5398 .find_raw_port_number = xhci_find_raw_port_number,
5399 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5402 void xhci_init_driver(struct hc_driver *drv,
5403 const struct xhci_driver_overrides *over)
5407 /* Copy the generic table to drv then apply the overrides */
5408 *drv = xhci_hc_driver;
5411 drv->hcd_priv_size += over->extra_priv_size;
5413 drv->reset = over->reset;
5415 drv->start = over->start;
5416 if (over->add_endpoint)
5417 drv->add_endpoint = over->add_endpoint;
5418 if (over->drop_endpoint)
5419 drv->drop_endpoint = over->drop_endpoint;
5420 if (over->check_bandwidth)
5421 drv->check_bandwidth = over->check_bandwidth;
5422 if (over->reset_bandwidth)
5423 drv->reset_bandwidth = over->reset_bandwidth;
5424 if (over->update_hub_device)
5425 drv->update_hub_device = over->update_hub_device;
5426 if (over->hub_control)
5427 drv->hub_control = over->hub_control;
5430 EXPORT_SYMBOL_GPL(xhci_init_driver);
5432 MODULE_DESCRIPTION(DRIVER_DESC);
5433 MODULE_AUTHOR(DRIVER_AUTHOR);
5434 MODULE_LICENSE("GPL");
5436 static int __init xhci_hcd_init(void)
5439 * Check the compiler generated sizes of structures that must be laid
5440 * out in specific ways for hardware access.
5442 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5443 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5444 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5445 /* xhci_device_control has eight fields, and also
5446 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5448 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5449 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5450 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5451 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5452 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5453 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
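/* i.e. 32 bytes of run registers plus 128 interrupter sets of 32 bytes each */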
5454 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
5459 xhci_debugfs_create_root();
5466 * If an init function is provided, an exit function must also be provided
5467 * to allow module unload.
5469 static void __exit xhci_hcd_fini(void)
5471 xhci_debugfs_remove_root();
5475 module_init(xhci_hcd_init);
5476 module_exit(xhci_hcd_fini);