1 // SPDX-License-Identifier: GPL-2.0
3 * xHCI host controller driver
5 * Copyright (C) 2008 Intel Corp.
8 * Some code borrowed from the Linux EHCI driver.
11 #include <linux/usb.h>
12 #include <linux/pci.h>
13 #include <linux/slab.h>
14 #include <linux/dmapool.h>
15 #include <linux/dma-mapping.h>
18 #include "xhci-trace.h"
19 #include "xhci-debugfs.h"
22 * Allocates a generic ring segment from the ring pool, sets the dma address,
23 * initializes the segment to zero, and sets the private next pointer to NULL.
26 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
28 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
29 unsigned int cycle_state,
30 unsigned int max_packet,
33 struct xhci_segment *seg;
36 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
38 seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
42 seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
49 seg->bounce_buf = kzalloc_node(max_packet, flags,
51 if (!seg->bounce_buf) {
52 dma_pool_free(xhci->segment_pool, seg->trbs, dma);
57 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
58 if (cycle_state == 0) {
59 for (i = 0; i < TRBS_PER_SEGMENT; i++)
60 seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
68 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
71 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
74 kfree(seg->bounce_buf);
78 static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
79 struct xhci_segment *first)
81 struct xhci_segment *seg;
84 while (seg != first) {
85 struct xhci_segment *next = seg->next;
86 xhci_segment_free(xhci, seg);
89 xhci_segment_free(xhci, first);
93 * Make the prev segment point to the next segment.
95 * Change the last TRB in the prev segment to be a Link TRB which points to the
96 * DMA address of the next segment. The caller needs to set any Link TRB
97 * related flags, such as End TRB, Toggle Cycle, and no snoop.
99 static void xhci_link_segments(struct xhci_segment *prev,
100 struct xhci_segment *next,
101 enum xhci_ring_type type, bool chain_links)
108 if (type != TYPE_EVENT) {
109 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
110 cpu_to_le64(next->dma);
112 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
113 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
114 val &= ~TRB_TYPE_BITMASK;
115 val |= TRB_TYPE(TRB_LINK);
118 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
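/*
 * Illustrative sketch (assumed caller, mirroring xhci_ring_alloc() below):
 * once the last segment is linked back to the first, the ring owner marks
 * the wrap point so the cycle interpretation flips on each pass around
 * the ring:
 *
 *	xhci_link_segments(last, first, type, chain_links);
 *	last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 *		cpu_to_le32(LINK_TOGGLE);
 */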
123 * Link the ring to the new segments.
124 * Set Toggle Cycle for the new ring if needed.
126 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
127 struct xhci_segment *first, struct xhci_segment *last,
128 unsigned int num_segs)
130 struct xhci_segment *next;
133 if (!ring || !first || !last)
136 /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
137 chain_links = !!(xhci_link_trb_quirk(xhci) ||
138 (ring->type == TYPE_ISOC &&
139 (xhci->quirks & XHCI_AMD_0x96_HOST)));
141 next = ring->enq_seg->next;
142 xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
143 xhci_link_segments(last, next, ring->type, chain_links);
144 ring->num_segs += num_segs;
145 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
147 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
148 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
149 &= ~cpu_to_le32(LINK_TOGGLE);
150 last->trbs[TRBS_PER_SEGMENT-1].link.control
151 |= cpu_to_le32(LINK_TOGGLE);
152 ring->last_seg = last;
157 * We need a radix tree for mapping physical addresses of TRBs to which stream
158 * ID they belong to. We need to do this because the host controller won't tell
159 * us which stream ring the TRB came from. We could store the stream ID in an
160 * event data TRB, but that doesn't help us for the cancellation case, since the
161 * endpoint may stop before it reaches that event data TRB.
163 * The radix tree maps the upper portion of the TRB DMA address to a ring
164 * segment that has the same upper portion of DMA addresses. For example, say I
165 * have segments of size 1KB, that are always 1KB aligned. A segment may
166 * start at 0x10c91000 and end at 0x10c913f0. If I shift off the lower 10 bits, the
167 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
168 * pass the radix tree a key to get the right stream ID:
170 * 0x10c90fff >> 10 = 0x43243
171 * 0x10c912c0 >> 10 = 0x43244
172 * 0x10c91400 >> 10 = 0x43245
174 * Obviously, only those TRBs with DMA addresses that are within the segment
175 * will make the radix tree return the stream ID for that ring.
177 * Caveats for the radix tree:
179 * The radix tree uses an unsigned long as its key. On 32-bit systems, an
180 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
181 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
182 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
183 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
184 * extended systems (where the DMA address can be bigger than 32-bits),
185 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
187 static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
188 struct xhci_ring *ring,
189 struct xhci_segment *seg,
195 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
196 /* Skip any segments that were already added. */
197 if (radix_tree_lookup(trb_address_map, key))
200 ret = radix_tree_maybe_preload(mem_flags);
203 ret = radix_tree_insert(trb_address_map,
205 radix_tree_preload_end();
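/*
 * Illustrative sketch (this mirrors xhci_dma_to_transfer_ring() below):
 * recovering the ring that owns a TRB DMA address reported by the
 * hardware is a single radix tree probe on the shifted address:
 *
 *	struct xhci_ring *ring;
 *
 *	ring = radix_tree_lookup(trb_address_map,
 *			(unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT));
 */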
209 static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
210 struct xhci_segment *seg)
214 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
215 if (radix_tree_lookup(trb_address_map, key))
216 radix_tree_delete(trb_address_map, key);
219 static int xhci_update_stream_segment_mapping(
220 struct radix_tree_root *trb_address_map,
221 struct xhci_ring *ring,
222 struct xhci_segment *first_seg,
223 struct xhci_segment *last_seg,
226 struct xhci_segment *seg;
227 struct xhci_segment *failed_seg;
230 if (WARN_ON_ONCE(trb_address_map == NULL))
235 ret = xhci_insert_segment_mapping(trb_address_map,
236 ring, seg, mem_flags);
242 } while (seg != first_seg);
250 xhci_remove_segment_mapping(trb_address_map, seg);
251 if (seg == failed_seg)
254 } while (seg != first_seg);
259 static void xhci_remove_stream_mapping(struct xhci_ring *ring)
261 struct xhci_segment *seg;
263 if (WARN_ON_ONCE(ring->trb_address_map == NULL))
266 seg = ring->first_seg;
268 xhci_remove_segment_mapping(ring->trb_address_map, seg);
270 } while (seg != ring->first_seg);
273 static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
275 return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
276 ring->first_seg, ring->last_seg, mem_flags);
279 /* XXX: Do we need the hcd structure in all these functions? */
280 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
285 trace_xhci_ring_free(ring);
287 if (ring->first_seg) {
288 if (ring->type == TYPE_STREAM)
289 xhci_remove_stream_mapping(ring);
290 xhci_free_segments_for_ring(xhci, ring->first_seg);
296 void xhci_initialize_ring_info(struct xhci_ring *ring,
297 unsigned int cycle_state)
299 /* The ring is empty, so the enqueue pointer == dequeue pointer */
300 ring->enqueue = ring->first_seg->trbs;
301 ring->enq_seg = ring->first_seg;
302 ring->dequeue = ring->enqueue;
303 ring->deq_seg = ring->first_seg;
304 /* The ring is initialized to 0. The producer must write 1 to the cycle
305 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
306 * compare CCS to the cycle bit to check ownership, so CCS = 1.
308 * New rings are initialized with cycle state equal to 1; if we are
309 * handling ring expansion, set the cycle state equal to the old ring.
311 ring->cycle_state = cycle_state;
314 * Each segment has a link TRB, and we leave an extra TRB for SW accounting.
317 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
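/*
 * Worked example (assuming the usual TRBS_PER_SEGMENT of 256): a freshly
 * allocated two-segment ring reports 2 * (256 - 1) - 1 = 509 free TRBs,
 * since each segment donates one slot to its link TRB and one more TRB is
 * held back ring-wide for software accounting.
 */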
320 /* Allocate segments and link them for a ring */
321 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
322 struct xhci_segment **first, struct xhci_segment **last,
323 unsigned int num_segs, unsigned int cycle_state,
324 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
326 struct xhci_segment *prev;
329 /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
330 chain_links = !!(xhci_link_trb_quirk(xhci) ||
331 (type == TYPE_ISOC &&
332 (xhci->quirks & XHCI_AMD_0x96_HOST)));
334 prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
340 while (num_segs > 0) {
341 struct xhci_segment *next;
343 next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
348 xhci_segment_free(xhci, prev);
353 xhci_link_segments(prev, next, type, chain_links);
358 xhci_link_segments(prev, *first, type, chain_links);
365 * Create a new ring with zero or more segments.
367 * Link each segment together into a ring.
368 * Set the end flag and the cycle toggle bit on the last segment.
369 * See section 4.9.1 and figures 15 and 16.
371 struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
372 unsigned int num_segs, unsigned int cycle_state,
373 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
375 struct xhci_ring *ring;
377 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
379 ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
383 ring->num_segs = num_segs;
384 ring->bounce_buf_len = max_packet;
385 INIT_LIST_HEAD(&ring->td_list);
390 ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
391 &ring->last_seg, num_segs, cycle_state, type,
396 /* Only event ring does not use link TRB */
397 if (type != TYPE_EVENT) {
398 /* See section 4.9.2.1 and 6.4.4.1 */
399 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
400 cpu_to_le32(LINK_TOGGLE);
402 xhci_initialize_ring_info(ring, cycle_state);
403 trace_xhci_ring_alloc(ring);
411 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
412 struct xhci_virt_device *virt_dev,
413 unsigned int ep_index)
415 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
416 virt_dev->eps[ep_index].ring = NULL;
420 * Expand an existing ring.
421 * Allocate a new ring with the same number of segments and link the two rings.
423 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
424 unsigned int num_trbs, gfp_t flags)
426 struct xhci_segment *first;
427 struct xhci_segment *last;
428 unsigned int num_segs;
429 unsigned int num_segs_needed;
432 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
433 (TRBS_PER_SEGMENT - 1);
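/*
 * This is a round-up division by the usable TRBs per segment. For
 * example, with TRBS_PER_SEGMENT == 256 each segment holds 255 usable
 * TRBs, so asking for 300 TRBs yields (300 + 254) / 255 = 2 segments.
 */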
435 /* Allocate the number of segments we need, or double the ring size */
436 num_segs = max(ring->num_segs, num_segs_needed);
438 ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
439 num_segs, ring->cycle_state, ring->type,
440 ring->bounce_buf_len, flags);
444 if (ring->type == TYPE_STREAM)
445 ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
446 ring, first, last, flags);
448 struct xhci_segment *next;
451 xhci_segment_free(xhci, first);
459 xhci_link_rings(xhci, ring, first, last, num_segs);
460 trace_xhci_ring_expansion(ring);
461 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
462 "ring expansion succeed, now has %d segments",
468 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
469 int type, gfp_t flags)
471 struct xhci_container_ctx *ctx;
472 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
474 if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
477 ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
482 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
483 if (type == XHCI_CTX_TYPE_INPUT)
484 ctx->size += CTX_SIZE(xhci->hcc_params);
486 ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
494 void xhci_free_container_ctx(struct xhci_hcd *xhci,
495 struct xhci_container_ctx *ctx)
499 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
503 struct xhci_input_control_ctx *xhci_get_input_control_ctx(
504 struct xhci_container_ctx *ctx)
506 if (ctx->type != XHCI_CTX_TYPE_INPUT)
509 return (struct xhci_input_control_ctx *)ctx->bytes;
512 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
513 struct xhci_container_ctx *ctx)
515 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
516 return (struct xhci_slot_ctx *)ctx->bytes;
518 return (struct xhci_slot_ctx *)
519 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
522 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
523 struct xhci_container_ctx *ctx,
524 unsigned int ep_index)
526 /* increment ep index by offset of start of ep ctx array */
528 if (ctx->type == XHCI_CTX_TYPE_INPUT)
531 return (struct xhci_ep_ctx *)
532 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
534 EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
536 /***************** Streams structures manipulation *************************/
538 static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
539 unsigned int num_stream_ctxs,
540 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
542 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
543 size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
545 if (size > MEDIUM_STREAM_ARRAY_SIZE)
546 dma_free_coherent(dev, size,
548 else if (size <= SMALL_STREAM_ARRAY_SIZE)
549 return dma_pool_free(xhci->small_streams_pool,
552 return dma_pool_free(xhci->medium_streams_pool,
557 * The stream context array for each endpoint with bulk streams enabled can
558 * vary in size, based on:
559 * - how many streams the endpoint supports,
560 * - the maximum primary stream array size the host controller supports,
561 * - and how many streams the device driver asks for.
563 * The stream context array must be a power of 2, and can be as small as
564 * 64 bytes or as large as 1MB.
566 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
567 unsigned int num_stream_ctxs, dma_addr_t *dma,
570 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
571 size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
573 if (size > MEDIUM_STREAM_ARRAY_SIZE)
574 return dma_alloc_coherent(dev, size,
576 else if (size <= SMALL_STREAM_ARRAY_SIZE)
577 return dma_pool_alloc(xhci->small_streams_pool,
580 return dma_pool_alloc(xhci->medium_streams_pool,
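/*
 * Sizing example (assuming the 16-byte struct xhci_stream_ctx): 256
 * stream context entries need a 4096-byte array, which exceeds
 * MEDIUM_STREAM_ARRAY_SIZE and therefore comes from dma_alloc_coherent()
 * above rather than from one of the stream context dma_pools.
 */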
584 struct xhci_ring *xhci_dma_to_transfer_ring(
585 struct xhci_virt_ep *ep,
588 if (ep->ep_state & EP_HAS_STREAMS)
589 return radix_tree_lookup(&ep->stream_info->trb_address_map,
590 address >> TRB_SEGMENT_SHIFT);
595 * Change an endpoint's internal structure so it supports stream IDs. The
596 * number of requested streams includes stream 0, which cannot be used by device drivers.
599 * The number of stream contexts in the stream context array may be bigger than
600 * the number of streams the driver wants to use. This is because the number of
601 * stream context array entries must be a power of two.
603 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
604 unsigned int num_stream_ctxs,
605 unsigned int num_streams,
606 unsigned int max_packet, gfp_t mem_flags)
608 struct xhci_stream_info *stream_info;
610 struct xhci_ring *cur_ring;
613 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
615 xhci_dbg(xhci, "Allocating %u streams and %u "
616 "stream context array entries.\n",
617 num_streams, num_stream_ctxs);
618 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
619 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
622 xhci->cmd_ring_reserved_trbs++;
624 stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
629 stream_info->num_streams = num_streams;
630 stream_info->num_stream_ctxs = num_stream_ctxs;
632 /* Initialize the array of virtual pointers to stream rings. */
633 stream_info->stream_rings = kcalloc_node(
634 num_streams, sizeof(struct xhci_ring *), mem_flags,
636 if (!stream_info->stream_rings)
639 /* Initialize the array of DMA addresses for stream rings for the HW. */
640 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
641 num_stream_ctxs, &stream_info->ctx_array_dma,
643 if (!stream_info->stream_ctx_array)
645 memset(stream_info->stream_ctx_array, 0,
646 sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
648 /* Allocate everything needed to free the stream rings later */
649 stream_info->free_streams_command =
650 xhci_alloc_command_with_ctx(xhci, true, mem_flags);
651 if (!stream_info->free_streams_command)
654 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
656 /* Allocate rings for all the streams that the driver will use,
657 * and add their segment DMA addresses to the radix tree.
658 * Stream 0 is reserved.
661 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
662 stream_info->stream_rings[cur_stream] =
663 xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
665 cur_ring = stream_info->stream_rings[cur_stream];
668 cur_ring->stream_id = cur_stream;
669 cur_ring->trb_address_map = &stream_info->trb_address_map;
670 /* Set deq ptr, cycle bit, and stream context type */
671 addr = cur_ring->first_seg->dma |
672 SCT_FOR_CTX(SCT_PRI_TR) |
673 cur_ring->cycle_state;
674 stream_info->stream_ctx_array[cur_stream].stream_ring =
676 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
677 cur_stream, (unsigned long long) addr);
679 ret = xhci_update_stream_mapping(cur_ring, mem_flags);
681 xhci_ring_free(xhci, cur_ring);
682 stream_info->stream_rings[cur_stream] = NULL;
686 /* Leave the other unused stream ring pointers in the stream context
687 * array initialized to zero. This will cause the xHC to give us an
688 * error if the device asks for a stream ID we haven't set up (if it
689 * was any other way, the host controller would assume the ring is
690 * "empty" and wait forever for data to be queued to that stream ID).
696 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
697 cur_ring = stream_info->stream_rings[cur_stream];
699 xhci_ring_free(xhci, cur_ring);
700 stream_info->stream_rings[cur_stream] = NULL;
703 xhci_free_command(xhci, stream_info->free_streams_command);
705 kfree(stream_info->stream_rings);
709 xhci->cmd_ring_reserved_trbs--;
713 * Sets the MaxPStreams field and the Linear Stream Array field.
714 * Sets the dequeue pointer to the stream context array.
716 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
717 struct xhci_ep_ctx *ep_ctx,
718 struct xhci_stream_info *stream_info)
720 u32 max_primary_streams;
721 /* MaxPStreams is the number of stream context array entries, not the
722 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
723 * fls(0) = 0, fls(0x1) = 1, fls(0x2) = 2, fls(0x4) = 3, etc.
725 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
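/*
 * Worked example: num_stream_ctxs == 256 gives fls(256) - 2 = 9 - 2 = 7,
 * and the xHC decodes MaxPStreams == 7 back to 2^(7 + 1) = 256 entries.
 */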
726 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
727 "Setting number of stream ctx array entries to %u",
728 1 << (max_primary_streams + 1));
729 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
730 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
732 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
736 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
737 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
738 * not at the beginning of the ring).
740 void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
741 struct xhci_virt_ep *ep)
744 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
745 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
746 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
749 /* Frees all stream contexts associated with the endpoint.
751 * Caller should fix the endpoint context streams fields.
753 void xhci_free_stream_info(struct xhci_hcd *xhci,
754 struct xhci_stream_info *stream_info)
757 struct xhci_ring *cur_ring;
762 for (cur_stream = 1; cur_stream < stream_info->num_streams;
764 cur_ring = stream_info->stream_rings[cur_stream];
766 xhci_ring_free(xhci, cur_ring);
767 stream_info->stream_rings[cur_stream] = NULL;
770 xhci_free_command(xhci, stream_info->free_streams_command);
771 xhci->cmd_ring_reserved_trbs--;
772 if (stream_info->stream_ctx_array)
773 xhci_free_stream_ctx(xhci,
774 stream_info->num_stream_ctxs,
775 stream_info->stream_ctx_array,
776 stream_info->ctx_array_dma);
778 kfree(stream_info->stream_rings);
783 /***************** Device context manipulation *************************/
785 static void xhci_free_tt_info(struct xhci_hcd *xhci,
786 struct xhci_virt_device *virt_dev,
789 struct list_head *tt_list_head;
790 struct xhci_tt_bw_info *tt_info, *next;
791 bool slot_found = false;
793 /* If the device never made it past the Set Address stage,
794 * it may not have the real_port set correctly.
796 if (virt_dev->real_port == 0 ||
797 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
798 xhci_dbg(xhci, "Bad real port.\n");
802 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
803 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
804 /* Multi-TT hubs will have more than one entry */
805 if (tt_info->slot_id == slot_id) {
807 list_del(&tt_info->tt_list);
809 } else if (slot_found) {
815 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
816 struct xhci_virt_device *virt_dev,
817 struct usb_device *hdev,
818 struct usb_tt *tt, gfp_t mem_flags)
820 struct xhci_tt_bw_info *tt_info;
821 unsigned int num_ports;
823 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
828 num_ports = hdev->maxchild;
830 for (i = 0; i < num_ports; i++, tt_info++) {
831 struct xhci_interval_bw_table *bw_table;
833 tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
837 INIT_LIST_HEAD(&tt_info->tt_list);
838 list_add(&tt_info->tt_list,
839 &xhci->rh_bw[virt_dev->real_port - 1].tts);
840 tt_info->slot_id = virt_dev->udev->slot_id;
842 tt_info->ttport = i+1;
843 bw_table = &tt_info->bw_table;
844 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
845 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
850 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
855 /* All the xhci_tds in the ring's TD list should be freed at this point.
856 * Should be called with xhci->lock held if there is any chance the TT lists
857 * will be manipulated by the configure endpoint, allocate device, or update
858 * hub functions while this function is removing the TT entries from the list.
860 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
862 struct xhci_virt_device *dev;
864 int old_active_eps = 0;
866 /* Slot ID 0 is reserved */
867 if (slot_id == 0 || !xhci->devs[slot_id])
870 dev = xhci->devs[slot_id];
872 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
876 trace_xhci_free_virt_device(dev);
879 old_active_eps = dev->tt_info->active_eps;
881 for (i = 0; i < 31; i++) {
882 if (dev->eps[i].ring)
883 xhci_ring_free(xhci, dev->eps[i].ring);
884 if (dev->eps[i].stream_info)
885 xhci_free_stream_info(xhci,
886 dev->eps[i].stream_info);
887 /* Endpoints on the TT/root port lists should have been removed
888 * when usb_disable_device() was called for the device.
889 * We can't drop them anyway, because the udev might have gone
890 * away by this point, and we can't tell what speed it was.
892 if (!list_empty(&dev->eps[i].bw_endpoint_list))
893 xhci_warn(xhci, "Slot %u endpoint %u "
894 "not removed from BW list!\n",
897 /* If this is a hub, free the TT(s) from the TT list */
898 xhci_free_tt_info(xhci, dev, slot_id);
899 /* If necessary, update the number of active TTs on this root port */
900 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
903 xhci_free_container_ctx(xhci, dev->in_ctx);
905 xhci_free_container_ctx(xhci, dev->out_ctx);
907 if (dev->udev && dev->udev->slot_id)
908 dev->udev->slot_id = 0;
909 kfree(xhci->devs[slot_id]);
910 xhci->devs[slot_id] = NULL;
914 * Free a virt_device structure.
915 * If the virt_device added a tt_info (a hub) and has children pointing to
916 * that tt_info, then free the child first. Recursive.
917 * We can't rely on udev at this point to find child-parent relationships.
919 static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
921 struct xhci_virt_device *vdev;
922 struct list_head *tt_list_head;
923 struct xhci_tt_bw_info *tt_info, *next;
926 vdev = xhci->devs[slot_id];
930 if (vdev->real_port == 0 ||
931 vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
932 xhci_dbg(xhci, "Bad vdev->real_port.\n");
936 tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
937 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
938 /* is this a hub device that added a tt_info to the tts list? */
939 if (tt_info->slot_id == slot_id) {
940 /* are any devices using this tt_info? */
941 for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
942 vdev = xhci->devs[i];
943 if (vdev && (vdev->tt_info == tt_info))
944 xhci_free_virt_devices_depth_first(
950 /* we are now at a leaf device */
951 xhci_debugfs_remove_slot(xhci, slot_id);
952 xhci_free_virt_device(xhci, slot_id);
955 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
956 struct usb_device *udev, gfp_t flags)
958 struct xhci_virt_device *dev;
961 /* Slot ID 0 is reserved */
962 if (slot_id == 0 || xhci->devs[slot_id]) {
963 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
967 dev = kzalloc(sizeof(*dev), flags);
971 dev->slot_id = slot_id;
973 /* Allocate the (output) device context that will be used in the HC. */
974 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
978 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
979 (unsigned long long)dev->out_ctx->dma);
981 /* Allocate the (input) device context for address device command */
982 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
986 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
987 (unsigned long long)dev->in_ctx->dma);
989 /* Initialize the cancellation and bandwidth list for each ep */
990 for (i = 0; i < 31; i++) {
991 dev->eps[i].ep_index = i;
992 dev->eps[i].vdev = dev;
993 dev->eps[i].xhci = xhci;
994 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
995 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
998 /* Allocate endpoint 0 ring */
999 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
1000 if (!dev->eps[0].ring)
1005 /* Point to output device context in dcbaa. */
1006 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1007 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1009 &xhci->dcbaa->dev_context_ptrs[slot_id],
1010 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1012 trace_xhci_alloc_virt_device(dev);
1014 xhci->devs[slot_id] = dev;
1020 xhci_free_container_ctx(xhci, dev->in_ctx);
1022 xhci_free_container_ctx(xhci, dev->out_ctx);
1028 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1029 struct usb_device *udev)
1031 struct xhci_virt_device *virt_dev;
1032 struct xhci_ep_ctx *ep0_ctx;
1033 struct xhci_ring *ep_ring;
1035 virt_dev = xhci->devs[udev->slot_id];
1036 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1037 ep_ring = virt_dev->eps[0].ring;
1039 * FIXME we don't keep track of the dequeue pointer very well after a
1040 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1041 * host to our enqueue pointer. This should only be called after a
1042 * configured device has reset, so all control transfers should have
1043 * been completed or cancelled before the reset.
1045 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1047 | ep_ring->cycle_state);
1051 * The xHCI roothub may have ports of differing speeds in any order in the port status registers.
1054 * The xHCI hardware wants to know the roothub port number that the USB device
1055 * is attached to (or the roothub port its ancestor hub is attached to). All we
1056 * know is the index of that port under either the USB 2.0 or the USB 3.0
1057 * roothub, but that doesn't give us the real index into the HW port status
1058 * registers. Call xhci_find_raw_port_number() to get real index.
1060 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1061 struct usb_device *udev)
1063 struct usb_device *top_dev;
1064 struct usb_hcd *hcd;
1066 if (udev->speed >= USB_SPEED_SUPER)
1067 hcd = xhci_get_usb3_hcd(xhci);
1069 hcd = xhci->main_hcd;
1071 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1072 top_dev = top_dev->parent)
1073 /* Found device below root hub */;
1075 return xhci_find_raw_port_number(hcd, top_dev->portnum);
1078 /* Setup an xHCI virtual device for a Set Address command */
1079 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1081 struct xhci_virt_device *dev;
1082 struct xhci_ep_ctx *ep0_ctx;
1083 struct xhci_slot_ctx *slot_ctx;
1086 struct usb_device *top_dev;
1088 dev = xhci->devs[udev->slot_id];
1089 /* Slot ID 0 is reserved */
1090 if (udev->slot_id == 0 || !dev) {
1091 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1095 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1096 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1098 /* 3) Only the control endpoint is valid - one endpoint context */
1099 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1100 switch (udev->speed) {
1101 case USB_SPEED_SUPER_PLUS:
1102 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1103 max_packets = MAX_PACKET(512);
1105 case USB_SPEED_SUPER:
1106 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1107 max_packets = MAX_PACKET(512);
1109 case USB_SPEED_HIGH:
1110 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1111 max_packets = MAX_PACKET(64);
1113 /* USB core guesses at a 64-byte max packet first for FS devices */
1114 case USB_SPEED_FULL:
1115 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1116 max_packets = MAX_PACKET(64);
1119 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1120 max_packets = MAX_PACKET(8);
1122 case USB_SPEED_WIRELESS:
1123 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1126 /* Speed was set earlier, this shouldn't happen. */
1129 /* Find the root hub port this device is under */
1130 port_num = xhci_find_real_port_number(xhci, udev);
1133 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1134 /* Set the port number in the virtual_device to the faked port number */
1135 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1136 top_dev = top_dev->parent)
1137 /* Found device below root hub */;
1138 dev->fake_port = top_dev->portnum;
1139 dev->real_port = port_num;
1140 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1141 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1143 /* Find the right bandwidth table that this device will be a part of.
1144 * If this is a full speed device attached directly to a root port (or a
1145 * descendant of one), it counts as a primary bandwidth domain, not a
1146 * secondary bandwidth domain under a TT. An xhci_tt_info structure
1147 * will never be created for the HS root hub.
1149 if (!udev->tt || !udev->tt->hub->parent) {
1150 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1152 struct xhci_root_port_bw_info *rh_bw;
1153 struct xhci_tt_bw_info *tt_bw;
1155 rh_bw = &xhci->rh_bw[port_num - 1];
1156 /* Find the right TT. */
1157 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1158 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1161 if (!dev->udev->tt->multi ||
1163 tt_bw->ttport == dev->udev->ttport)) {
1164 dev->bw_table = &tt_bw->bw_table;
1165 dev->tt_info = tt_bw;
1170 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1173 /* Is this a LS/FS device under an external HS hub? */
1174 if (udev->tt && udev->tt->hub->parent) {
1175 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1176 (udev->ttport << 8));
1177 if (udev->tt->multi)
1178 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1180 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1181 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1183 /* Step 4 - ring already allocated */
1185 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1187 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1188 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1191 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1192 dev->eps[0].ring->cycle_state);
1194 trace_xhci_setup_addressable_virt_device(dev);
1196 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1202 * Convert interval expressed as 2^(bInterval - 1) == interval into
1203 * straight exponent value 2^n == interval.
1206 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1207 struct usb_host_endpoint *ep)
1209 unsigned int interval;
1211 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1212 if (interval != ep->desc.bInterval - 1)
1213 dev_warn(&udev->dev,
1214 "ep %#x - rounding interval to %d %sframes\n",
1215 ep->desc.bEndpointAddress,
1217 udev->speed == USB_SPEED_FULL ? "" : "micro");
1219 if (udev->speed == USB_SPEED_FULL) {
1221 * Full speed isoc endpoints specify interval in frames,
1222 * not microframes. We are using microframes everywhere,
1223 * so adjust accordingly.
1225 interval += 3; /* 1 frame = 2^3 uframes */
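/*
 * Worked example: a FS isoc endpoint with bInterval == 4 polls every
 * 2^(4 - 1) = 8 frames; after the adjustment the context holds
 * 3 + 3 = 6, i.e. 2^6 * 125us = 8ms, the same period expressed in
 * microframes.
 */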
1232 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1233 * microframes, rounded down to nearest power of 2.
1235 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1236 struct usb_host_endpoint *ep, unsigned int desc_interval,
1237 unsigned int min_exponent, unsigned int max_exponent)
1239 unsigned int interval;
1241 interval = fls(desc_interval) - 1;
1242 interval = clamp_val(interval, min_exponent, max_exponent);
1243 if ((1 << interval) != desc_interval)
1245 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1246 ep->desc.bEndpointAddress,
1253 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1254 struct usb_host_endpoint *ep)
1256 if (ep->desc.bInterval == 0)
1258 return xhci_microframes_to_exponent(udev, ep,
1259 ep->desc.bInterval, 0, 15);
1263 static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1264 struct usb_host_endpoint *ep)
1266 return xhci_microframes_to_exponent(udev, ep,
1267 ep->desc.bInterval * 8, 3, 10);
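/*
 * Worked example: a LS/FS interrupt endpoint with bInterval == 10 frames
 * becomes 10 * 8 = 80 microframes; fls(80) - 1 = 6, which stays within
 * the [3, 10] clamp, so the context interval is 2^6 * 125us = 8ms
 * (rounded down from the requested 10ms).
 */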
1270 /* Return the polling or NAK interval.
1272 * The polling interval is expressed in "microframes". If xHCI's Interval field
1273 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1275 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval is set to 0.
1278 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1279 struct usb_host_endpoint *ep)
1281 unsigned int interval = 0;
1283 switch (udev->speed) {
1284 case USB_SPEED_HIGH:
1286 if (usb_endpoint_xfer_control(&ep->desc) ||
1287 usb_endpoint_xfer_bulk(&ep->desc)) {
1288 interval = xhci_parse_microframe_interval(udev, ep);
1291 fallthrough; /* SS and HS isoc/int have same decoding */
1293 case USB_SPEED_SUPER_PLUS:
1294 case USB_SPEED_SUPER:
1295 if (usb_endpoint_xfer_int(&ep->desc) ||
1296 usb_endpoint_xfer_isoc(&ep->desc)) {
1297 interval = xhci_parse_exponent_interval(udev, ep);
1301 case USB_SPEED_FULL:
1302 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1303 interval = xhci_parse_exponent_interval(udev, ep);
1307 * Fall through for interrupt endpoint interval decoding
1308 * since it uses the same rules as low speed interrupt endpoints.
1314 if (usb_endpoint_xfer_int(&ep->desc) ||
1315 usb_endpoint_xfer_isoc(&ep->desc)) {
1317 interval = xhci_parse_frame_interval(udev, ep);
1327 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1328 * High speed endpoint descriptors can define "the number of additional
1329 * transaction opportunities per microframe", but that goes in the Max Burst
1330 * endpoint context field.
1332 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1333 struct usb_host_endpoint *ep)
1335 if (udev->speed < USB_SPEED_SUPER ||
1336 !usb_endpoint_xfer_isoc(&ep->desc))
1338 return ep->ss_ep_comp.bmAttributes;
1341 static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1342 struct usb_host_endpoint *ep)
1344 /* Super speed and Plus have max burst in ep companion desc */
1345 if (udev->speed >= USB_SPEED_SUPER)
1346 return ep->ss_ep_comp.bMaxBurst;
1348 if (udev->speed == USB_SPEED_HIGH &&
1349 (usb_endpoint_xfer_isoc(&ep->desc) ||
1350 usb_endpoint_xfer_int(&ep->desc)))
1351 return usb_endpoint_maxp_mult(&ep->desc) - 1;
1356 static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1360 in = usb_endpoint_dir_in(&ep->desc);
1362 switch (usb_endpoint_type(&ep->desc)) {
1363 case USB_ENDPOINT_XFER_CONTROL:
1365 case USB_ENDPOINT_XFER_BULK:
1366 return in ? BULK_IN_EP : BULK_OUT_EP;
1367 case USB_ENDPOINT_XFER_ISOC:
1368 return in ? ISOC_IN_EP : ISOC_OUT_EP;
1369 case USB_ENDPOINT_XFER_INT:
1370 return in ? INT_IN_EP : INT_OUT_EP;
1375 /* Return the maximum endpoint service interval time (ESIT) payload.
1376 * Basically, this is the max packet size, multiplied by the burst size and the mult.
1379 static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1380 struct usb_host_endpoint *ep)
1385 /* Only applies for interrupt or isochronous endpoints */
1386 if (usb_endpoint_xfer_control(&ep->desc) ||
1387 usb_endpoint_xfer_bulk(&ep->desc))
1390 /* SuperSpeedPlus Isoc ep sending over 48k per esit */
1391 if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1392 USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1393 return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1394 /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
1395 else if (udev->speed >= USB_SPEED_SUPER)
1396 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1398 max_packet = usb_endpoint_maxp(&ep->desc);
1399 max_burst = usb_endpoint_maxp_mult(&ep->desc);
1400 /* A 0 in max burst means 1 transfer per ESIT */
1401 return max_packet * max_burst;
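/*
 * Worked example: a HS interrupt endpoint with a 1024-byte max packet and
 * two additional transaction opportunities per microframe has
 * usb_endpoint_maxp_mult() == 3, so the max ESIT payload is
 * 1024 * 3 = 3072 bytes.
 */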
1404 /* Set up an endpoint with one ring segment. Do not allocate stream rings.
1405 * Drivers will have to call usb_alloc_streams() to do that.
1407 int xhci_endpoint_init(struct xhci_hcd *xhci,
1408 struct xhci_virt_device *virt_dev,
1409 struct usb_device *udev,
1410 struct usb_host_endpoint *ep,
1413 unsigned int ep_index;
1414 struct xhci_ep_ctx *ep_ctx;
1415 struct xhci_ring *ep_ring;
1416 unsigned int max_packet;
1417 enum xhci_ring_type ring_type;
1418 u32 max_esit_payload;
1420 unsigned int max_burst;
1421 unsigned int interval;
1423 unsigned int avg_trb_len;
1424 unsigned int err_count = 0;
1426 ep_index = xhci_get_endpoint_index(&ep->desc);
1427 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1429 endpoint_type = xhci_get_endpoint_type(ep);
1433 ring_type = usb_endpoint_type(&ep->desc);
1436 * Get values to fill the endpoint context, mostly from ep descriptor.
1437 * The average TRB buffer length for bulk endpoints is unclear as we
1438 * have no clue on scatter gather list entry size. For Isoc and Int,
1439 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
1441 max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1442 interval = xhci_get_endpoint_interval(udev, ep);
1444 /* Periodic endpoint bInterval limit quirk */
1445 if (usb_endpoint_xfer_int(&ep->desc) ||
1446 usb_endpoint_xfer_isoc(&ep->desc)) {
1447 if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1448 udev->speed >= USB_SPEED_HIGH &&
1454 mult = xhci_get_endpoint_mult(udev, ep);
1455 max_packet = usb_endpoint_maxp(&ep->desc);
1456 max_burst = xhci_get_endpoint_max_burst(udev, ep);
1457 avg_trb_len = max_esit_payload;
1459 /* FIXME dig Mult and streams info out of ep companion desc */
1461 /* Allow 3 retries for everything but isoc, set CErr = 3 */
1462 if (!usb_endpoint_xfer_isoc(&ep->desc))
1464 /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
1465 if (usb_endpoint_xfer_bulk(&ep->desc)) {
1466 if (udev->speed == USB_SPEED_HIGH)
1468 if (udev->speed == USB_SPEED_FULL) {
1469 max_packet = rounddown_pow_of_two(max_packet);
1470 max_packet = clamp_val(max_packet, 8, 64);
1473 /* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
1474 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1476 /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
1477 if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1480 /* Set up the endpoint ring */
1481 virt_dev->eps[ep_index].new_ring =
1482 xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1483 if (!virt_dev->eps[ep_index].new_ring)
1486 virt_dev->eps[ep_index].skip = false;
1487 ep_ring = virt_dev->eps[ep_index].new_ring;
1489 /* Fill the endpoint context */
1490 ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1491 EP_INTERVAL(interval) |
1493 ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1494 MAX_PACKET(max_packet) |
1495 MAX_BURST(max_burst) |
1496 ERROR_COUNT(err_count));
1497 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1498 ep_ring->cycle_state);
1500 ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1501 EP_AVG_TRB_LENGTH(avg_trb_len));
1506 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1507 struct xhci_virt_device *virt_dev,
1508 struct usb_host_endpoint *ep)
1510 unsigned int ep_index;
1511 struct xhci_ep_ctx *ep_ctx;
1513 ep_index = xhci_get_endpoint_index(&ep->desc);
1514 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1516 ep_ctx->ep_info = 0;
1517 ep_ctx->ep_info2 = 0;
1519 ep_ctx->tx_info = 0;
1520 /* Don't free the endpoint ring until the set interface or configuration request succeeds. */
1525 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1527 bw_info->ep_interval = 0;
1529 bw_info->num_packets = 0;
1530 bw_info->max_packet_size = 0;
1532 bw_info->max_esit_payload = 0;
1535 void xhci_update_bw_info(struct xhci_hcd *xhci,
1536 struct xhci_container_ctx *in_ctx,
1537 struct xhci_input_control_ctx *ctrl_ctx,
1538 struct xhci_virt_device *virt_dev)
1540 struct xhci_bw_info *bw_info;
1541 struct xhci_ep_ctx *ep_ctx;
1542 unsigned int ep_type;
1545 for (i = 1; i < 31; i++) {
1546 bw_info = &virt_dev->eps[i].bw_info;
1548 /* We can't tell what endpoint type is being dropped, but
1549 * unconditionally clearing the bandwidth info for non-periodic
1550 * endpoints should be harmless because the info will never be
1551 * set in the first place.
1553 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1554 /* Dropped endpoint */
1555 xhci_clear_endpoint_bw_info(bw_info);
1559 if (EP_IS_ADDED(ctrl_ctx, i)) {
1560 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1561 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1563 /* Ignore non-periodic endpoints */
1564 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1565 ep_type != ISOC_IN_EP &&
1566 ep_type != INT_IN_EP)
1569 /* Added or changed endpoint */
1570 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1571 le32_to_cpu(ep_ctx->ep_info));
1572 /* Number of packets and mult are zero-based in the
1573 * input context, but we want one-based for the interval table.
1576 bw_info->mult = CTX_TO_EP_MULT(
1577 le32_to_cpu(ep_ctx->ep_info)) + 1;
1578 bw_info->num_packets = CTX_TO_MAX_BURST(
1579 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1580 bw_info->max_packet_size = MAX_PACKET_DECODED(
1581 le32_to_cpu(ep_ctx->ep_info2));
1582 bw_info->type = ep_type;
1583 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1584 le32_to_cpu(ep_ctx->tx_info));
1589 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1590 * Useful when you want to change one particular aspect of the endpoint and then
1591 * issue a configure endpoint command.
1593 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1594 struct xhci_container_ctx *in_ctx,
1595 struct xhci_container_ctx *out_ctx,
1596 unsigned int ep_index)
1598 struct xhci_ep_ctx *out_ep_ctx;
1599 struct xhci_ep_ctx *in_ep_ctx;
1601 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1602 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1604 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1605 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1606 in_ep_ctx->deq = out_ep_ctx->deq;
1607 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1608 if (xhci->quirks & XHCI_MTK_HOST) {
1609 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1610 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1614 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1615 * Useful when you want to change one particular aspect of the endpoint and then
1616 * issue a configure endpoint command. Only the context entries field matters,
1617 * but we'll copy the whole thing anyway.
1619 void xhci_slot_copy(struct xhci_hcd *xhci,
1620 struct xhci_container_ctx *in_ctx,
1621 struct xhci_container_ctx *out_ctx)
1623 struct xhci_slot_ctx *in_slot_ctx;
1624 struct xhci_slot_ctx *out_slot_ctx;
1626 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1627 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1629 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1630 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1631 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1632 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1635 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1636 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1639 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1640 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1642 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1643 "Allocating %d scratchpad buffers", num_sp);
1648 xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
1650 if (!xhci->scratchpad)
1653 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1654 num_sp * sizeof(u64),
1655 &xhci->scratchpad->sp_dma, flags);
1656 if (!xhci->scratchpad->sp_array)
1659 xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
1660 flags, dev_to_node(dev));
1661 if (!xhci->scratchpad->sp_buffers)
1664 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1665 for (i = 0; i < num_sp; i++) {
1667 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1672 xhci->scratchpad->sp_array[i] = dma;
1673 xhci->scratchpad->sp_buffers[i] = buf;
1679 for (i = i - 1; i >= 0; i--) {
1680 dma_free_coherent(dev, xhci->page_size,
1681 xhci->scratchpad->sp_buffers[i],
1682 xhci->scratchpad->sp_array[i]);
1685 kfree(xhci->scratchpad->sp_buffers);
1688 dma_free_coherent(dev, num_sp * sizeof(u64),
1689 xhci->scratchpad->sp_array,
1690 xhci->scratchpad->sp_dma);
1693 kfree(xhci->scratchpad);
1694 xhci->scratchpad = NULL;
1700 static void scratchpad_free(struct xhci_hcd *xhci)
1704 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1706 if (!xhci->scratchpad)
1709 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1711 for (i = 0; i < num_sp; i++) {
1712 dma_free_coherent(dev, xhci->page_size,
1713 xhci->scratchpad->sp_buffers[i],
1714 xhci->scratchpad->sp_array[i]);
1716 kfree(xhci->scratchpad->sp_buffers);
1717 dma_free_coherent(dev, num_sp * sizeof(u64),
1718 xhci->scratchpad->sp_array,
1719 xhci->scratchpad->sp_dma);
1720 kfree(xhci->scratchpad);
1721 xhci->scratchpad = NULL;
1724 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1725 bool allocate_completion, gfp_t mem_flags)
1727 struct xhci_command *command;
1728 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1730 command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
1734 if (allocate_completion) {
1735 command->completion =
1736 kzalloc_node(sizeof(struct completion), mem_flags,
1738 if (!command->completion) {
1742 init_completion(command->completion);
1745 command->status = 0;
1746 INIT_LIST_HEAD(&command->cmd_list);
1750 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1751 bool allocate_completion, gfp_t mem_flags)
1753 struct xhci_command *command;
1755 command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1759 command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1761 if (!command->in_ctx) {
1762 kfree(command->completion);
1769 void xhci_urb_free_priv(struct urb_priv *urb_priv)
1774 void xhci_free_command(struct xhci_hcd *xhci,
1775 struct xhci_command *command)
1777 xhci_free_container_ctx(xhci,
1779 kfree(command->completion);
1783 int xhci_alloc_erst(struct xhci_hcd *xhci,
1784 struct xhci_ring *evt_ring,
1785 struct xhci_erst *erst,
1790 struct xhci_segment *seg;
1791 struct xhci_erst_entry *entry;
1793 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1794 erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1795 size, &erst->erst_dma_addr, flags);
1799 erst->num_entries = evt_ring->num_segs;
1801 seg = evt_ring->first_seg;
1802 for (val = 0; val < evt_ring->num_segs; val++) {
1803 entry = &erst->entries[val];
1804 entry->seg_addr = cpu_to_le64(seg->dma);
1805 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
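/*
 * Each ERST entry therefore describes one event ring segment: its 64-bit
 * base address and its size in TRBs (xHCI spec section 6.5). With the
 * usual TRBS_PER_SEGMENT of 256, seg_size is 256 for every entry.
 */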
1813 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1816 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1818 size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
1820 dma_free_coherent(dev, size,
1822 erst->erst_dma_addr);
1823 erst->entries = NULL;
1826 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1828 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1829 int i, j, num_ports;
1831 cancel_delayed_work_sync(&xhci->cmd_timer);
1833 xhci_free_erst(xhci, &xhci->erst);
1835 if (xhci->event_ring)
1836 xhci_ring_free(xhci, xhci->event_ring);
1837 xhci->event_ring = NULL;
1838 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1841 xhci_ring_free(xhci, xhci->cmd_ring);
1842 xhci->cmd_ring = NULL;
1843 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1844 xhci_cleanup_command_queue(xhci);
1846 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1847 for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1848 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1849 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1850 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1851 while (!list_empty(ep))
1852 list_del_init(ep->next);
1856 for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1857 xhci_free_virt_devices_depth_first(xhci, i);
1859 dma_pool_destroy(xhci->segment_pool);
1860 xhci->segment_pool = NULL;
1861 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1863 dma_pool_destroy(xhci->device_pool);
1864 xhci->device_pool = NULL;
1865 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1867 dma_pool_destroy(xhci->small_streams_pool);
1868 xhci->small_streams_pool = NULL;
1869 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1870 "Freed small stream array pool");
1872 dma_pool_destroy(xhci->medium_streams_pool);
1873 xhci->medium_streams_pool = NULL;
1874 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1875 "Freed medium stream array pool");
1878 dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1879 xhci->dcbaa, xhci->dcbaa->dma);
1882 scratchpad_free(xhci);
1887 for (i = 0; i < num_ports; i++) {
1888 struct xhci_tt_bw_info *tt, *n;
1889 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1890 list_del(&tt->tt_list);
1896 xhci->cmd_ring_reserved_trbs = 0;
1897 xhci->usb2_rhub.num_ports = 0;
1898 xhci->usb3_rhub.num_ports = 0;
1899 xhci->num_active_eps = 0;
1900 kfree(xhci->usb2_rhub.ports);
1901 kfree(xhci->usb3_rhub.ports);
1902 kfree(xhci->hw_ports);
1904 kfree(xhci->ext_caps);
1905 for (i = 0; i < xhci->num_port_caps; i++)
1906 kfree(xhci->port_caps[i].psi);
1907 kfree(xhci->port_caps);
1908 xhci->num_port_caps = 0;
1910 xhci->usb2_rhub.ports = NULL;
1911 xhci->usb3_rhub.ports = NULL;
1912 xhci->hw_ports = NULL;
1914 xhci->ext_caps = NULL;
1915 xhci->port_caps = NULL;
1917 xhci->page_size = 0;
1918 xhci->page_shift = 0;
1919 xhci->usb2_rhub.bus_state.bus_suspended = 0;
1920 xhci->usb3_rhub.bus_state.bus_suspended = 0;
1923 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1924 struct xhci_segment *input_seg,
1925 union xhci_trb *start_trb,
1926 union xhci_trb *end_trb,
1927 dma_addr_t input_dma,
1928 struct xhci_segment *result_seg,
1929 char *test_name, int test_number)
1931 unsigned long long start_dma;
1932 unsigned long long end_dma;
1933 struct xhci_segment *seg;
1935 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1936 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1938 seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1939 if (seg != result_seg) {
1940 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1941 test_name, test_number);
1942 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1943 "input DMA 0x%llx\n",
1945 (unsigned long long) input_dma);
1946 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1947 "ending TRB %p (0x%llx DMA)\n",
1948 start_trb, start_dma,
1950 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1952 trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1959 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1960 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1963 dma_addr_t input_dma;
1964 struct xhci_segment *result_seg;
1965 } simple_test_vector [] = {
1966 /* A zeroed DMA field should fail */
1968 /* One TRB before the ring start should fail */
1969 { xhci->event_ring->first_seg->dma - 16, NULL },
1970 /* One byte before the ring start should fail */
1971 { xhci->event_ring->first_seg->dma - 1, NULL },
1972 /* Starting TRB should succeed */
1973 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1974 /* Ending TRB should succeed */
1975 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1976 xhci->event_ring->first_seg },
1977 /* One byte after the ring end should fail */
1978 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1979 /* One TRB after the ring end should fail */
1980 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1981 /* An address of all ones should fail */
1982 { (dma_addr_t) (~0), NULL },
1985 struct xhci_segment *input_seg;
1986 union xhci_trb *start_trb;
1987 union xhci_trb *end_trb;
1988 dma_addr_t input_dma;
1989 struct xhci_segment *result_seg;
1990 } complex_test_vector [] = {
1991 /* Test feeding a valid DMA address from a different ring */
1992 { .input_seg = xhci->event_ring->first_seg,
1993 .start_trb = xhci->event_ring->first_seg->trbs,
1994 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1995 .input_dma = xhci->cmd_ring->first_seg->dma,
1998 /* Test feeding a valid end TRB from a different ring */
1999 { .input_seg = xhci->event_ring->first_seg,
2000 .start_trb = xhci->event_ring->first_seg->trbs,
2001 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2002 .input_dma = xhci->cmd_ring->first_seg->dma,
2005 /* Test feeding a valid start and end TRB from a different ring */
2006 { .input_seg = xhci->event_ring->first_seg,
2007 .start_trb = xhci->cmd_ring->first_seg->trbs,
2008 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2009 .input_dma = xhci->cmd_ring->first_seg->dma,
2012 /* TRB in this ring, but after this TD */
2013 { .input_seg = xhci->event_ring->first_seg,
2014 .start_trb = &xhci->event_ring->first_seg->trbs[0],
2015 .end_trb = &xhci->event_ring->first_seg->trbs[3],
2016 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
2019 /* TRB in this ring, but before this TD */
2020 { .input_seg = xhci->event_ring->first_seg,
2021 .start_trb = &xhci->event_ring->first_seg->trbs[3],
2022 .end_trb = &xhci->event_ring->first_seg->trbs[6],
2023 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
2026 /* TRB in this ring, but after this wrapped TD */
2027 { .input_seg = xhci->event_ring->first_seg,
2028 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2029 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2030 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
2033 /* TRB in this ring, but before this wrapped TD */
2034 { .input_seg = xhci->event_ring->first_seg,
2035 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2036 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2037 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2040 /* TRB not in this ring, and we have a wrapped TD */
2041 { .input_seg = xhci->event_ring->first_seg,
2042 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2043 .end_trb = &xhci->event_ring->first_seg->trbs[1],
2044 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2049 unsigned int num_tests;
2052 num_tests = ARRAY_SIZE(simple_test_vector);
2053 for (i = 0; i < num_tests; i++) {
2054 ret = xhci_test_trb_in_td(xhci,
2055 xhci->event_ring->first_seg,
2056 xhci->event_ring->first_seg->trbs,
2057 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2058 simple_test_vector[i].input_dma,
2059 simple_test_vector[i].result_seg,
2065 num_tests = ARRAY_SIZE(complex_test_vector);
2066 for (i = 0; i < num_tests; i++) {
2067 ret = xhci_test_trb_in_td(xhci,
2068 complex_test_vector[i].input_seg,
2069 complex_test_vector[i].start_trb,
2070 complex_test_vector[i].end_trb,
2071 complex_test_vector[i].input_dma,
2072 complex_test_vector[i].result_seg,
2073 "Complex", i);
2074 if (ret < 0)
2075 return ret;
2076 }
2077 xhci_dbg(xhci, "TRB math tests passed.\n");
2078 return 0;
2079 }
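/*
 * Worked example (illustrative; in this driver a segment holds
 * TRBS_PER_SEGMENT (256) TRBs of 16 bytes each): for a segment at DMA
 * 0x1000, the last TRB starts at 0x1000 + 255*16 = 0x1ff0.  The vectors
 * above therefore expect 0x1ff0 to resolve to the segment, while
 * 0x1ff1 ("one byte after the ring end") and 0x1000 + 256*16 = 0x2000
 * ("one TRB after the ring end") must return no match.
 */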
2081 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2082 {
2083 u64 temp;
2084 dma_addr_t deq;
2086 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2087 xhci->event_ring->dequeue);
2088 if (!deq)
2089 xhci_warn(xhci, "WARN something wrong with SW event ring "
2090 "dequeue ptr.\n");
2091 /* Update HC event ring dequeue pointer */
2092 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2093 temp &= ERST_PTR_MASK;
2094 /* Don't clear the EHB bit (which is RW1C) because
2095 * there might be more events to service.
2096 */
2097 temp &= ~ERST_EHB;
2098 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2099 "// Write event ring dequeue pointer, "
2100 "preserving EHB bit");
2101 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2102 &xhci->ir_set->erst_dequeue);
2103 }
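/*
 * Masking sketch (assuming ERST_PTR_MASK = 0xf and ERST_EHB = bit 3, as in
 * this driver's xhci.h): say the register reads 0x1008 (EHB set) and the new
 * dequeue TRB sits at DMA 0x2000.  Then temp = 0x1008 & 0xf = 0x8; clearing
 * ERST_EHB gives 0x0, so the value written is (0x2000 & ~0xfULL) | 0x0 =
 * 0x2000; the pointer is updated and a 0 lands on the RW1C EHB bit, which
 * preserves it for the interrupt handler to clear later.
 */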
2105 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2106 __le32 __iomem *addr, int max_caps)
2107 {
2108 u32 temp, port_offset, port_count;
2109 int i;
2110 u8 major_revision, minor_revision;
2111 struct xhci_hub *rhub;
2112 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2113 struct xhci_port_cap *port_cap;
2115 temp = readl(addr);
2116 major_revision = XHCI_EXT_PORT_MAJOR(temp);
2117 minor_revision = XHCI_EXT_PORT_MINOR(temp);
2119 if (major_revision == 0x03) {
2120 rhub = &xhci->usb3_rhub;
2121 /*
2122 * Some hosts incorrectly use sub-minor version for minor
2123 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
2124 * for bcdUSB 0x310). Since there is no USB release with bcdUSB
2125 * 0x301 to 0x309 (sub-minor versions 1 to 9), we can assume they
2126 * are incorrect and fix it here.
2127 */
2128 if (minor_revision > 0x00 && minor_revision < 0x10)
2129 minor_revision <<= 4;
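/*
 * Example of the fix-up above: a host reporting minor_revision 0x01 for a
 * USB 3.1 capability is corrected to 0x10, and 0x02 (USB 3.2) to 0x20, so
 * the value matches the bcdUSB-style encoding used everywhere else.
 */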
2130 } else if (major_revision <= 0x02) {
2131 rhub = &xhci->usb2_rhub;
2132 } else {
2133 xhci_warn(xhci, "Ignoring unknown port speed, "
2134 "Ext Cap %p, revision = 0x%x\n",
2135 addr, major_revision);
2136 /* Ignoring port protocol we can't understand. FIXME */
2137 return;
2138 }
2139 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2141 if (rhub->min_rev < minor_revision)
2142 rhub->min_rev = minor_revision;
2144 /* Port offset and count in the third dword, see section 7.2 */
2145 temp = readl(addr + 2);
2146 port_offset = XHCI_EXT_PORT_OFF(temp);
2147 port_count = XHCI_EXT_PORT_COUNT(temp);
2148 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2149 "Ext Cap %p, port offset = %u, "
2150 "count = %u, revision = 0x%x",
2151 addr, port_offset, port_count, major_revision);
2152 /* Port count includes the current port offset */
2153 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2154 /* WTF? "Valid values are ‘1’ to MaxPorts" */
2155 return;
2157 port_cap = &xhci->port_caps[xhci->num_port_caps++];
2158 if (xhci->num_port_caps > max_caps)
2159 return;
2161 port_cap->maj_rev = major_revision;
2162 port_cap->min_rev = minor_revision;
2163 port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2165 if (port_cap->psi_count) {
2166 port_cap->psi = kcalloc_node(port_cap->psi_count,
2167 sizeof(*port_cap->psi),
2168 GFP_KERNEL, dev_to_node(dev));
2169 if (!port_cap->psi)
2170 port_cap->psi_count = 0;
2172 port_cap->psi_uid_count++;
2173 for (i = 0; i < port_cap->psi_count; i++) {
2174 port_cap->psi[i] = readl(addr + 4 + i);
2176 /* count unique ID values, two consecutive entries can
2177 * have the same ID if link is asymmetric
2178 */
2179 if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2180 XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2181 port_cap->psi_uid_count++;
2183 xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2184 XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2185 XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2186 XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2187 XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2188 XHCI_EXT_PORT_LP(port_cap->psi[i]),
2189 XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2190 }
2191 }
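/*
 * Decode sketch (field positions as defined by the XHCI_EXT_PORT_* macros;
 * the sample value is hypothetical): a PSI dword of 0x00050134 decodes to
 * PSIV=4, PSIE=3 (Gb/s), PLT=0 (symmetric link), PFD=1, LP=0, PSIM=5,
 * i.e. a 5 Gb/s SuperSpeed protocol speed ID.
 */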
2192 /* cache usb2 port capabilities */
2193 if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2194 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2196 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2197 (temp & XHCI_HLC)) {
2198 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2199 "xHCI 1.0: support USB2 hardware lpm");
2200 xhci->hw_lpm_support = 1;
2201 }
2203 port_offset--;
2204 for (i = port_offset; i < (port_offset + port_count); i++) {
2205 struct xhci_port *hw_port = &xhci->hw_ports[i];
2206 /* Duplicate entry. Ignore the port if the revisions differ. */
2207 if (hw_port->rhub) {
2208 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2209 " port %u\n", addr, i);
2210 xhci_warn(xhci, "Port was marked as USB %u, "
2211 "duplicated as USB %u\n",
2212 hw_port->rhub->maj_rev, major_revision);
2213 /* Only adjust the roothub port counts if we haven't
2214 * found a similar duplicate.
2215 */
2216 if (hw_port->rhub != rhub &&
2217 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2218 hw_port->rhub->num_ports--;
2219 hw_port->hcd_portnum = DUPLICATE_ENTRY;
2220 }
2221 continue;
2222 }
2223 hw_port->rhub = rhub;
2224 hw_port->port_cap = port_cap;
2225 rhub->num_ports++;
2226 }
2227 /* FIXME: Should we disable ports not in the Extended Capabilities? */
2228 }
2230 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2231 struct xhci_hub *rhub, gfp_t flags)
2232 {
2233 int port_index = 0;
2234 int i;
2235 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2237 if (!rhub->num_ports)
2238 return;
2239 rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
2240 flags, dev_to_node(dev));
2241 if (!rhub->ports)
2242 return;
2244 for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2245 if (xhci->hw_ports[i].rhub != rhub ||
2246 xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2247 continue;
2248 xhci->hw_ports[i].hcd_portnum = port_index;
2249 rhub->ports[port_index] = &xhci->hw_ports[i];
2250 port_index++;
2251 if (port_index == rhub->num_ports)
2252 break;
2253 }
2254 }
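/*
 * Mapping sketch (hypothetical 4-port host): if the extended capabilities
 * mark hw_ports[0..1] as USB 2.0 and hw_ports[2..3] as USB 3.0, this loop
 * yields usb2_rhub.ports = { &hw_ports[0], &hw_ports[1] } and
 * usb3_rhub.ports = { &hw_ports[2], &hw_ports[3] }, each renumbered from
 * hcd_portnum 0, so every roothub sees a dense, 0-based port array.
 */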
2256 /*
2257 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2258 * specify what speeds each port is supposed to be. We can't count on the port
2259 * speed bits in the PORTSC register being correct until a device is connected,
2260 * but we need to set up the two fake roothubs with the correct number of USB
2261 * 3.0 and USB 2.0 ports at host controller initialization time.
2262 */
2263 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2267 unsigned int num_ports;
2271 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2273 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2274 xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2275 flags, dev_to_node(dev));
2276 if (!xhci->hw_ports)
2277 return -ENOMEM;
2279 for (i = 0; i < num_ports; i++) {
2280 xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2281 NUM_PORT_REGS * i;
2282 xhci->hw_ports[i].hw_portnum = i;
2283 }
2285 xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2286 dev_to_node(dev));
2287 if (!xhci->rh_bw)
2288 return -ENOMEM;
2289 for (i = 0; i < num_ports; i++) {
2290 struct xhci_interval_bw_table *bw_table;
2292 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2293 bw_table = &xhci->rh_bw[i].bw_table;
2294 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2295 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2296 }
2297 base = &xhci->cap_regs->hc_capbase;
2299 cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2300 if (!cap_start) {
2301 xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2302 return -ENODEV;
2303 }
2305 offset = cap_start;
2306 /* count extended protocol capability entries for later caching */
2307 while (offset) {
2308 cap_count++;
2309 offset = xhci_find_next_ext_cap(base, offset,
2310 XHCI_EXT_CAPS_PROTOCOL);
2311 }
2313 xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2314 flags, dev_to_node(dev));
2315 if (!xhci->ext_caps)
2316 return -ENOMEM;
2318 xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2319 flags, dev_to_node(dev));
2320 if (!xhci->port_caps)
2321 return -ENOMEM;
2323 offset = cap_start;
2325 while (offset) {
2326 xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2327 if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2328 num_ports)
2329 break;
2330 offset = xhci_find_next_ext_cap(base, offset,
2331 XHCI_EXT_CAPS_PROTOCOL);
2332 }
2333 if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2334 xhci_warn(xhci, "No ports on the roothubs?\n");
2335 return -ENODEV;
2336 }
2337 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2338 "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2339 xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2341 /* Place limits on the number of roothub ports so that the hub
2342 * descriptors aren't longer than the USB core will allocate.
2343 */
2344 if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2345 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2346 "Limiting USB 3.0 roothub ports to %u.",
2347 USB_SS_MAXPORTS);
2348 xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2349 }
2350 if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2351 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2352 "Limiting USB 2.0 roothub ports to %u.",
2353 USB_MAXCHILDREN);
2354 xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2355 }
2357 if (!xhci->usb2_rhub.num_ports)
2358 xhci_info(xhci, "USB2 root hub has no ports\n");
2360 if (!xhci->usb3_rhub.num_ports)
2361 xhci_info(xhci, "USB3 root hub has no ports\n");
2363 xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2364 xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2366 return 0;
2367 }
2369 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2370 {
2371 dma_addr_t dma;
2372 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2373 unsigned int val, val2;
2374 u64 val_64;
2375 u32 page_size, temp;
2376 int i, ret;
2378 INIT_LIST_HEAD(&xhci->cmd_list);
2380 /* init command timeout work */
2381 INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2382 init_completion(&xhci->cmd_ring_stop_completion);
2384 page_size = readl(&xhci->op_regs->page_size);
2385 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2386 "Supported page size register = 0x%x", page_size);
2387 i = ffs(page_size) - 1;
2388 if (i < 16)
2389 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2390 "Supported page size of %iK", (1 << (i+12)) / 1024);
2391 else
2392 xhci_warn(xhci, "WARN: no supported page size\n");
2393 /* Use 4K pages, since that's common and the minimum the HC supports */
2394 xhci->page_shift = 12;
2395 xhci->page_size = 1 << xhci->page_shift;
2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2397 "HCD page size set to %iK", xhci->page_size / 1024);
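/*
 * Decode example: a PAGESIZE register of 0x1 has bit 0 set, advertising
 * 4K pages ((1 << (0 + 12)) / 1024 = 4K); 0x2 would advertise 8K.  Whatever
 * the host advertises, the driver sticks with the 4K minimum chosen above.
 */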
2399 /*
2400 * Program the Number of Device Slots Enabled field in the CONFIG
2401 * register with the max value of slots the HC can handle.
2402 */
2403 val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2404 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2405 "// xHC can handle at most %d device slots.", val);
2406 val2 = readl(&xhci->op_regs->config_reg);
2407 val |= (val2 & ~HCS_SLOTS_MASK);
2408 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2409 "// Setting Max device slots reg = 0x%x.", val);
2410 writel(val, &xhci->op_regs->config_reg);
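/*
 * Composition example: if HCS_MAX_SLOTS() reports 32 (0x20) and config_reg
 * reads back 0x300 outside HCS_SLOTS_MASK, the value written is 0x320: the
 * MaxSlotsEn field is replaced while every other CONFIG bit is preserved.
 */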
2412 /*
2413 * xHCI section 5.4.6 - Device Context array must be
2414 * "physically contiguous and 64-byte (cache line) aligned".
2415 */
2416 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2417 flags);
2418 if (!xhci->dcbaa)
2419 goto fail;
2420 xhci->dcbaa->dma = dma;
2421 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2422 "// Device context base array address = 0x%llx (DMA), %p (virt)",
2423 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2424 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2426 /*
2427 * Initialize the ring segment pool. The ring must be a contiguous
2428 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
2429 * however, the command ring segment needs 64-byte aligned segments
2430 * and our use of dma addresses in the trb_address_map radix tree needs
2431 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2432 */
2433 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2434 TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2436 /* See Table 46 and Note on Figure 55 */
2437 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2438 2112, 64, xhci->page_size);
2439 if (!xhci->segment_pool || !xhci->device_pool)
2440 goto fail;
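/*
 * Size note: the 2112-byte pool element is the worst-case input context
 * with 64-byte context entries: (1 input control + 1 slot + 31 endpoint
 * contexts) * 64 bytes = 33 * 64 = 2112.
 */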
2442 /* Linear stream context arrays don't have any boundary restrictions,
2443 * and only need to be 16-byte aligned.
2444 */
2445 xhci->small_streams_pool =
2446 dma_pool_create("xHCI 256 byte stream ctx arrays",
2447 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2448 xhci->medium_streams_pool =
2449 dma_pool_create("xHCI 1KB stream ctx arrays",
2450 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2451 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2452 * will be allocated with dma_alloc_coherent()
2453 */
2455 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2456 goto fail;
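/*
 * Size sketch (assuming the driver's usual SMALL_STREAM_ARRAY_SIZE of 256
 * and MEDIUM_STREAM_ARRAY_SIZE of 1024): stream contexts are 16 bytes each,
 * so these pools serve arrays of up to 16 and 64 stream contexts; bigger
 * arrays take the dma_alloc_coherent() path noted above.
 */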
2458 /* Set up the command ring to have one segment for now. */
2459 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2460 if (!xhci->cmd_ring)
2461 goto fail;
2462 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2463 "Allocated command ring at %p", xhci->cmd_ring);
2464 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2465 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2467 /* Set the address in the Command Ring Control register */
2468 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2469 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2470 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2471 xhci->cmd_ring->cycle_state;
2472 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2473 "// Setting command ring address to 0x%016llx", val_64);
2474 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
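/*
 * Composition example (assuming CMD_RING_RSVD_BITS = 0x3f and that the
 * reserved bits read back as zero): with first_seg->dma = 0x7ff000 and
 * cycle_state = 1, val_64 becomes (0x7ff000 & ~0x3f) | 1 = 0x7ff001,
 * i.e. the ring base in the high bits plus the Ring Cycle State in bit 0,
 * with the reserved CRCR bits preserved.
 */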
2476 /* Reserve one command ring TRB for disabling LPM.
2477 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2478 * disabling LPM, we only need to reserve one TRB for all devices.
2479 */
2480 xhci->cmd_ring_reserved_trbs++;
2482 val = readl(&xhci->cap_regs->db_off);
2483 val &= DBOFF_MASK;
2484 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2485 "// Doorbell array is located at offset 0x%x"
2486 " from cap regs base addr", val);
2487 xhci->dba = (void __iomem *) xhci->cap_regs + val;
2488 /* Set ir_set to interrupt register set 0 */
2489 xhci->ir_set = &xhci->run_regs->ir_set[0];
2491 /*
2492 * Event ring setup: Allocate a normal ring, but also setup
2493 * the event ring segment table (ERST). Section 4.9.3.
2494 */
2495 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2496 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2497 0, flags);
2498 if (!xhci->event_ring)
2499 goto fail;
2500 if (xhci_check_trb_in_td_math(xhci) < 0)
2501 goto fail;
2503 ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
2504 if (ret)
2505 goto fail;
2507 /* set ERST count with the number of entries in the segment table */
2508 val = readl(&xhci->ir_set->erst_size);
2509 val &= ERST_SIZE_MASK;
2510 val |= ERST_NUM_SEGS;
2511 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2512 "// Write ERST size = %i to ir_set 0 (some bits preserved)",
2513 val);
2514 writel(val, &xhci->ir_set->erst_size);
2516 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2517 "// Set ERST entries to point to event ring.");
2518 /* set the segment table base address */
2519 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2520 "// Set ERST base address for ir_set 0 = 0x%llx",
2521 (unsigned long long)xhci->erst.erst_dma_addr);
2522 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2523 val_64 &= ERST_PTR_MASK;
2524 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2525 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
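/*
 * Register sketch, for illustration: ERSTSZ gets ERST_NUM_SEGS in its low
 * bits with the reserved high bits read back and kept; ERSTBA keeps its low
 * reserved bits (ERST_PTR_MASK) and takes the 64-byte-aligned table address,
 * so e.g. an erst_dma_addr of 0x100040 is written as 0x100040 plus whatever
 * low reserved bits were already set.
 */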
2527 /* Set the event ring dequeue address */
2528 xhci_set_hc_event_deq(xhci);
2529 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2530 "Wrote ERST address to ir_set 0.");
2532 xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
2534 /*
2535 * XXX: Might need to set the Interrupter Moderation Register to
2536 * something other than the default (~1ms minimum between interrupts).
2537 * See section 5.5.1.2.
2538 */
2539 for (i = 0; i < MAX_HC_SLOTS; i++)
2540 xhci->devs[i] = NULL;
2541 for (i = 0; i < USB_MAXCHILDREN; i++) {
2542 xhci->usb2_rhub.bus_state.resume_done[i] = 0;
2543 xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2544 /* Only the USB 2.0 rexit completions will ever be used. */
2545 init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
2546 init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
2547 }
2549 if (scratchpad_alloc(xhci, flags))
2550 goto fail;
2551 if (xhci_setup_port_arrays(xhci, flags))
2552 goto fail;
2554 /* Enable USB 3.0 device notifications for function remote wake, which
2555 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2556 * U3 (device suspend).
2557 */
2558 temp = readl(&xhci->op_regs->dev_notification);
2559 temp &= ~DEV_NOTE_MASK;
2560 temp |= DEV_NOTE_FWAKE;
2561 writel(temp, &xhci->op_regs->dev_notification);
2563 return 0;
2565 fail:
2566 xhci_halt(xhci);
2567 xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
2568 xhci_mem_cleanup(xhci);
2569 return -ENOMEM;
2570 }