// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);
/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}
/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (here.ptr != ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
			*shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
				!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
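/*
 * Illustrative sketch (editor's addition, not from the original driver):
 * the lockstep hw/shadow walk that periodic_unlink() above relies on,
 * shown in isolation.  The helper name and its use are hypothetical; it
 * simply counts the entries hanging off one periodic slot.
 */
static unsigned __maybe_unused periodic_count_entries(struct ehci_hcd *ehci,
		unsigned frame)
{
	union ehci_shadow *prev = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev;
	unsigned count = 0;

	/* the hw and shadow lists stay in sync; advance both each step */
	while (here.ptr) {
		++count;
		prev = periodic_next_shadow(ehci, prev,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev;
	}
	return count;
}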
/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct ehci_tt *tt, **tt_index, **ptt;
	unsigned port;
	bool allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					sizeof(*tt_index), GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd *ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}
/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct ehci_tt *tt, **tt_index, **ptt;
	int cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}
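/*
 * Editor's note (illustrative): find_tt() and drop_tt() bracket every
 * full/low-speed bandwidth reservation below.  The reserve path does,
 * in sketch:
 *
 *	tt = find_tt(udev);		(may return an ERR_PTR)
 *	list_add_tail(&ps->ps_list, &tt->ps_list);
 *
 * and the release path does list_del(&ps->ps_list) and then calls
 * drop_tt(udev), which frees the ehci_tt once its ps_list is empty.
 */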
static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}
static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned start_uf;
	unsigned i, j, m;
	int usecs = qh->ps.usecs;
	int c_usecs = qh->ps.c_usecs;
	int tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt *tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
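/*
 * Editor's worked example (hypothetical numbers): a high-speed interrupt
 * endpoint with usecs = 40 and bw_uperiod = 8 adds 40 us to every 8th
 * entry of ehci->bandwidth[], starting at its phase uframe.  Release
 * reuses the same loops because sign < 0 negated usecs, c_usecs, and
 * tt_usecs above.
 */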
/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched *ps;
	unsigned uframe, uf, x;
	u8 *budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}
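/*
 * Editor's worked example (hypothetical numbers) for the loop above: an
 * endpoint with tt_usecs = 300 and phase_uf = 1 fills budget_line[] as
 * {0, 125, 125, 50, 0, ...}; each microframe holds at most 125 us and
 * the remainder spills forward.
 */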
static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
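/*
 * Editor's note: the table above caps how much full/low-speed time may
 * land in each microframe of a frame: uframes 0-5 can take a full
 * 125 us, uframe 6 only 30 us, and uframe 7 none, so carried-over work
 * never runs into the end of the frame.  (tt_available() below
 * separately caps the whole frame's TT load at 900 us.)
 */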
/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
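/*
 * Editor's worked example (hypothetical numbers): tt_usecs =
 * {190, 0, ...} becomes {125, 65, ...} after one pass above; anything
 * still left over at uframe 7 makes tt_available() below reject the
 * schedule.
 */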
/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * downstream port.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd *ehci,
	struct ehci_per_sched *ps,
	struct ehci_tt *tt,
	unsigned frame,
	unsigned uframe
)
{
	unsigned period = ps->bw_period;
	unsigned usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned i, uf;
		unsigned short tt_usecs[8];

		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}
#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd *ehci,
	unsigned period,
	struct usb_device *dev,
	unsigned frame,
	u32 uf_mask
)
{
	if (period == 0)	/* error */
		return -1;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow here;
		__hc32 type;
		struct ehci_qh_hw *hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32 mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16 mask;

					mask = hc32_to_cpu(ehci, here.sitd
							->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow *prev = &ehci->pshadow[i];
		__hc32 *hw_p = &ehci->periodic[i];
		union ehci_shadow here = *prev;
		__hc32 type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}
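/*
 * Editor's worked example (hypothetical numbers): an interrupt QH with
 * usecs = 20, c_usecs = 0 and bw_period = 4 adds 20 / 4 = 5 us per frame
 * to the usbfs total above; bw_period == 0 (polled every microframe)
 * accounts usecs * 8 per frame instead.
 */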
static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink(ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}
static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
	 * avoiding unnecessary CPU wakeup
	 */
}
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic(ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}
/*
 * Usually only one interrupt URB is scheduled on a qh, and since
 * complete() runs in tasklet context, introduce a small delay to avoid
 * unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
		struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw *hw = qh->hw;
	int rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/
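/*
 * Editor's note on check_period() below: instead of adding the
 * endpoint's usecs to every bandwidth[] entry and comparing against the
 * cap, the test is inverted once up front ("usecs we need" becomes "max
 * already claimed").  Hypothetical numbers: with uframe_periodic_max =
 * 100 and a 30 us request, a uframe passes iff bandwidth[uframe] <= 70.
 */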
static int check_period(
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	unsigned uperiod,
	unsigned usecs
)
{
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	/* success! */
	return 1;
}
static int check_intr_schedule(
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	struct ehci_qh *qh,
	unsigned *c_maskp,
	struct ehci_tt *tt
)
{
	int retval = -ENOSPC;
	u8 mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}
/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int status = 0;
	unsigned uframe;
	unsigned c_mask;
	struct ehci_qh_hw *hw = qh->hw;
	struct ehci_tt *tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int i;
		unsigned frame;

		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}
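/*
 * Editor's worked example (hypothetical numbers) for the masks set in
 * qh_schedule() above: uframe = 1 with c_mask = 0x1c yields cs_mask =
 * 0x1c02, i.e. the S-mask bit for uframe 1 in the low byte and C-mask
 * bits for uframes 2-4 in the high byte, matching the QH_SMASK/QH_CMASK
 * fields written into hw_info2.
 */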
static int intr_submit(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct list_head *qtd_list,
	gfp_t mem_flags
)
{
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(ehci, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (status)
		qtd_list_free(ehci, urb, qtd_list);

	return status;
}
static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh *qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}
992 /* ehci_iso_stream ops work with both ITD and SITD */
994 static struct ehci_iso_stream *
995 iso_stream_alloc(gfp_t mem_flags)
997 struct ehci_iso_stream *stream;
999 stream = kzalloc(sizeof(*stream), mem_flags);
1000 if (likely(stream != NULL)) {
1001 INIT_LIST_HEAD(&stream->td_list);
1002 INIT_LIST_HEAD(&stream->free_list);
1003 stream->next_uframe = NO_FRAME;
1004 stream->ps.phase = NO_FRAME;
static void
iso_stream_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device *dev = urb->dev;
	u32 buf1;
	unsigned epnum, maxp;
	int is_input;
	unsigned tmp;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
	buf1 = is_input ? 1 << 11 : 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);

		stream->highspeed = 1;

		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32 addr;
		int think_time;
		int hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt->think_time;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max(1u, (maxp + 187) / 188);
		if (is_input) {
			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];
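		/*
		 * Editor's worked example (hypothetical): a full-speed iso
		 * IN endpoint with maxp = 192 needs hs_transfers =
		 * (192 + 187) / 188 = 2, so cs_mask above becomes
		 * 1 | (0xf << 10): SSPLIT in uframe 0, CSPLITs in uframes
		 * 2-5.  An OUT endpoint would instead take smask_out[1] =
		 * 0x03, two back-to-back start-splits.
		 */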
		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}
static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned epnum;
	struct ehci_iso_stream *stream;
	struct usb_host_endpoint *ep;
	unsigned long flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep[epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}
/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched *iso_sched;
	int size = sizeof(*iso_sched);

	size += packets * sizeof(struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}
static inline void
itd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *uframe = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
static void
iso_sched_free(
	struct ehci_iso_stream *stream,
	struct ehci_iso_sched *iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}
static int
itd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_itd *itd;
	dma_addr_t itd_dma;
	int i;
	unsigned num_itds;
	struct ehci_iso_sched *sched;
	unsigned long flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
alloc_itd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned uframe;
	unsigned i, j;
	unsigned s_mask, c_mask, m;
	int usecs = stream->ps.usecs;
	int c_usecs = stream->ps.c_usecs;
	int tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt *tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {			/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
static inline int
itd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe
)
{
	unsigned usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}
static int
sitd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe,
	struct ehci_iso_sched *sched,
	struct ehci_tt *tt
)
{
	unsigned mask, tmp;
	unsigned frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned max_used;
		unsigned i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2 + 8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}
/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */
static int
iso_stream_schedule(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct ehci_iso_stream *stream
)
{
	u32 now, base, next, start, period, span, now2;
	u32 wrap = 0, skip = 0;
	int status = 0;
	unsigned mod = ehci->periodic_size << 3;
	struct ehci_iso_sched *sched = urb->hcpriv;
	bool empty = list_empty(&stream->td_list);
	bool new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int done = 0;
			struct ehci_tt *tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}
	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));
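	/*
	 * Editor's worked example (hypothetical numbers) for the line
	 * above: next = 21, start = 16, period = 8 gives
	 * (16 - 21) & 7 = 3, so start becomes 21 + 3 = 24, the first
	 * period-aligned slot at or after "next".
	 */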
use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}
/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}
static inline void
itd_patch(
	struct ehci_hcd *ehci,
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	unsigned pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64 bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
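/*
 * Editor's worked example (hypothetical): a 512-byte packet whose buffer
 * starts at page offset 0xfe0 spans two 4 KB pages, so itd_sched_init()
 * set uf->cross and itd_patch() above primes the next page-pointer slot
 * (pg + 1) with bufp + 4096.
 */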
static void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow *prev = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev;
	__hc32 type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb();
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe, uframe, frame;
	struct ehci_iso_sched *iso_sched = urb->hcpriv;
	struct ehci_itd *itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely(list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT: we have all necessary itds */
			/* BUG_ON(list_empty(&iso_sched->td_list)); */

			/* ASSERT: no itds for this endpoint in this uframe */

			itd = list_entry(iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct ehci_iso_stream *stream = itd->stream;
	bool retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/*
	 * ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	BUG_ON(itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (unlikely(list_is_singular(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}
/*-------------------------------------------------------------------------*/

static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
done:
	return status;
}
/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
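/*
 * Editor's worked example (hypothetical): a 400-byte full-speed iso OUT
 * packet needs (400 + 187) / 188 = 3 start-splits, so sitd_sched_init()
 * above stores T-count = 3 with TP = BEGIN (the 1 << 3 bit) in buf1.
 */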
static int
sitd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE: for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
alloc_sitd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct ehci_sitd *sitd,
	struct ehci_iso_sched *iso_sched,
	unsigned index
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	u64 bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe;
	struct ehci_iso_sched *sched = urb->hcpriv;
	struct ehci_sitd *sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT: we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT: no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
				| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb *urb = sitd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	int urb_index;
	struct ehci_iso_stream *stream = sitd->stream;
	bool retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg(ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
done:
	return status;
}

/*-------------------------------------------------------------------------*/
static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned uf, now_frame, frame;
	unsigned fmask = ehci->periodic_size - 1;
	bool modified, live;
	union ehci_shadow q, *q_p;
	__hc32 type, *hw_p;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;

restart:
	/* Scan each element in frame's queue for completions */
	q_p = &ehci->pshadow[frame];
	hw_p = &ehci->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(ehci, *hw_p);
	modified = false;

	while (q.ptr != NULL) {
		switch (hc32_to_cpu(ehci, type)) {
		case Q_TYPE_ITD:
			/*
			 * If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(ehci))
						break;
				}
				if (uf < 8) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/*
			 * Take finished ITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.  HC won't cache the
			 * pointer for much longer, if at all.
			 */
			*q_p = q.itd->itd_next;
			if (!ehci->use_dummy_qh ||
					q.itd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.itd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
			wmb();
			modified = itd_complete(ehci, q.itd);
			q = *q_p;
			break;
		case Q_TYPE_SITD:
			/*
			 * If this SITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (((frame == now_frame) ||
					(((frame + 1) & fmask) == now_frame))
				&& live
				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {

				q_p = &q.sitd->sitd_next;
				hw_p = &q.sitd->hw_next;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				q = *q_p;
				break;
			}

			/*
			 * Take finished SITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.
			 */
			*q_p = q.sitd->sitd_next;
			if (!ehci->use_dummy_qh ||
					q.sitd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.sitd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
			wmb();
			modified = sitd_complete(ehci, q.sitd);
			q = *q_p;
			break;
		default:
			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			/* BUG(); */
			fallthrough;
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* Assume completion callbacks modify the queue */
		if (unlikely(modified && ehci->isoc_count > 0))
			goto restart;
	}

	/* Stop when we have reached the current frame */
	if (frame == now_frame)
		return;

	/* The last frame may still have active siTDs */
	ehci->last_iso_frame = frame;
	frame = (frame + 1) & fmask;
	goto restart;
}