#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)

static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->urb_list);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
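
/*
 * Example (editor's sketch, not from the original file): a driver that
 * embeds a struct urb in its own state must initialize it once before
 * first use.  The "my_device" structure below is hypothetical.
 *
 *	struct my_device {
 *		struct usb_device *udev;
 *		struct urb int_urb;	// urb storage owned by the driver
 *	};
 *
 *	static void my_device_prepare(struct my_device *mydev)
 *	{
 *		usb_init_urb(&mydev->int_urb);	// prepare the embedded urb
 *	}
 */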

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
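
/*
 * Example (editor's sketch): allocating urbs for the two common cases.
 * Pass 0 iso packets for bulk, control, or interrupt transfers; pass the
 * packet count for isochronous transfers.  The count of 8 is illustrative.
 *
 *	struct urb *bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
 *	struct urb *iso_urb  = usb_alloc_urb(8, GFP_KERNEL);	// 8 iso packets
 *
 *	if (!bulk_urb || !iso_urb) {
 *		usb_free_urb(bulk_urb);	// usb_free_urb(NULL) is safe
 *		usb_free_urb(iso_urb);
 *		return -ENOMEM;
 *	}
 */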

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);
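
/*
 * Example (editor's sketch): reference counting keeps a urb alive while
 * two parties use it.  Each usb_get_urb() must later be balanced by a
 * usb_free_urb() on the same urb.
 *
 *	struct urb *kept = usb_get_urb(urb);	// take an extra reference
 *	// ... hand "urb" to another layer that may free it ...
 *	usb_free_urb(kept);			// drop our reference when done
 */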

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
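
/*
 * Example (editor's sketch): anchoring lets a driver cancel in-flight urbs
 * later without keeping its own list.  "mydev->submitted" is a hypothetical
 * struct usb_anchor initialized elsewhere with init_usb_anchor().
 *
 *	usb_anchor_urb(urb, &mydev->submitted);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);
 *	if (ret)
 *		usb_unanchor_urb(urb);	// failed submit: take it off again
 */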

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention.  To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

static const int pipetypes[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/**
 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
 * @urb: urb to be checked
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given urb.  It returns 0 if the urb contains a valid endpoint, otherwise
 * a negative error code.
 */
int usb_urb_ep_type_check(const struct urb *urb)
{
	const struct usb_host_endpoint *ep;

	ep = usb_pipe_endpoint(urb->dev, urb->pipe);
	if (!ep)
		return -EINVAL;
	if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
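
/*
 * Example (editor's sketch): a driver that builds a urb by hand can use
 * this check to catch a pipe/endpoint type mismatch before submission.
 *
 *	if (usb_urb_ep_type_check(urb)) {
 *		dev_err(&udev->dev, "pipe does not match endpoint type\n");
 *		return -EINVAL;
 *	}
 */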

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB.  When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request.  The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future.  Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added.  If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule.  If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past.  When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions.  A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int				xfertype, max;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;
	unsigned int			allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int	n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int	burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int	mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_urb_ep_type_check(urb))
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
			/* FALLTHROUGH */
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				urb->interval = 1 << 15;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				urb->interval = 16;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
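
/*
 * Example (editor's sketch): the usual asynchronous bulk-OUT pattern.
 * The completion handler runs in interrupt context, so any allocation or
 * resubmission there must use GFP_ATOMIC, per the memory-flag rules
 * documented above.  All "my_*" names and the endpoint number are
 * hypothetical.
 *
 *	static void my_write_complete(struct urb *urb)
 *	{
 *		if (urb->status)	// nonzero means error or unlink
 *			dev_dbg(&urb->dev->dev, "tx status %d\n", urb->status);
 *	}
 *
 *	static int my_write(struct usb_device *udev, void *buf, int len)
 *	{
 *		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *		int ret;
 *
 *		if (!urb)
 *			return -ENOMEM;
 *		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *				  buf, len, my_write_complete, NULL);
 *		ret = usb_submit_urb(urb, GFP_KERNEL);
 *		usb_free_urb(urb);	// the HCD holds its own reference
 *		return ret;
 *	}
 */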

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink.  Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver.  When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success.  See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
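
/*
 * Example (editor's sketch): asynchronous cancellation is safe in atomic
 * context such as a timer callback.  The urb is not finished when this
 * returns; its completion handler will run later with -ECONNRESET.  The
 * "my_device" structure and its fields are hypothetical.
 *
 *	static void my_timeout(struct timer_list *t)
 *	{
 *		struct my_device *mydev = from_timer(mydev, t, timer);
 *
 *		usb_unlink_urb(mydev->active_urb);	// must not sleep here
 *	}
 */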

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
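
/*
 * Example (editor's sketch): synchronous cancellation in a disconnect()
 * method, the use case the documentation above recommends.  usb_kill_urb()
 * may sleep, so it must not be called in atomic context.  "my_device" and
 * its fields are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_device *mydev = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(mydev->int_urb);	// returns with the urb idle
 *		usb_free_urb(mydev->int_urb);
 *	}
 */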

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);
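
/*
 * Example (editor's sketch): blocking resubmission before teardown.  After
 * usb_block_urb(), a completion handler that tries to resubmit gets -EPERM
 * and the urb settles; usb_unpoison_urb() lifts the block again, since both
 * operate on the same reject counter.
 *
 *	usb_block_urb(mydev->rx_urb);	// resubmits now fail with -EPERM
 *	usb_kill_urb(mydev->rx_urb);	// wait for it to become idle
 */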

/**
 * usb_kill_anchored_urbs - kill all URBs associated with an anchor
 * @anchor: anchor the requests are bound to
 *
 * This kills all outstanding URBs starting from the back of the queue,
 * with guarantee that no completion callbacks will take place from the
 * anchor after this function returns.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_kill_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
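
/*
 * Example (editor's sketch): with every in-flight urb anchored at submit
 * time, a disconnect() method can cancel them all in one call.  "my_device"
 * and its "submitted" anchor are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_device *mydev = usb_get_intfdata(intf);
 *
 *		usb_kill_anchored_urbs(&mydev->submitted);
 *	}
 */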

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		anchor->poisoned = 1;
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_poison_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters.  This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused.  Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
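
/*
 * Example (editor's sketch): a suspend path that gives outstanding urbs a
 * grace period to finish on their own, then forcibly cancels stragglers.
 * The 1000 ms timeout and "mydev->submitted" anchor are hypothetical.
 *
 *	if (!usb_wait_anchor_empty_timeout(&mydev->submitted, 1000))
 *		usb_kill_anchored_urbs(&mydev->submitted);	// timed out
 */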

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;
	int surely_empty;

	do {
		spin_lock_irqsave(&anchor->lock, flags);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			__usb_unanchor_urb(victim, anchor);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irqrestore(&anchor->lock, flags);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);