// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */
#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"
15 static int is_clear_halt_cmd(struct urb *urb)
17 struct usb_ctrlrequest *req;
19 req = (struct usb_ctrlrequest *) urb->setup_packet;
21 return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
22 (req->bRequestType == USB_RECIP_ENDPOINT) &&
23 (req->wValue == USB_ENDPOINT_HALT);
26 static int is_set_interface_cmd(struct urb *urb)
28 struct usb_ctrlrequest *req;
30 req = (struct usb_ctrlrequest *) urb->setup_packet;
32 return (req->bRequest == USB_REQ_SET_INTERFACE) &&
33 (req->bRequestType == USB_RECIP_INTERFACE);
36 static int is_set_configuration_cmd(struct urb *urb)
38 struct usb_ctrlrequest *req;
40 req = (struct usb_ctrlrequest *) urb->setup_packet;
42 return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
43 (req->bRequestType == USB_RECIP_DEVICE);
46 static int is_reset_device_cmd(struct urb *urb)
48 struct usb_ctrlrequest *req;
52 req = (struct usb_ctrlrequest *) urb->setup_packet;
53 value = le16_to_cpu(req->wValue);
54 index = le16_to_cpu(req->wIndex);
56 if ((req->bRequest == USB_REQ_SET_FEATURE) &&
57 (req->bRequestType == USB_RT_PORT) &&
58 (value == USB_PORT_FEAT_RESET)) {
59 usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
65 static int tweak_clear_halt_cmd(struct urb *urb)
67 struct usb_ctrlrequest *req;
73 req = (struct usb_ctrlrequest *) urb->setup_packet;
76 * The stalled endpoint is specified in the wIndex value. The endpoint
77 * of the urb is the target of this clear_halt request (i.e., control
80 target_endp = le16_to_cpu(req->wIndex) & 0x000f;
82 /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */
83 target_dir = le16_to_cpu(req->wIndex) & 0x0080;
86 target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
88 target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
90 ret = usb_clear_halt(urb->dev, target_pipe);
92 dev_err(&urb->dev->dev,
93 "usb_clear_halt error: devnum %d endp %d ret %d\n",
94 urb->dev->devnum, target_endp, ret);
96 dev_info(&urb->dev->dev,
97 "usb_clear_halt done: devnum %d endp %d\n",
98 urb->dev->devnum, target_endp);
103 static int tweak_set_interface_cmd(struct urb *urb)
105 struct usb_ctrlrequest *req;
110 req = (struct usb_ctrlrequest *) urb->setup_packet;
111 alternate = le16_to_cpu(req->wValue);
112 interface = le16_to_cpu(req->wIndex);
114 usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
115 interface, alternate);
117 ret = usb_set_interface(urb->dev, interface, alternate);
119 dev_err(&urb->dev->dev,
120 "usb_set_interface error: inf %u alt %u ret %d\n",
121 interface, alternate, ret);
123 dev_info(&urb->dev->dev,
124 "usb_set_interface done: inf %u alt %u\n",
125 interface, alternate);
130 static int tweak_set_configuration_cmd(struct urb *urb)
132 struct stub_priv *priv = (struct stub_priv *) urb->context;
133 struct stub_device *sdev = priv->sdev;
134 struct usb_ctrlrequest *req;
138 req = (struct usb_ctrlrequest *) urb->setup_packet;
139 config = le16_to_cpu(req->wValue);
141 usb_lock_device(sdev->udev);
142 err = usb_set_configuration(sdev->udev, config);
143 usb_unlock_device(sdev->udev);
144 if (err && err != -ENODEV)
145 dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
150 static int tweak_reset_device_cmd(struct urb *urb)
152 struct stub_priv *priv = (struct stub_priv *) urb->context;
153 struct stub_device *sdev = priv->sdev;
155 dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
157 if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
158 dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
161 usb_reset_device(sdev->udev);
162 usb_unlock_device(sdev->udev);
168 * clear_halt, set_interface, and set_configuration require special tricks.
170 static void tweak_special_requests(struct urb *urb)
172 if (!urb || !urb->setup_packet)
175 if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
178 if (is_clear_halt_cmd(urb))
179 /* tweak clear_halt */
180 tweak_clear_halt_cmd(urb);
182 else if (is_set_interface_cmd(urb))
183 /* tweak set_interface */
184 tweak_set_interface_cmd(urb);
186 else if (is_set_configuration_cmd(urb))
187 /* tweak set_configuration */
188 tweak_set_configuration_cmd(urb);
190 else if (is_reset_device_cmd(urb))
191 tweak_reset_device_cmd(urb);
193 usbip_dbg_stub_rx("no need to tweak\n");
197 * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
198 * By unlinking the urb asynchronously, stub_rx can continuously
199 * process coming urbs. Even if the urb is unlinked, its completion
200 * handler will be called and stub_tx will send a return pdu.
202 * See also comments about unlinking strategy in vhci_hcd.c.
204 static int stub_recv_cmd_unlink(struct stub_device *sdev,
205 struct usbip_header *pdu)
209 struct stub_priv *priv;
211 spin_lock_irqsave(&sdev->priv_lock, flags);
213 list_for_each_entry(priv, &sdev->priv_init, list) {
214 if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
218 * This matched urb is not completed yet (i.e., be in
219 * flight in usb hcd hardware/driver). Now we are
220 * cancelling it. The unlinking flag means that we are
221 * now not going to return the normal result pdu of a
222 * submission request, but going to return a result pdu
223 * of the unlink request.
228 * In the case that unlinking flag is on, prev->seqnum
229 * is changed from the seqnum of the cancelling urb to
230 * the seqnum of the unlink request. This will be used
231 * to make the result pdu of the unlink request.
233 priv->seqnum = pdu->base.seqnum;
235 spin_unlock_irqrestore(&sdev->priv_lock, flags);
238 * usb_unlink_urb() is now out of spinlocking to avoid
239 * spinlock recursion since stub_complete() is
240 * sometimes called in this context but not in the
241 * interrupt context. If stub_complete() is executed
242 * before we call usb_unlink_urb(), usb_unlink_urb()
243 * will return an error value. In this case, stub_tx
244 * will return the result pdu of this unlink request
245 * though submission is completed and actual unlinking
246 * is not executed. OK?
248 /* In the above case, urb->status is not -ECONNRESET,
249 * so a driver in a client host will know the failure
250 * of the unlink request ?
252 for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
253 ret = usb_unlink_urb(priv->urbs[i]);
254 if (ret != -EINPROGRESS)
255 dev_err(&priv->urbs[i]->dev->dev,
256 "failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
257 i + 1, priv->num_urbs,
263 usbip_dbg_stub_rx("seqnum %d is not pending\n",
264 pdu->u.cmd_unlink.seqnum);
267 * The urb of the unlink target is not found in priv_init queue. It was
268 * already completed and its results is/was going to be sent by a
269 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
270 * return the completeness of this unlink request to vhci_hcd.
272 stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);
274 spin_unlock_irqrestore(&sdev->priv_lock, flags);
279 static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
281 struct usbip_device *ud = &sdev->ud;
284 if (pdu->base.devid == sdev->devid) {
285 spin_lock_irq(&ud->lock);
286 if (ud->status == SDEV_ST_USED) {
287 /* A request is valid. */
290 spin_unlock_irq(&ud->lock);
296 static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
297 struct usbip_header *pdu)
299 struct stub_priv *priv;
300 struct usbip_device *ud = &sdev->ud;
303 spin_lock_irqsave(&sdev->priv_lock, flags);
305 priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
307 dev_err(&sdev->udev->dev, "alloc stub_priv\n");
308 spin_unlock_irqrestore(&sdev->priv_lock, flags);
309 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
313 priv->seqnum = pdu->base.seqnum;
317 * After a stub_priv is linked to a list_head,
318 * our error handler can free allocated data.
320 list_add_tail(&priv->list, &sdev->priv_init);
322 spin_unlock_irqrestore(&sdev->priv_lock, flags);
327 static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
329 struct usb_device *udev = sdev->udev;
330 struct usb_host_endpoint *ep;
331 struct usb_endpoint_descriptor *epd = NULL;
332 int epnum = pdu->base.ep;
333 int dir = pdu->base.direction;
335 if (epnum < 0 || epnum > 15)
338 if (dir == USBIP_DIR_IN)
339 ep = udev->ep_in[epnum & 0x7f];
341 ep = udev->ep_out[epnum & 0x7f];
347 if (usb_endpoint_xfer_control(epd)) {
348 if (dir == USBIP_DIR_OUT)
349 return usb_sndctrlpipe(udev, epnum);
351 return usb_rcvctrlpipe(udev, epnum);
354 if (usb_endpoint_xfer_bulk(epd)) {
355 if (dir == USBIP_DIR_OUT)
356 return usb_sndbulkpipe(udev, epnum);
358 return usb_rcvbulkpipe(udev, epnum);
361 if (usb_endpoint_xfer_int(epd)) {
362 if (dir == USBIP_DIR_OUT)
363 return usb_sndintpipe(udev, epnum);
365 return usb_rcvintpipe(udev, epnum);
368 if (usb_endpoint_xfer_isoc(epd)) {
369 /* validate number of packets */
370 if (pdu->u.cmd_submit.number_of_packets < 0 ||
371 pdu->u.cmd_submit.number_of_packets >
372 USBIP_MAX_ISO_PACKETS) {
373 dev_err(&sdev->udev->dev,
374 "CMD_SUBMIT: isoc invalid num packets %d\n",
375 pdu->u.cmd_submit.number_of_packets);
378 if (dir == USBIP_DIR_OUT)
379 return usb_sndisocpipe(udev, epnum);
381 return usb_rcvisocpipe(udev, epnum);
386 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
390 static void masking_bogus_flags(struct urb *urb)
393 struct usb_device *dev;
394 struct usb_host_endpoint *ep;
396 unsigned int allowed;
398 if (!urb || urb->hcpriv || !urb->complete)
401 if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
404 ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
405 [usb_pipeendpoint(urb->pipe)];
409 xfertype = usb_endpoint_type(&ep->desc);
410 if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
411 struct usb_ctrlrequest *setup =
412 (struct usb_ctrlrequest *) urb->setup_packet;
416 is_out = !(setup->bRequestType & USB_DIR_IN) ||
419 is_out = usb_endpoint_dir_out(&ep->desc);
422 /* enforce simple/standard policy */
423 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
424 URB_DIR_MASK | URB_FREE_BUFFER);
426 case USB_ENDPOINT_XFER_BULK:
428 allowed |= URB_ZERO_PACKET;
430 default: /* all non-iso endpoints */
432 allowed |= URB_SHORT_NOT_OK;
434 case USB_ENDPOINT_XFER_ISOC:
435 allowed |= URB_ISO_ASAP;
438 urb->transfer_flags &= allowed;
441 static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
446 for (i = 0; i < priv->num_urbs; i++) {
447 ret = usbip_recv_xbuff(ud, priv->urbs[i]);
455 static void stub_recv_cmd_submit(struct stub_device *sdev,
456 struct usbip_header *pdu)
458 struct stub_priv *priv;
459 struct usbip_device *ud = &sdev->ud;
460 struct usb_device *udev = sdev->udev;
461 struct scatterlist *sgl = NULL, *sg;
463 unsigned long long buf_len;
466 int pipe = get_pipe(sdev, pdu);
467 int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
476 * Smatch reported the error case where use_sg is true and buf_len is 0.
477 * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
478 * released by stub event handler and connection will be shut down.
480 priv = stub_priv_alloc(sdev, pdu);
484 buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
486 if (use_sg && !buf_len) {
487 dev_err(&udev->dev, "sg buffer with zero length\n");
491 /* allocate urb transfer buffer, if needed */
494 sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
498 /* Check if the server's HCD supports SG */
499 if (!udev->bus->sg_tablesize) {
501 * If the server's HCD doesn't support SG, break
502 * a single SG request into several URBs and map
503 * each SG list entry to corresponding URB
504 * buffer. The previously allocated SG list is
505 * stored in priv->sgl (If the server's HCD
506 * support SG, SG list is stored only in
507 * urb->sg) and it is used as an indicator that
508 * the server split single SG request into
509 * several URBs. Later, priv->sgl is used by
510 * stub_complete() and stub_send_ret_submit() to
511 * reassemble the divied URBs.
515 priv->completed_urbs = 0;
516 pdu->u.cmd_submit.transfer_flags &=
520 buffer = kzalloc(buf_len, GFP_KERNEL);
526 /* allocate urb array */
527 priv->num_urbs = num_urbs;
528 priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
534 if (usb_pipeisoc(pipe))
535 np = pdu->u.cmd_submit.number_of_packets;
537 priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
543 priv->urbs[0]->sg = sgl;
544 priv->urbs[0]->num_sgs = nents;
545 priv->urbs[0]->transfer_buffer = NULL;
547 priv->urbs[0]->transfer_buffer = buffer;
551 /* copy urb setup packet */
552 priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
554 if (!priv->urbs[0]->setup_packet) {
555 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
559 usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
561 for_each_sg(sgl, sg, nents, i) {
562 priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
563 /* The URBs which is previously allocated will be freed
564 * in stub_device_cleanup_urbs() if error occurs.
569 usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
570 priv->urbs[i]->transfer_buffer = sg_virt(sg);
571 priv->urbs[i]->transfer_buffer_length = sg->length;
576 for (i = 0; i < num_urbs; i++) {
577 /* set other members from the base header of pdu */
578 priv->urbs[i]->context = (void *) priv;
579 priv->urbs[i]->dev = udev;
580 priv->urbs[i]->pipe = pipe;
581 priv->urbs[i]->complete = stub_complete;
583 /* no need to submit an intercepted request, but harmless? */
584 tweak_special_requests(priv->urbs[i]);
586 masking_bogus_flags(priv->urbs[i]);
589 if (stub_recv_xbuff(ud, priv) < 0)
592 if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
595 /* urb is now ready to submit */
596 for (i = 0; i < priv->num_urbs; i++) {
597 ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
600 usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
603 dev_err(&udev->dev, "submit_urb error, %d\n", ret);
604 usbip_dump_header(pdu);
605 usbip_dump_urb(priv->urbs[i]);
609 * This connection will be discarded.
611 usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
616 usbip_dbg_stub_rx("Leave\n");
625 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
629 static void stub_rx_pdu(struct usbip_device *ud)
632 struct usbip_header pdu;
633 struct stub_device *sdev = container_of(ud, struct stub_device, ud);
634 struct device *dev = &sdev->udev->dev;
636 usbip_dbg_stub_rx("Enter\n");
638 memset(&pdu, 0, sizeof(pdu));
640 /* receive a pdu header */
641 ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
642 if (ret != sizeof(pdu)) {
643 dev_err(dev, "recv a header, %d\n", ret);
644 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
648 usbip_header_correct_endian(&pdu, 0);
650 if (usbip_dbg_flag_stub_rx)
651 usbip_dump_header(&pdu);
653 if (!valid_request(sdev, &pdu)) {
654 dev_err(dev, "recv invalid request\n");
655 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
659 switch (pdu.base.command) {
660 case USBIP_CMD_UNLINK:
661 stub_recv_cmd_unlink(sdev, &pdu);
664 case USBIP_CMD_SUBMIT:
665 stub_recv_cmd_submit(sdev, &pdu);
670 dev_err(dev, "unknown pdu\n");
671 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
676 int stub_rx_loop(void *data)
678 struct usbip_device *ud = data;
680 while (!kthread_should_stop()) {
681 if (usbip_event_happened(ud))