/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
static const char * const ep0_state_string[] = {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING",
};
/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * Check if the bd_table struct is allocated;
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory.
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}

		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);

		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}
/*
 * Chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] =
			0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}
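/*
 * Layout illustration (example values, not taken from bdc.h): with
 * NUM_TABLES = 2 and NUM_BDS_PER_TABLE = 32, the bdl looks like
 *
 *	table0: bd0 .. bd30, bd31 = chain bd -> table1->dma
 *	table1: bd0 .. bd30, bd31 = chain bd -> table0->dma
 *
 * so the hardware follows the chain bds and sees one circular ring; the
 * chain bds never carry data, which is why the accounting helpers below
 * subtract one bd per table.
 */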
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kzalloc(
					num_tabs * sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;

		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	/* Close the ring: chain the last table back to the first one */
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}
/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}
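/*
 * Worked example (assuming BD_MAX_BUFF_SIZE is 64KiB): a 100KiB request
 * needs one full-size bd plus one bd for the remaining 36KiB, so this
 * returns 2; a 128KiB request divides evenly and also returns 2; a
 * zero-length request still needs 1 bd.
 */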
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find the table this bd_dma_addr belongs to: go through the table
	 * array and compare the addresses of the first and last bd of each
	 * table.
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}
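/*
 * Example (illustrative, assuming num_bds_table = 32): a dma address that
 * falls 5 bds into table 2 gives a local bdi of 5 and a global bdi of
 * 2 * 32 + 5 = 69, directly comparable with eqp_bdi/hwd_bdi which are
 * kept as global indexes.
 */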
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}
/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
/*
 * How many transfer bd's are available on this ep bdl; chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where the eqp and dqp pointers are, calculate the
	 * number of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if the ring is full or empty,
	 * so reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
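/*
 * Example (illustrative numbers): with max_bdi = 63 and num_tabs = 2 an
 * empty ring reports 63 - 2 = 61 bds. With eqp_bdi = 30 and hwd_bdi = 4
 * (hw dqp wrapped), available1 = 33 containing one chain bd, available2
 * = 4 containing none, so 33 + 4 - 1 = 36 bds, 35 after reserving one bd
 * to disambiguate full from empty.
 */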
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have any way to check if the ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}
/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's a chain bd, then move to the next one */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
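/*
 * Example (assuming num_bds_table = 32): advancing from eqp_bdi = 30
 * first lands on bdi 31, which is a chain bd ((31 + 1) % 32 == 0), so the
 * pointer skips ahead to 32; advancing past max_bdi wraps back to 0.
 */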
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep = req->ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);

			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write the bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
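/*
 * BD layout recap for the loop above: offset[0]/offset[1] hold the 64-bit
 * buffer address, offset[2] the buffer length plus the interrupt target,
 * offset[3] the control flags (SOT/EOT/IOC/ISP/SBF). The SBF dance
 * matters: the bd following the transfer is marked "stop bd fetch" before
 * SBF is cleared on the first bd, so the HW can never run past the end of
 * a half-written transfer.
 */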
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on this ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}
/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
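/*
 * The lock is dropped around the giveback because the gadget driver's
 * ->complete() callback may re-enter this UDC (e.g. to queue the next
 * request) and would otherwise deadlock on bdc->lock.
 */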
/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}
/* Enable the ep */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
				__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return ret;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}
/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}
/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if the status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * if delayed status is false and a 0 length transfer is
		 * requested, i.e. for the status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;
	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct usb_request *ep0_usb_req;
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep0_usb_req = &bdc->ep0_req.usb_req;
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}
/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
				__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* if still connected, stop the ep to see where the HW is ? */
	if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
		ret = bdc_stop_ep(bdc, ep->ep_num);
		/* if there is an issue, then no need to go further */
		if (ret)
			return 0;
	} else
		return 0;

	/*
	 * After the endpoint is stopped, there can be 3 cases: the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to the actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start and end are processed, we cannot deq the req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;
	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to a HW limitation we need to bypass chain bd's and issue
	 * ep_bla; in case start is pending and this is the first request in
	 * the list, then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_to_be_dequeued bd
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
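/*
 * Summary of the dequeue strategy above: a request the HW has not started
 * on yet is unlinked by turning its first bd into a chain bd that jumps
 * over the whole transfer, while a request caught mid-flight needs the
 * bla (buffer list address) command to move the HW dequeue pointer past
 * it; a fully processed request cannot be dequeued at all.
 */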
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;
		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}
/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}
/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
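/*
 * The PTC (port test control) field sits in the top nibble of USPPM2,
 * hence the << 28 above; the selector values (Test_J, Test_K,
 * Test_SE0_NAK, Test_Packet, Test_Force_Enable) come from the USB2 spec
 * test mode selectors table.
 */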
/*
 * Helper function to handle Transfer status report with status as either
 * XSF_SUCC or XSF_SHORT
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
				struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);

	/* do not process this sr if the ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * If sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is a multi bd xfr, lets see which bd
		 * caused the short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued on the short bd */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
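/*
 * Worked example for the short-transfer math above (illustrative numbers,
 * assuming BD_MAX_BUFF_SIZE is 64KiB): a 3 bd transfer starting at bdi 0
 * that goes short on bdi 2 has max_len_bds = 2 full bds (131072 bytes);
 * if the short bd had 10000 bytes queued and the status report says 4000
 * bytes were left untransferred, actual = 131072 + (10000 - 4000) =
 * 137072 bytes.
 */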
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store the received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}
/* Stall ep0 */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}
/* SET_ADDRESS handler */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue the Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}
/* Handler for SET/CLEAR FEATURE requests for the device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
				  u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
				__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
					state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}
/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wLength;
	u16 wValue;
	u16 wIndex;
	u8 epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);
	wLength = le16_to_cpu(setup_pkt->wLength);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;
		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum = epnum * 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when the SETUP
		 * packet is received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
/* GET_STATUS request handler */
static int ep0_handle_status(struct bdc *bdc,
			     struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from the function driver for
			 * function remote wakeup: usb_status |= 1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		} else {
			usb_status = 0;
		}

		break;
	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum = epnum*2;
		} else {
			epnum = 1; /* EP0 */
		}
		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}
static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}
/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;
	u16 wValue;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wValue = le16_to_cpu(setup_pkt->wValue);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}
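/*
 * The 6 bytes queued above carry the U1SEL/U1PEL (1 byte each) and
 * U2SEL/U2PEL (2 bytes each) latency values from the host, per the USB3
 * spec SET_SEL request; they land in ep0_response_buff and the
 * completion callback is a no-op.
 */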
/*
 * Queue a 0 byte bd only if wLength is more than the length and the length
 * is a multiple of MaxPacket
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}
/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7: if wValue is 0 then
				 * the dev is moved to the address state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
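/*
 * Anything not handled above (class/vendor requests, SET_CONFIGURATION,
 * unrecognized standard requests) is delegated to the gadget driver's
 * setup() callback; the lock is dropped across the call since the gadget
 * driver may queue requests back into this UDC from that context.
 */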
/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			return;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}

	ep0_stall(bdc);
}
/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if a ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			return;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if the data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * For a 2 stage setup, only process the setup here; for a 3 stage
	 * setup the data stage has already been handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}

	/* Queue a status stage BD */
	ep0_queue_status_stage(bdc);
	bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
	dev_dbg(bdc->dev,
		"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
}
/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret < 0) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}
/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after the link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength > Data
			 * Len, and Len is a multiple of MaxPacket, then
			 * queue a ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue a 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_err(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_err(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}

static void bdc_gadget_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}
/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_warn(bdc->dev, "%s is already disabled\n",
				 ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};

/* Init the ep data structure */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
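/*
 * ep_num mapping recap: ep0 is index 1 in bdc_ep_array, and for epnum
 * N > 1 the OUT direction maps to index 2*N - 2 and IN to 2*N - 1, so
 * e.g. ep1out is index 2 and ep1in is index 3; the same arithmetic is
 * used when decoding wIndex in the ep0 feature/status handlers above.
 */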
/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret = 0;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}