// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBHS-DEV Driver - gadget side.
 *
 * Copyright (C) 2023 Cadence Design Systems.
 *
 * Authors: Pawel Laszczak <pawell@cadence.com>
 */

/*
 * Workaround 1:
 * In some situations the controller may fetch a stale data address from
 * a TRB, in the following sequence:
 * 1. Controller reads a TRB that contains the data address.
 * 2. Software updates the TRBs, including the data address and Cycle bit.
 * 3. Controller reads the Cycle bit from the TRB.
 * 4. DMA runs with the stale data address.
 *
 * To fix this problem, the driver makes the first TRB in a TD invalid.
 * After preparing all TRBs, the driver checks the DMA position; if DMA
 * points at this first, just-added TRB and the doorbell is set, then the
 * driver must defer making the TRB valid. Such a TRB is made valid while
 * adding the next TRB, and only if DMA is stopped or at the TRBERR
 * interrupt.
 */
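
/*
 * The guard TRB for workaround 1 is armed below in
 * cdns2_wa1_update_guard(), and the deferred Cycle bit is restored in
 * cdns2_wa1_restore_cycle_bit(), either directly or via
 * cdns2_wa1_tray_restore_cycle_bit().
 */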

#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "cdns2-gadget.h"
#include "cdns2-trace.h"

/**
 * set_reg_bit_32 - set bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to set.
 */
static void set_reg_bit_32(void __iomem *ptr, u32 mask)
{
        mask = readl(ptr) | mask;
        writel(mask, ptr);
}

/*
 * clear_reg_bit_32 - clear bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to clear.
 */
static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
{
        mask = readl(ptr) & ~mask;
        writel(mask, ptr);
}

/* Clear bits in a given 8-bit register. */
static void clear_reg_bit_8(void __iomem *ptr, u8 mask)
{
        mask = readb(ptr) & ~mask;
        writeb(mask, ptr);
}

/* Set bits in a given 8-bit register. */
void set_reg_bit_8(void __iomem *ptr, u8 mask)
{
        mask = readb(ptr) | mask;
        writeb(mask, ptr);
}

static int cdns2_get_dma_pos(struct cdns2_device *pdev,
                             struct cdns2_endpoint *pep)
{
        int dma_index;

        dma_index = readl(&pdev->adma_regs->ep_traddr) - pep->ring.dma;

        return dma_index / TRB_SIZE;
}

/* Get next private request from list. */
struct cdns2_request *cdns2_next_preq(struct list_head *list)
{
        return list_first_entry_or_null(list, struct cdns2_request, list);
}

void cdns2_select_ep(struct cdns2_device *pdev, u32 ep)
{
        if (pdev->selected_ep == ep)
                return;

        pdev->selected_ep = ep;
        writel(ep, &pdev->adma_regs->ep_sel);
}

dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
                                 struct cdns2_trb *trb)
{
        u32 offset = (char *)trb - (char *)pep->ring.trbs;

        return pep->ring.dma + offset;
}

static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
{
        struct cdns2_device *pdev = pep->pdev;
        struct cdns2_ring *ring = &pep->ring;

        if (pep->ring.trbs) {
                dma_pool_free(pdev->eps_dma_pool, ring->trbs, ring->dma);
                memset(ring, 0, sizeof(*ring));
        }
}

/* Allocates Transfer Ring segment. */
static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
{
        struct cdns2_device *pdev = pep->pdev;
        struct cdns2_trb *link_trb;
        struct cdns2_ring *ring;

        ring = &pep->ring;

        if (!ring->trbs) {
                ring->trbs = dma_pool_alloc(pdev->eps_dma_pool,
                                            GFP_DMA32 | GFP_ATOMIC,
                                            &ring->dma);
                if (!ring->trbs)
                        return -ENOMEM;
        }

        memset(ring->trbs, 0, TR_SEG_SIZE);

        if (!pep->num)
                return 0;

        /* Initialize the last TRB as Link TRB. */
        link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
        link_trb->buffer = cpu_to_le32(TRB_BUFFER(ring->dma));
        link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
                                        TRB_TOGGLE);

        return 0;
}
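
/*
 * Note: for non-default endpoints the entry at index TRBS_PER_SEGMENT - 1
 * is a Link TRB pointing back at ring->dma, so the segment wraps in place;
 * TRB_TOGGLE asks the controller to flip its cycle state on the wrap.
 */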

/*
 * Stalls and flushes selected endpoint.
 * Endpoint must be selected before invoking this function.
 */
static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
{
        struct cdns2_device *pdev = pep->pdev;
        int val;

        trace_cdns2_ep_halt(pep, 1, 1);

        writel(DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);

        /* Wait for DFLUSH cleared. */
        readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
                                  !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
        pep->ep_state |= EP_STALLED;
        pep->ep_state &= ~EP_STALL_PENDING;
}

/*
 * Increment a TRB index.
 *
 * The index should never point to the last Link TRB in the TR. After
 * incrementing, if it points to the Link TRB, wrap around to the beginning
 * and toggle the cycle state bit. The Link TRB is always the last TRB
 * entry.
 */
static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
        (*index)++;
        if (*index == (trb_in_seg - 1)) {
                *index = 0;
                *cs ^= 1;
        }
}
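
/*
 * For example, with TRBS_PER_SEGMENT == 4 the index advances
 * 0, 1, 2, 0, 1, 2, ... (the last entry is reserved for the Link TRB) and
 * the cycle state flips on every wrap.
 */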

static void cdns2_ep_inc_enq(struct cdns2_ring *ring)
{
        ring->free_trbs--;
        cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);
}

static void cdns2_ep_inc_deq(struct cdns2_ring *ring)
{
        ring->free_trbs++;
        cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);
}

/*
 * Enable/disable LPM.
 *
 * If the USBCS_LPMNYET bit is not set and the device receives an Extended
 * Token packet, then the controller answers with an ACK handshake.
 * If the USBCS_LPMNYET bit is set and the device receives an Extended
 * Token packet, then the controller answers with a NYET handshake.
 */
static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
{
        if (enable) {
                clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
                writeb(LPMCLOCK_SLEEP_ENTRY, &pdev->usb_regs->lpmclock);
        } else {
                set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
        }
}

static enum usb_device_speed cdns2_get_speed(struct cdns2_device *pdev)
{
        u8 speed = readb(&pdev->usb_regs->speedctrl);

        if (speed & SPEEDCTRL_HS)
                return USB_SPEED_HIGH;
        else if (speed & SPEEDCTRL_FS)
                return USB_SPEED_FULL;

        return USB_SPEED_UNKNOWN;
}

static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
                                        struct cdns2_trb *trb)
{
        if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
                return pep->ring.trbs;
        else
                return ++trb;
}

void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
                           struct cdns2_request *preq,
                           int status)
{
        struct usb_request *request = &preq->request;
        struct cdns2_device *pdev = pep->pdev;

        list_del_init(&preq->list);

        if (request->status == -EINPROGRESS)
                request->status = status;

        usb_gadget_unmap_request_by_dev(pdev->dev, request, pep->dir);

        /* All TRBs have finished, clear the counter. */
        preq->finished_trb = 0;

        trace_cdns2_request_giveback(preq);

        if (request->complete) {
                spin_unlock(&pdev->lock);
                usb_gadget_giveback_request(&pep->endpoint, request);
                spin_lock(&pdev->lock);
        }

        if (request->buf == pdev->zlp_buf)
                cdns2_gadget_ep_free_request(&pep->endpoint, request);
}

static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
{
        /* Workaround for a stale data address in the TRB. */
        if (pep->wa1_set) {
                trace_cdns2_wa1(pep, "restore cycle bit");

                pep->wa1_set = 0;
                pep->wa1_trb_index = 0xFFFF;
                if (pep->wa1_cycle_bit)
                        pep->wa1_trb->control |= cpu_to_le32(0x1);
                else
                        pep->wa1_trb->control &= cpu_to_le32(~0x1);
        }
}

static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
                                  struct cdns2_trb *trb)
{
        struct cdns2_device *pdev = pep->pdev;

        if (!pep->wa1_set) {
                u32 doorbell;

                doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);

                if (doorbell) {
                        pep->wa1_cycle_bit = pep->ring.pcs ? TRB_CYCLE : 0;
                        pep->wa1_set = 1;
                        pep->wa1_trb = trb;
                        pep->wa1_trb_index = pep->ring.enqueue;
                        trace_cdns2_wa1(pep, "set guard");
                        return 0;
                }
        }
        return 1;
}

static void cdns2_wa1_tray_restore_cycle_bit(struct cdns2_device *pdev,
                                             struct cdns2_endpoint *pep)
{
        int dma_index;
        u32 doorbell;

        doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
        dma_index = cdns2_get_dma_pos(pdev, pep);

        if (!doorbell || dma_index != pep->wa1_trb_index)
                cdns2_wa1_restore_cycle_bit(pep);
}

static int cdns2_prepare_ring(struct cdns2_device *pdev,
                              struct cdns2_endpoint *pep,
                              int num_trbs)
{
        struct cdns2_trb *link_trb = NULL;
        int doorbell, dma_index;
        struct cdns2_ring *ring;
        u32 ch_bit = 0;

        ring = &pep->ring;

        if (num_trbs > ring->free_trbs) {
                pep->ep_state |= EP_RING_FULL;
                trace_cdns2_no_room_on_ring("Ring full\n");
                return -ENOBUFS;
        }

        if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {
                doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
                dma_index = cdns2_get_dma_pos(pdev, pep);

                /* The driver can't update the LINK TRB while DMA is processing it. */
                if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
                        pep->ep_state |= EP_DEFERRED_DRDY;
                        return -ENOBUFS;
                }

                /* Update the Cycle bit in the Link TRB before starting DMA. */
                link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);

                /*
                 * For TRs of size 2, enabling TRB_CHAIN for epXin causes
                 * DMA to get stuck at the LINK TRB.
                 * On the other hand, removing TRB_CHAIN for longer TRs on
                 * epXout causes DMA to get stuck after handling the LINK TRB.
                 * To eliminate this strange behavior, the driver sets the
                 * TRB_CHAIN bit only for TRs of size > 2.
                 */
                if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
                        ch_bit = TRB_CHAIN;

                link_trb->control = cpu_to_le32(((ring->pcs) ? TRB_CYCLE : 0) |
                                    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
        }

        return 0;
}
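
/*
 * Note: when cdns2_prepare_ring() bails out with EP_DEFERRED_DRDY set, the
 * request stays on the deferred list and is re-armed later from the TRBERR
 * handling in cdns2_handle_epx_interrupt().
 */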

static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
                                   struct cdns2_request *preq)
{
        struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
        struct cdns2_trb *trb = preq->trb;
        int num_trbs = preq->num_of_trb;
        int i = 0;

        while (i < num_trbs) {
                trace_cdns2_queue_trb(pep, trb + i);
                if (trb + i == link_trb) {
                        trb = pep->ring.trbs;
                        num_trbs = num_trbs - i;
                        i = 0;
                } else {
                        i++;
                }
        }
}

static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
                                     u64 addr, u64 len)
{
        unsigned int num_trbs = 1;

        if (pep->type == USB_ENDPOINT_XFER_ISOC) {
                /*
                 * To keep DMA performance up, a single TRB for a high
                 * bandwidth transfer should not cross a 4KB address
                 * boundary, so the driver splits such a buffer into
                 * multiple TRBs.
                 */
                num_trbs = DIV_ROUND_UP(len +
                                        (addr & (TRB_MAX_ISO_BUFF_SIZE - 1)),
                                        TRB_MAX_ISO_BUFF_SIZE);

                if (pep->interval > 1)
                        num_trbs = pep->dir ? num_trbs * pep->interval : 1;
        } else if (pep->dir) {
                /*
                 * One extra Link TRB for the IN direction.
                 * Sometimes DMA doesn't want to advance to the next TD and
                 * the transfer hangs. This extra Link TRB forces DMA to
                 * advance to the next TD.
                 */
                num_trbs++;
        }

        return num_trbs;
}
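
/*
 * For example, assuming TRB_MAX_ISO_BUFF_SIZE is 4KB: an ISOC buffer of
 * 6000 bytes that starts 2048 bytes into a 4KB region needs
 * DIV_ROUND_UP(6000 + 2048, 4096) = 2 TRBs, before any interval scaling.
 */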

static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
                                        struct usb_request *req)
{
        unsigned int i, len, full_len, num_trbs = 0;
        struct scatterlist *sg;
        int trb_len = 0;

        full_len = req->length;

        for_each_sg(req->sg, sg, req->num_sgs, i) {
                len = sg_dma_len(sg);
                num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
                len = min(len, full_len);

                /*
                 * For HS ISO transfers a TRB should not exceed the max
                 * packet size. When DMA is running and the data exceeds
                 * the max packet size, some of it is read in single mode
                 * instead of burst mode, which drastically reduces the
                 * copying speed. To avoid this, one or two extra TRBs are
                 * needed. This issue occurs for the UVC class with
                 * sg_supported = 1, because buffer addresses are not
                 * aligned to 1024.
                 */
                if (pep->type == USB_ENDPOINT_XFER_ISOC) {
                        u8 temp;

                        trb_len += len;
                        temp = trb_len >> 10;

                        if (temp) {
                                if (trb_len % 1024)
                                        num_trbs = num_trbs + temp;
                                else
                                        num_trbs = num_trbs + temp - 1;

                                trb_len = trb_len - (temp << 10);
                        }
                }

                full_len -= len;
                if (full_len == 0)
                        break;
        }

        return num_trbs;
}

/*
 * The function prepares an array of optimized AXI burst values for
 * different transfer lengths. The controller handles the trailing data,
 * which is shorter than the AXI burst size, as individual transactions.
 * e.g.:
 * Assume the driver prepares a TRB with trb->length = 700 and the burst
 * size is set to 128. The controller then handles the first 512 bytes as
 * a single AXI transaction, but the next 188 bytes are handled as 47
 * separate AXI transactions.
 * The better choice is a burst size equal to 16; then there are only
 * 25 AXI transactions (10 * 64 + 15 * 4).
 */
static void cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
{
        int axi_burst_option[] = {1, 2, 4, 8, 16, 32, 64, 128};
        int best_burst;
        int array_size;
        int opt_burst;
        int trb_size;
        int i, j;

        array_size = ARRAY_SIZE(axi_burst_option);

        for (i = 0; i <= MAX_ISO_SIZE; i++) {
                trb_size = i / 4;
                best_burst = trb_size ? trb_size : 1;

                for (j = 0; j < array_size; j++) {
                        opt_burst = trb_size / axi_burst_option[j];
                        opt_burst += trb_size % axi_burst_option[j];

                        if (opt_burst < best_burst) {
                                best_burst = opt_burst;
                                pdev->burst_opt[i] = axi_burst_option[j];
                        }
                }
        }
}
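
/*
 * The table built above is consumed per TRB in cdns2_ep_tx_isoc() via
 * TRB_BURST(pep->pdev->burst_opt[trb_buff_len]).
 */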

static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
                             struct cdns2_request *preq,
                             int num_trbs)
{
        struct scatterlist *sg = NULL;
        u32 remaining_packet_size = 0;
        struct cdns2_trb *trb;
        bool first_trb = true;
        dma_addr_t trb_dma;
        u32 trb_buff_len;
        u32 block_length;
        int td_idx = 0;
        int split_size;
        u32 full_len;
        int enqd_len;
        int sent_len;
        int sg_iter;
        u32 control;
        int num_tds;
        u32 length;

        /*
         * For the OUT direction one TD per interval is enough,
         * because TRBs are not dumped by the controller.
         */
        num_tds = pep->dir ? pep->interval : 1;
        split_size = preq->request.num_sgs ? 1024 : 3072;

        for (td_idx = 0; td_idx < num_tds; td_idx++) {
                if (preq->request.num_sgs) {
                        sg = preq->request.sg;
                        trb_dma = sg_dma_address(sg);
                        block_length = sg_dma_len(sg);
                } else {
                        trb_dma = preq->request.dma;
                        block_length = preq->request.length;
                }

                full_len = preq->request.length;
                sg_iter = preq->request.num_sgs ? preq->request.num_sgs : 1;
                remaining_packet_size = split_size;

                for (enqd_len = 0; enqd_len < full_len;
                     enqd_len += trb_buff_len) {
                        if (remaining_packet_size == 0)
                                remaining_packet_size = split_size;

                        /*
                         * Calculate the TRB length: the buffer can't cross
                         * a 4KB boundary or exceed the remaining packet
                         * and block sizes.
                         */
                        trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(trb_dma);
                        trb_buff_len = min(trb_buff_len, remaining_packet_size);
                        trb_buff_len = min(trb_buff_len, block_length);

                        if (trb_buff_len > full_len - enqd_len)
                                trb_buff_len = full_len - enqd_len;

                        control = TRB_TYPE(TRB_NORMAL);

                        /*
                         * For the IN direction the driver has to set IOC
                         * for the last TRB in the last TD.
                         * For the OUT direction the driver must set IOC and
                         * ISP only for the last TRB in each TD.
                         */
                        if (enqd_len + trb_buff_len >= full_len || !pep->dir)
                                control |= TRB_IOC | TRB_ISP;

                        /*
                         * Don't give the first TRB to the hardware (by toggling
                         * the cycle bit) until we've finished creating all the
                         * other TRBs.
                         */
                        if (first_trb) {
                                first_trb = false;
                                if (pep->ring.pcs == 0)
                                        control |= TRB_CYCLE;
                        } else {
                                control |= pep->ring.pcs;
                        }

                        if (enqd_len + trb_buff_len < full_len)
                                control |= TRB_CHAIN;

                        length = TRB_LEN(trb_buff_len) |
                                 TRB_BURST(pep->pdev->burst_opt[trb_buff_len]);

                        trb = pep->ring.trbs + pep->ring.enqueue;
                        trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
                        trb->length = cpu_to_le32(length);
                        trb->control = cpu_to_le32(control);

                        trb_dma += trb_buff_len;
                        sent_len = trb_buff_len;

                        if (sg && sent_len >= block_length) {
                                /* New sg entry */
                                --sg_iter;
                                sent_len -= block_length;
                                if (sg_iter != 0) {
                                        sg = sg_next(sg);
                                        trb_dma = sg_dma_address(sg);
                                        block_length = sg_dma_len(sg);
                                }
                        }

                        remaining_packet_size -= trb_buff_len;
                        block_length -= sent_len;
                        preq->end_trb = pep->ring.enqueue;

                        cdns2_ep_inc_enq(&pep->ring);
                }
        }
}

static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
                             struct cdns2_request *preq,
                             int trbs_per_td)
{
        struct scatterlist *sg = NULL;
        struct cdns2_ring *ring;
        struct cdns2_trb *trb;
        dma_addr_t trb_dma;
        int sg_iter = 0;
        u32 control;
        u32 length;

        if (preq->request.num_sgs) {
                sg = preq->request.sg;
                trb_dma = sg_dma_address(sg);
                length = sg_dma_len(sg);
        } else {
                trb_dma = preq->request.dma;
                length = preq->request.length;
        }

        ring = &pep->ring;

        for (sg_iter = 0; sg_iter < trbs_per_td; sg_iter++) {
                control = TRB_TYPE(TRB_NORMAL) | ring->pcs | TRB_ISP;
                trb = pep->ring.trbs + ring->enqueue;

                if (pep->dir && sg_iter == trbs_per_td - 1) {
                        preq->end_trb = ring->enqueue;
                        control = ring->pcs | TRB_TYPE(TRB_LINK) | TRB_CHAIN
                                  | TRB_IOC;
                        cdns2_ep_inc_enq(&pep->ring);

                        if (ring->enqueue == 0)
                                control |= TRB_TOGGLE;

                        /* Point to next bad TRB. */
                        trb->buffer = cpu_to_le32(pep->ring.dma +
                                                  (ring->enqueue * TRB_SIZE));
                        trb->length = 0;
                        trb->control = cpu_to_le32(control);
                        break;
                }

                /*
                 * Don't give the first TRB to the hardware (by toggling
                 * the cycle bit) until we've finished creating all the
                 * other TRBs.
                 */
                if (sg_iter == 0)
                        control = control ^ TRB_CYCLE;

                /* For last TRB in TD. */
                if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
                        control |= TRB_IOC;
                else
                        control |= TRB_CHAIN;

                trb->buffer = cpu_to_le32(trb_dma);
                trb->length = cpu_to_le32(TRB_BURST(pep->trb_burst_size) |
                                          TRB_LEN(length));
                trb->control = cpu_to_le32(control);

                if (sg && sg_iter < (trbs_per_td - 1)) {
                        sg = sg_next(sg);
                        trb_dma = sg_dma_address(sg);
                        length = sg_dma_len(sg);
                }

                preq->end_trb = ring->enqueue;
                cdns2_ep_inc_enq(&pep->ring);
        }
}

static void cdns2_set_drdy(struct cdns2_device *pdev,
                           struct cdns2_endpoint *pep)
{
        trace_cdns2_ring(pep);

        /*
         * Memory barrier - Cycle Bit must be set before doorbell.
         */
        dma_wmb();

        /* Clearing TRBERR and DESCMIS before setting DRDY. */
        writel(DMA_EP_STS_TRBERR | DMA_EP_STS_DESCMIS,
               &pdev->adma_regs->ep_sts);
        writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);

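        /*
         * If a TRBERR was already pending, the doorbell write above may not
         * have taken effect, so clear the error and ring the doorbell again
         * (this reading of the retry below is an assumption).
         */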
        if (readl(&pdev->adma_regs->ep_sts) & DMA_EP_STS_TRBERR) {
                writel(DMA_EP_STS_TRBERR, &pdev->adma_regs->ep_sts);
                writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
        }

        trace_cdns2_doorbell_epx(pep, readl(&pdev->adma_regs->ep_traddr));
}

static int cdns2_prepare_first_isoc_transfer(struct cdns2_device *pdev,
                                             struct cdns2_endpoint *pep)
{
        struct cdns2_trb *trb;
        u32 buffer;
        u8 hw_ccs;

        if ((readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY))
                return -EBUSY;

        if (!pep->dir) {
                set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
                writel(pep->ring.dma + pep->ring.dequeue,
                       &pdev->adma_regs->ep_traddr);
                return 0;
        }

        /*
         * The first packet after the doorbell can be corrupted, so the
         * driver prepares a zero-length packet as the first packet.
         */
        buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
        hw_ccs = !!DMA_EP_STS_CCS(readl(&pdev->adma_regs->ep_sts));

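        /*
         * Note: this zero-length TRB and the Link TRB after it live at
         * indexes TRBS_PER_SEGMENT and TRBS_PER_SEGMENT + 1, just past the
         * standard ring; cdns2_trb_handled() treats those indexes as
         * outside the ring.
         */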
        trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
        trb->length = 0;
        trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
        trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) | TRB_TYPE(TRB_NORMAL));

        /*
         * A LINK TRB is used to force the controller to update its cycle
         * bit and to move to the correct place in the transfer ring.
         */
        trb++;
        trb->length = 0;
        trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
        trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) |
                                    TRB_TYPE(TRB_LINK) | TRB_CHAIN);

        if (hw_ccs != pep->ring.ccs)
                trb->control |= cpu_to_le32(TRB_TOGGLE);

        set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
        writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
               &pdev->adma_regs->ep_traddr);

        return 0;
}

/* Prepare and start a transfer on a non-default endpoint. */
static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
                                 struct cdns2_request *preq)
{
        struct cdns2_device *pdev = pep->pdev;
        struct cdns2_ring *ring;
        u32 toggle_pcs = 1;
        int num_trbs;
        int ret;

        cdns2_select_ep(pdev, pep->endpoint.address);

        if (preq->request.sg)
                num_trbs = cdns2_count_sg_trbs(pep, &preq->request);
        else
                num_trbs = cdns2_count_trbs(pep, preq->request.dma,
                                            preq->request.length);

        ret = cdns2_prepare_ring(pdev, pep, num_trbs);
        if (ret)
                return ret;

        ring = &pep->ring;
        preq->start_trb = ring->enqueue;
        preq->trb = ring->trbs + ring->enqueue;

        if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
                cdns2_ep_tx_isoc(pep, preq, num_trbs);
        } else {
                toggle_pcs = cdns2_wa1_update_guard(pep, ring->trbs + ring->enqueue);
                cdns2_ep_tx_bulk(pep, preq, num_trbs);
        }

        preq->num_of_trb = num_trbs;

        /*
         * Memory barrier - cycle bit must be set as the last operation.
         */
        dma_wmb();

        /* Give the TD to the consumer. */
        if (toggle_pcs)
                preq->trb->control = preq->trb->control ^ cpu_to_le32(1);

        cdns2_wa1_tray_restore_cycle_bit(pdev, pep);
        cdns2_dbg_request_trbs(pep, preq);

        if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) {
                if (pep->type == USB_ENDPOINT_XFER_ISOC) {
                        ret = cdns2_prepare_first_isoc_transfer(pdev, pep);
                        if (ret)
                                return 0;
                }

                cdns2_set_drdy(pdev, pep);
        }

        return 0;
}

/* Prepare and start a transfer for all not-started requests. */
static int cdns2_start_all_request(struct cdns2_device *pdev,
                                   struct cdns2_endpoint *pep)
{
        struct cdns2_request *preq;
        int ret;

        while (!list_empty(&pep->deferred_list)) {
                preq = cdns2_next_preq(&pep->deferred_list);

                ret = cdns2_ep_run_transfer(pep, preq);
                if (ret)
                        return ret;

                list_move_tail(&preq->list, &pep->pending_list);
        }

        pep->ep_state &= ~EP_RING_FULL;

        return 0;
}

/*
 * Check whether the TRB has been handled by DMA.
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if the request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = ring->dequeue - dequeue position
 * EQ = ring->enqueue - enqueue position
 * ST = preq->start_trb - index of the first TRB in the transfer ring
 * ET = preq->end_trb - index of the last TRB in the transfer ring
 * CI = current_index - index of the TRB being processed by DMA
 *
 * As a first step, we check whether the dequeued TRB lies between ST and ET.
 * Then we check whether the cycle bit at index ring->dequeue is correct.
 *
 * Some rules:
 * 1. ring->dequeue never equals current_index.
 * 2. ring->enqueue never exceeds ring->dequeue.
 * 3. Exception: ring->enqueue == ring->dequeue
 *    while ring->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - ring->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - ring->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
                              struct cdns2_request *preq)
{
        struct cdns2_device *pdev = pep->pdev;
        struct cdns2_ring *ring;
        struct cdns2_trb *trb;
        int current_index = 0;
        int handled = 0;
        int doorbell;

        ring = &pep->ring;
        current_index = cdns2_get_dma_pos(pdev, pep);
        doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);

        /*
         * Only ISO transfers can use the 2 entries outside the standard
         * Transfer Ring. The first of them is used as a zero-length packet
         * and the second as a LINK TRB.
         */
        if (current_index >= TRBS_PER_SEGMENT)
                goto finish;

        /* The current TRB doesn't belong to this request. */
        if (preq->start_trb < preq->end_trb) {
                if (ring->dequeue > preq->end_trb)
                        goto finish;

                if (ring->dequeue < preq->start_trb)
                        goto finish;
        }

        if (preq->start_trb > preq->end_trb && ring->dequeue > preq->end_trb &&
            ring->dequeue < preq->start_trb)
                goto finish;

        if (preq->start_trb == preq->end_trb && ring->dequeue != preq->end_trb)
                goto finish;

        trb = &ring->trbs[ring->dequeue];

        if ((le32_to_cpu(trb->control) & TRB_CYCLE) != ring->ccs)
                goto finish;

        if (doorbell == 1 && current_index == ring->dequeue)
                goto finish;

        /* The corner case for TRBS_PER_SEGMENT equal to 2. */
        if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
                handled = 1;
                goto finish;
        }

        if (ring->enqueue == ring->dequeue &&
            ring->free_trbs == 0) {
                handled = 1;
        } else if (ring->dequeue < current_index) {
                if ((current_index == (TRBS_PER_SEGMENT - 1)) &&
                    !ring->dequeue)
                        goto finish;

                handled = 1;
        } else if (ring->dequeue > current_index) {
                handled = 1;
        }

finish:
        trace_cdns2_request_handled(preq, current_index, handled);

        return handled;
}

static void cdns2_skip_isoc_td(struct cdns2_device *pdev,
                               struct cdns2_endpoint *pep,
                               struct cdns2_request *preq)
{
        struct cdns2_trb *trb;
        int i;

        trb = pep->ring.trbs + pep->ring.dequeue;

        for (i = preq->finished_trb; i < preq->num_of_trb; i++) {
                preq->finished_trb++;
                trace_cdns2_complete_trb(pep, trb);
                cdns2_ep_inc_deq(&pep->ring);
                trb = cdns2_next_trb(pep, trb);
        }

        cdns2_gadget_giveback(pep, preq, 0);
        cdns2_prepare_first_isoc_transfer(pdev, pep);
        pep->skip = false;
        cdns2_set_drdy(pdev, pep);
}

static void cdns2_transfer_completed(struct cdns2_device *pdev,
                                     struct cdns2_endpoint *pep)
{
        struct cdns2_request *preq = NULL;
        bool request_handled = false;
        struct cdns2_trb *trb;

        while (!list_empty(&pep->pending_list)) {
                preq = cdns2_next_preq(&pep->pending_list);
                trb = pep->ring.trbs + pep->ring.dequeue;

                /*
                 * The TRB was changed to a link TRB, and the request
                 * was already handled at ep_dequeue.
                 */
                while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK &&
                       le32_to_cpu(trb->length)) {
                        trace_cdns2_complete_trb(pep, trb);
                        cdns2_ep_inc_deq(&pep->ring);
                        trb = pep->ring.trbs + pep->ring.dequeue;
                }

                /*
                 * Re-select the endpoint. It could have been changed by
                 * another CPU while handling usb_gadget_giveback_request.
                 */
                cdns2_select_ep(pdev, pep->endpoint.address);

                while (cdns2_trb_handled(pep, preq)) {
                        preq->finished_trb++;

                        if (preq->finished_trb >= preq->num_of_trb)
                                request_handled = true;

                        trb = pep->ring.trbs + pep->ring.dequeue;
                        trace_cdns2_complete_trb(pep, trb);

                        if (pep->dir && pep->type == USB_ENDPOINT_XFER_ISOC)
                                /*
                                 * For ISOC IN the controller doesn't
                                 * update trb->length.
                                 */
                                preq->request.actual = preq->request.length;
                        else
                                preq->request.actual +=
                                        TRB_LEN(le32_to_cpu(trb->length));

                        cdns2_ep_inc_deq(&pep->ring);
                }

                if (request_handled) {
                        cdns2_gadget_giveback(pep, preq, 0);
                        request_handled = false;
                } else {
                        goto prepare_next_td;
                }

                if (pep->type != USB_ENDPOINT_XFER_ISOC &&
                    TRBS_PER_SEGMENT == 2)
                        break;
        }

prepare_next_td:
        if (pep->skip && preq)
                cdns2_skip_isoc_td(pdev, pep, preq);

        if (!(pep->ep_state & EP_STALLED) &&
            !(pep->ep_state & EP_STALL_PENDING))
                cdns2_start_all_request(pdev, pep);
}

static void cdns2_wakeup(struct cdns2_device *pdev)
{
        if (!pdev->may_wakeup)
                return;

        /* Start driving resume signaling to indicate remote wakeup. */
        set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_SIGRSUME);
}

static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
{
        struct cdns2_device *pdev = pep->pdev;

        cdns2_wa1_restore_cycle_bit(pep);

        if (rearm) {
                trace_cdns2_ring(pep);

                /* Cycle Bit must be updated before arming DMA. */
                dma_wmb();

                writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);

                cdns2_wakeup(pdev);

                trace_cdns2_doorbell_epx(pep,
                                         readl(&pdev->adma_regs->ep_traddr));
        }
}

static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
{
        struct cdns2_device *pdev = pep->pdev;
        u8 isoerror = 0;
        u32 ep_sts_reg;
        u32 val;

        cdns2_select_ep(pdev, pep->endpoint.address);

        trace_cdns2_epx_irq(pdev, pep);

        ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
        writel(ep_sts_reg, &pdev->adma_regs->ep_sts);

        if (pep->type == USB_ENDPOINT_XFER_ISOC) {
                u8 mult;
                u8 cs;

                mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
                cs = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcs) :
                                readb(&pdev->epx_regs->ep[pep->num - 1].rxcs);
                if (mult > 0)
                        isoerror = EPX_CS_ERR(cs);
        }

        /*
         * Sometimes the ISO error for mult=1 or mult=2 is not propagated
         * in time from the USB module to the DMA module. To protect
         * against this, the driver also checks the txcs/rxcs registers.
         */
        if ((ep_sts_reg & DMA_EP_STS_ISOERR) || isoerror) {
                clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);

                /* Wait for DBUSY cleared. */
                readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
                                          !(val & DMA_EP_STS_DBUSY), 1, 125);

                writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);

                /* Wait for DFLUSH cleared. */
                readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
                                          !(val & DMA_EP_CMD_DFLUSH), 1, 10);

                pep->skip = true;
        }

        if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) {
                if (pep->ep_state & EP_STALL_PENDING &&
                    !(ep_sts_reg & DMA_EP_STS_DESCMIS))
                        cdns2_ep_stall_flush(pep);

                /*
                 * For isochronous transfers the driver completes the
                 * request on IOC or on TRBERR. IOC appears only when the
                 * device receives an OUT data packet. If the host disables
                 * the stream, or some packets get lost, then the only way
                 * to finish all queued transfers is to do it on the TRBERR
                 * event.
                 */
                if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) {
                        if (!pep->dir)
                                clear_reg_bit_32(&pdev->adma_regs->ep_cfg,
                                                 DMA_EP_CFG_ENABLE);

                        cdns2_transfer_completed(pdev, pep);
                        if (pep->ep_state & EP_DEFERRED_DRDY) {
                                pep->ep_state &= ~EP_DEFERRED_DRDY;
                                cdns2_set_drdy(pdev, pep);
                        }

                        return;
                }

                cdns2_transfer_completed(pdev, pep);

                if (!(pep->ep_state & EP_STALLED) &&
                    !(pep->ep_state & EP_STALL_PENDING)) {
                        if (pep->ep_state & EP_DEFERRED_DRDY) {
                                pep->ep_state &= ~EP_DEFERRED_DRDY;
                                cdns2_start_all_request(pdev, pep);
                        } else {
                                cdns2_rearm_transfer(pep, pep->wa1_set);
                        }
                }

                return;
        }

        if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP))
                cdns2_transfer_completed(pdev, pep);
}

static void cdns2_disconnect_gadget(struct cdns2_device *pdev)
{
        if (pdev->gadget_driver && pdev->gadget_driver->disconnect)
                pdev->gadget_driver->disconnect(&pdev->gadget);
}

static irqreturn_t cdns2_usb_irq_handler(int irq, void *data)
{
        struct cdns2_device *pdev = data;
        unsigned long reg_ep_ists;
        u8 reg_usb_irq_m;
        u8 reg_ext_irq_m;
        u8 reg_usb_irq;
        u8 reg_ext_irq;

        if (pdev->in_lpm)
                return IRQ_NONE;

        reg_usb_irq_m = readb(&pdev->interrupt_regs->usbien);
        reg_ext_irq_m = readb(&pdev->interrupt_regs->extien);

        /* Mask all sources of interrupt. */
        writeb(0, &pdev->interrupt_regs->usbien);
        writeb(0, &pdev->interrupt_regs->extien);
        writel(0, &pdev->adma_regs->ep_ien);

        /* Clear interrupt sources. */
        writel(0, &pdev->adma_regs->ep_sts);
        writeb(0, &pdev->interrupt_regs->usbirq);
        writeb(0, &pdev->interrupt_regs->extirq);

        reg_ep_ists = readl(&pdev->adma_regs->ep_ists);
        reg_usb_irq = readb(&pdev->interrupt_regs->usbirq);
        reg_ext_irq = readb(&pdev->interrupt_regs->extirq);

        if (reg_ep_ists || (reg_usb_irq & reg_usb_irq_m) ||
            (reg_ext_irq & reg_ext_irq_m))
                return IRQ_WAKE_THREAD;

        writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
        writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
        writel(~0, &pdev->adma_regs->ep_ien);

        return IRQ_NONE;
}
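
/*
 * The hard handler above only masks the interrupt sources and defers the
 * real work to the threaded handler; the sources are unmasked again at the
 * end of cdns2_thread_irq_handler().
 */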

static irqreturn_t cdns2_thread_usb_irq_handler(struct cdns2_device *pdev)
{
        u8 usb_irq, ext_irq;
        int speed;
        int i;

        ext_irq = readb(&pdev->interrupt_regs->extirq) & EXTIRQ_WAKEUP;
        writeb(ext_irq, &pdev->interrupt_regs->extirq);

        usb_irq = readb(&pdev->interrupt_regs->usbirq) & USB_IEN_INIT;
        writeb(usb_irq, &pdev->interrupt_regs->usbirq);

        if (!ext_irq && !usb_irq)
                return IRQ_NONE;

        trace_cdns2_usb_irq(usb_irq, ext_irq);

        if (ext_irq & EXTIRQ_WAKEUP) {
                if (pdev->gadget_driver && pdev->gadget_driver->resume) {
                        spin_unlock(&pdev->lock);
                        pdev->gadget_driver->resume(&pdev->gadget);
                        spin_lock(&pdev->lock);
                }
        }

        if (usb_irq & USBIRQ_LPM) {
                u8 reg = readb(&pdev->usb_regs->lpmctrl);

                /* LPM1 enter */
                if (!(reg & LPMCTRLLH_LPMNYET))
                        writeb(0, &pdev->usb_regs->sleep_clkgate);
        }

        if (usb_irq & USBIRQ_SUSPEND) {
                if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
                        spin_unlock(&pdev->lock);
                        pdev->gadget_driver->suspend(&pdev->gadget);
                        spin_lock(&pdev->lock);
                }
        }

        if (usb_irq & USBIRQ_URESET) {
                if (pdev->gadget_driver) {
                        pdev->dev_address = 0;

                        spin_unlock(&pdev->lock);
                        usb_gadget_udc_reset(&pdev->gadget,
                                             pdev->gadget_driver);
                        spin_lock(&pdev->lock);

                        /*
                         * USBIRQ_URESET is reported at the beginning of the
                         * reset signal; 100 ms is enough time for the reset
                         * process to finish. For high speed, the reset
                         * procedure is complete once the controller detects
                         * HS mode.
                         */
                        for (i = 0; i < 100; i++) {
                                mdelay(1);
                                speed = cdns2_get_speed(pdev);
                                if (speed == USB_SPEED_HIGH)
                                        break;
                        }

                        pdev->gadget.speed = speed;
                        cdns2_enable_l1(pdev, 0);
                        cdns2_ep0_config(pdev);
                        pdev->may_wakeup = 0;
                }
        }

        if (usb_irq & USBIRQ_SUDAV) {
                pdev->ep0_stage = CDNS2_SETUP_STAGE;
                cdns2_handle_setup_packet(pdev);
        }

        return IRQ_HANDLED;
}

/* Deferred USB interrupt handler. */
static irqreturn_t cdns2_thread_irq_handler(int irq, void *data)
{
        struct cdns2_device *pdev = data;
        unsigned long dma_ep_ists;
        unsigned long flags;
        unsigned int bit;

        local_bh_disable();
        spin_lock_irqsave(&pdev->lock, flags);

        cdns2_thread_usb_irq_handler(pdev);

        dma_ep_ists = readl(&pdev->adma_regs->ep_ists);
        if (!dma_ep_ists)
                goto unlock;

        trace_cdns2_dma_ep_ists(dma_ep_ists);

        /* Handle default endpoint OUT. */
        if (dma_ep_ists & DMA_EP_ISTS_EP_OUT0)
                cdns2_handle_ep0_interrupt(pdev, USB_DIR_OUT);

        /* Handle default endpoint IN. */
        if (dma_ep_ists & DMA_EP_ISTS_EP_IN0)
                cdns2_handle_ep0_interrupt(pdev, USB_DIR_IN);

        dma_ep_ists &= ~(DMA_EP_ISTS_EP_OUT0 | DMA_EP_ISTS_EP_IN0);

        for_each_set_bit(bit, &dma_ep_ists, sizeof(u32) * BITS_PER_BYTE) {
                u8 ep_idx = bit > 16 ? (bit - 16) * 2 : (bit * 2) - 1;

                /*
                 * Endpoints in pdev->eps[] are held in the order:
                 * ep0, ep1out, ep1in, ep2out, ep2in, ... ep15out, ep15in,
                 * but in dma_ep_ists in the order:
                 * ep0out, ep1out, ... ep15out, ep0in, ep1in, ... ep15in.
                 */
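                /*
                 * For example, bit 2 (ep2out) maps to eps[2 * 2 - 1] =
                 * eps[3], and bit 17 (ep1in) maps to eps[(17 - 16) * 2] =
                 * eps[2].
                 */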
                cdns2_handle_epx_interrupt(&pdev->eps[ep_idx]);
        }

unlock:
        writel(~0, &pdev->adma_regs->ep_ien);
        writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
        writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);

        spin_unlock_irqrestore(&pdev->lock, flags);
        local_bh_enable();

        return IRQ_HANDLED;
}

/* Calculates and assigns onchip memory for endpoints. */
static void cdns2_eps_onchip_buffer_init(struct cdns2_device *pdev)
{
        struct cdns2_endpoint *pep;
        int min_buf_tx = 0;
        int min_buf_rx = 0;
        u16 tx_offset = 0;
        u16 rx_offset = 0;
        int free;
        int i;

        for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
                pep = &pdev->eps[i];

                if (!(pep->ep_state & EP_CLAIMED))
                        continue;

                if (pep->dir)
                        min_buf_tx += pep->buffering;
                else
                        min_buf_rx += pep->buffering;
        }

        for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
                pep = &pdev->eps[i];

                if (!(pep->ep_state & EP_CLAIMED))
                        continue;

                if (pep->dir) {
                        free = pdev->onchip_tx_buf - min_buf_tx;

                        if (free + pep->buffering >= 4)
                                free = 4;
                        else
                                free = free + pep->buffering;

                        min_buf_tx = min_buf_tx - pep->buffering + free;

                        pep->buffering = free;

                        writel(tx_offset,
                               &pdev->epx_regs->txstaddr[pep->num - 1]);

                        dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
                                pep->name, tx_offset, pep->buffering);

                        tx_offset += pep->buffering * 1024;
                } else {
                        free = pdev->onchip_rx_buf - min_buf_rx;

                        if (free + pep->buffering >= 4)
                                free = 4;
                        else
                                free = free + pep->buffering;

                        min_buf_rx = min_buf_rx - pep->buffering + free;

                        pep->buffering = free;
                        writel(rx_offset,
                               &pdev->epx_regs->rxstaddr[pep->num - 1]);

                        dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
                                pep->name, rx_offset, pep->buffering);

                        rx_offset += pep->buffering * 1024;
                }
        }
}

/* Configure hardware endpoint. */
static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
{
        bool is_iso_ep = (pep->type == USB_ENDPOINT_XFER_ISOC);
        struct cdns2_device *pdev = pep->pdev;
        u32 max_packet_size;
        u8 dir = 0;
        u8 ep_cfg;
        u8 mult;
        u32 val;
        int ret;

        switch (pep->type) {
        case USB_ENDPOINT_XFER_INT:
                ep_cfg = EPX_CON_TYPE_INT;
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_cfg = EPX_CON_TYPE_BULK;
                break;
        default:
                mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
                ep_cfg = mult << EPX_CON_ISOD_SHIFT;
                ep_cfg |= EPX_CON_TYPE_ISOC;

                if (pep->dir) {
                        set_reg_bit_8(&pdev->epx_regs->isoautoarm, BIT(pep->num));
                        set_reg_bit_8(&pdev->epx_regs->isoautodump, BIT(pep->num));
                        set_reg_bit_8(&pdev->epx_regs->isodctrl, BIT(pep->num));
                }
        }

        switch (pdev->gadget.speed) {
        case USB_SPEED_FULL:
                max_packet_size = is_iso_ep ? 1023 : 64;
                break;
        case USB_SPEED_HIGH:
                max_packet_size = is_iso_ep ? 1024 : 512;
                break;
        default:
                /* All other speeds are not supported. */
1450                 return -EINVAL;
1451         }
1452
1453         ep_cfg |= (EPX_CON_VAL | (pep->buffering - 1));
1454
1455         if (pep->dir) {
1456                 dir = FIFOCTRL_IO_TX;
1457                 writew(max_packet_size, &pdev->epx_regs->txmaxpack[pep->num - 1]);
1458                 writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].txcon);
1459         } else {
1460                 writew(max_packet_size, &pdev->epx_regs->rxmaxpack[pep->num - 1]);
1461                 writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].rxcon);
1462         }
1463
1464         writeb(pep->num | dir | FIFOCTRL_FIFOAUTO,
1465                &pdev->usb_regs->fifoctrl);
1466         writeb(pep->num | dir, &pdev->epx_regs->endprst);
1467         writeb(pep->num | ENDPRST_FIFORST | ENDPRST_TOGRST | dir,
1468                &pdev->epx_regs->endprst);
1469
1470         if (max_packet_size == 1024)
1471                 pep->trb_burst_size = 128;
1472         else if (max_packet_size >= 512)
1473                 pep->trb_burst_size = 64;
1474         else
1475                 pep->trb_burst_size = 16;
1476
1477         cdns2_select_ep(pdev, pep->num | pep->dir);
1478         writel(DMA_EP_CMD_EPRST | DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);
1479
1480         ret = readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
1481                                         !(val & (DMA_EP_CMD_DFLUSH |
1482                                         DMA_EP_CMD_EPRST)),
1483                                         1, 1000);
1484
1485         if (ret)
1486                 return ret;
1487
1488         writel(DMA_EP_STS_TRBERR | DMA_EP_STS_ISOERR, &pdev->adma_regs->ep_sts_en);
1489
1490         if (enable)
1491                 writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);
1492
1493         trace_cdns2_epx_hw_cfg(pdev, pep);
1494
1495         dev_dbg(pdev->dev, "Configured %s: MPS: %08x, ep con: %02x\n",
1496                 pep->name, max_packet_size, ep_cfg);
1497
1498         return 0;
1499 }
1500
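     /*
      * Allocate a driver-private request (struct cdns2_request) and return
      * the embedded struct usb_request to the gadget core.
      */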
1501 struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
1502                                                   gfp_t gfp_flags)
1503 {
1504         struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1505         struct cdns2_request *preq;
1506
1507         preq = kzalloc(sizeof(*preq), gfp_flags);
1508         if (!preq)
1509                 return NULL;
1510
1511         preq->pep = pep;
1512
1513         trace_cdns2_alloc_request(preq);
1514
1515         return &preq->request;
1516 }
1517
1518 void cdns2_gadget_ep_free_request(struct usb_ep *ep,
1519                                   struct usb_request *request)
1520 {
1521         struct cdns2_request *preq = to_cdns2_request(request);
1522
1523         trace_cdns2_free_request(preq);
1524         kfree(preq);
1525 }
1526
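     /*
      * Enable a non-control endpoint: validate the descriptor, allocate a
      * transfer ring segment and configure the hardware endpoint.
      */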
1527 static int cdns2_gadget_ep_enable(struct usb_ep *ep,
1528                                   const struct usb_endpoint_descriptor *desc)
1529 {
1530         u32 reg = DMA_EP_STS_EN_TRBERREN;
1531         struct cdns2_endpoint *pep;
1532         struct cdns2_device *pdev;
1533         unsigned long flags;
1534         int enable = 1;
1535         int ret = 0;
1536
1537         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
1538             !desc->wMaxPacketSize) {
1539                 return -EINVAL;
1540         }
1541
1542         pep = ep_to_cdns2_ep(ep);
1543         pdev = pep->pdev;
1544
1545         if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
1546                           "%s is already enabled\n", pep->name))
1547                 return 0;
1548
1549         spin_lock_irqsave(&pdev->lock, flags);
1550
1551         pep->type = usb_endpoint_type(desc);
1552         pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1553
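             /*
              * At full speed an interrupt endpoint's bInterval is in frames,
              * not the 2^(bInterval-1) exponent used at high speed.
              */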
1554         if (pdev->gadget.speed == USB_SPEED_FULL)
1555                 if (pep->type == USB_ENDPOINT_XFER_INT)
1556                         pep->interval = desc->bInterval;
1557
1558         if (pep->interval > ISO_MAX_INTERVAL &&
1559             pep->type == USB_ENDPOINT_XFER_ISOC) {
1560                 dev_err(pdev->dev, "ISO period is limited to %d (current: %d)\n",
1561                         ISO_MAX_INTERVAL, pep->interval);
1562
1563                 ret = -EINVAL;
1564                 goto exit;
1565         }
1566
1567         /*
1568          * During ISO OUT traffic the DMA may read the Transfer Ring of an
1569          * endpoint that has never been given a doorbell.
1570          * The issue was only ever observed in simulation, but the driver
1571          * protects against it anyway: an ISO OUT endpoint is only enabled
1572          * just before DRBL is set. This special treatment of ISO OUT
1573          * endpoints is recommended by the controller specification.
1574          */
1575         if (pep->type == USB_ENDPOINT_XFER_ISOC  && !pep->dir)
1576                 enable = 0;
1577
1578         ret = cdns2_alloc_tr_segment(pep);
1579         if (ret)
1580                 goto exit;
1581
1582         ret = cdns2_ep_config(pep, enable);
1583         if (ret) {
1584                 cdns2_free_tr_segment(pep);
1585                 ret = -EINVAL;
1586                 goto exit;
1587         }
1588
1589         trace_cdns2_gadget_ep_enable(pep);
1590
1591         pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
1592         pep->ep_state |= EP_ENABLED;
1593         pep->wa1_set = 0;
1594         pep->ring.enqueue = 0;
1595         pep->ring.dequeue = 0;
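             /* Seed the ring's producer/consumer cycle state from the CCS bit. */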
1596         reg = readl(&pdev->adma_regs->ep_sts);
1597         pep->ring.pcs = !!DMA_EP_STS_CCS(reg);
1598         pep->ring.ccs = !!DMA_EP_STS_CCS(reg);
1599
1600         writel(pep->ring.dma, &pdev->adma_regs->ep_traddr);
1601
1602         /* One TRB is reserved for the link TRB used in DMULT mode. */
1603         pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;
1604
1605 exit:
1606         spin_unlock_irqrestore(&pdev->lock, flags);
1607
1608         return ret;
1609 }
1610
1611 static int cdns2_gadget_ep_disable(struct usb_ep *ep)
1612 {
1613         struct cdns2_endpoint *pep;
1614         struct cdns2_request *preq;
1615         struct cdns2_device *pdev;
1616         unsigned long flags;
1617         int val;
1618
1619         if (!ep)
1620                 return -EINVAL;
1621
1622         pep = ep_to_cdns2_ep(ep);
1623         pdev = pep->pdev;
1624
1625         if (dev_WARN_ONCE(pdev->dev, !(pep->ep_state & EP_ENABLED),
1626                           "%s is already disabled\n", pep->name))
1627                 return 0;
1628
1629         spin_lock_irqsave(&pdev->lock, flags);
1630
1631         trace_cdns2_gadget_ep_disable(pep);
1632
1633         cdns2_select_ep(pdev, ep->desc->bEndpointAddress);
1634
1635         clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
1636
1637         /*
1638          * The driver needs some time before resetting the endpoint:
1639          * wait for the DBUSY bit to clear or for the timeout to expire.
1640          * 10us is enough time for the controller to stop the transfer.
1641          */
1642         readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
1643                                   !(val & DMA_EP_STS_DBUSY), 1, 10);
1644         writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
1645
1646         readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
1647                                   !(val & (DMA_EP_CMD_DFLUSH | DMA_EP_CMD_EPRST)),
1648                                   1, 1000);
1649
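             /* Give back all pending and deferred requests with -ESHUTDOWN. */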
1650         while (!list_empty(&pep->pending_list)) {
1651                 preq = cdns2_next_preq(&pep->pending_list);
1652                 cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
1653         }
1654
1655         while (!list_empty(&pep->deferred_list)) {
1656                 preq = cdns2_next_preq(&pep->deferred_list);
1657                 cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
1658         }
1659
1660         ep->desc = NULL;
1661         pep->ep_state &= ~EP_ENABLED;
1662
1663         spin_unlock_irqrestore(&pdev->lock, flags);
1664
1665         return 0;
1666 }
1667
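     /*
      * Map the request for DMA and put it on the deferred list; start it
      * immediately unless the endpoint is stalled or a stall is pending.
      */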
1668 static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
1669                             struct cdns2_request *preq,
1670                             gfp_t gfp_flags)
1671 {
1672         struct cdns2_device *pdev = pep->pdev;
1673         struct usb_request *request;
1674         int ret;
1675
1676         request = &preq->request;
1677         request->actual = 0;
1678         request->status = -EINPROGRESS;
1679
1680         ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
1681         if (ret) {
1682                 trace_cdns2_request_enqueue_error(preq);
1683                 return ret;
1684         }
1685
1686         list_add_tail(&preq->list, &pep->deferred_list);
1687         trace_cdns2_request_enqueue(preq);
1688
1689         if (!(pep->ep_state & EP_STALLED) && !(pep->ep_state & EP_STALL_PENDING))
1690                 cdns2_start_all_request(pdev, pep);
1691
1692         return 0;
1693 }
1694
1695 static int cdns2_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1696                                  gfp_t gfp_flags)
1697 {
1698         struct usb_request *zlp_request;
1699         struct cdns2_request *preq;
1700         struct cdns2_endpoint *pep;
1701         struct cdns2_device *pdev;
1702         unsigned long flags;
1703         int ret;
1704
1705         if (!request || !ep)
1706                 return -EINVAL;
1707
1708         pep = ep_to_cdns2_ep(ep);
1709         pdev = pep->pdev;
1710
1711         if (!(pep->ep_state & EP_ENABLED)) {
1712                 dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
1713                         pep->name);
1714                 return -EINVAL;
1715         }
1716
1717         spin_lock_irqsave(&pdev->lock, flags);
1718
1719         preq = to_cdns2_request(request);
1720         ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
1721
1722         if (ret == 0 && request->zero && request->length &&
1723             (request->length % ep->maxpacket == 0)) {
1724                 struct cdns2_request *preq;
1725
1726                 zlp_request = cdns2_gadget_ep_alloc_request(ep, GFP_ATOMIC);
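                     if (!zlp_request) {
                             /*
                              * Hedged addition: the allocation can fail; the
                              * original request above is already queued, so
                              * only the ZLP enqueue is reported as failed.
                              */
                             spin_unlock_irqrestore(&pdev->lock, flags);
                             return -ENOMEM;
                     }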
1727                 zlp_request->buf = pdev->zlp_buf;
1728                 zlp_request->length = 0;
1729
1730                 preq = to_cdns2_request(zlp_request);
1731                 ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
1732         }
1733
1734         spin_unlock_irqrestore(&pdev->lock, flags);
1735         return ret;
1736 }
1737
1738 int cdns2_gadget_ep_dequeue(struct usb_ep *ep,
1739                             struct usb_request *request)
1740 {
1741         struct cdns2_request *preq, *preq_temp, *cur_preq;
1742         struct cdns2_endpoint *pep;
1743         struct cdns2_trb *link_trb;
1744         u8 req_on_hw_ring = 0;
1745         unsigned long flags;
1746         u32 buffer;
1747         int val, i;
1748
1749         if (!ep || !request || !ep->desc)
1750                 return -EINVAL;
1751
1752         pep = ep_to_cdns2_ep(ep);
1753         if (!pep->endpoint.desc) {
1754                 dev_err(pep->pdev->dev, "%s: can't dequeue from disabled endpoint\n",
1755                         pep->name);
1756                 return -ESHUTDOWN;
1757         }
1758
1759         /* Requests have already been dequeued while disabling the endpoint. */
1760         if (!(pep->ep_state & EP_ENABLED))
1761                 return 0;
1762
1763         spin_lock_irqsave(&pep->pdev->lock, flags);
1764
1765         cur_preq = to_cdns2_request(request);
1766         trace_cdns2_request_dequeue(cur_preq);
1767
1768         list_for_each_entry_safe(preq, preq_temp, &pep->pending_list, list) {
1769                 if (cur_preq == preq) {
1770                         req_on_hw_ring = 1;
1771                         goto found;
1772                 }
1773         }
1774
1775         list_for_each_entry_safe(preq, preq_temp, &pep->deferred_list, list) {
1776                 if (cur_preq == preq)
1777                         goto found;
1778         }
1779
1780         goto not_found;
1781
1782 found:
1783         link_trb = preq->trb;
1784
1785         /* Update the ring only if the removed request is on the pending_list. */
1786         if (req_on_hw_ring && link_trb) {
1787                 /* Stop DMA */
1788                 writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
1789
1790                 /* Wait for DFLUSH cleared. */
1791                 readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
1792                                           !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
1793
1794                 buffer = cpu_to_le32(TRB_BUFFER(pep->ring.dma +
1795                                     ((preq->end_trb + 1) * TRB_SIZE)));
1796
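                     /*
                      * Convert each TRB of the removed request into a link TRB
                      * that points past the request, preserving the cycle bit,
                      * so the DMA skips the request entirely.
                      */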
1797                 for (i = 0; i < preq->num_of_trb; i++) {
1798                         link_trb->buffer = buffer;
1799                         link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control)
1800                                             & TRB_CYCLE) | TRB_CHAIN |
1801                                             TRB_TYPE(TRB_LINK));
1802
1803                         trace_cdns2_queue_trb(pep, link_trb);
1804                         link_trb = cdns2_next_trb(pep, link_trb);
1805                 }
1806
1807                 if (pep->wa1_trb == preq->trb)
1808                         cdns2_wa1_restore_cycle_bit(pep);
1809         }
1810
1811         cdns2_gadget_giveback(pep, cur_preq, -ECONNRESET);
1812
1813         preq = cdns2_next_preq(&pep->pending_list);
1814         if (preq)
1815                 cdns2_rearm_transfer(pep, 1);
1816
1817 not_found:
1818         spin_unlock_irqrestore(&pep->pdev->lock, flags);
1819         return 0;
1820 }
1821
1822 int cdns2_halt_endpoint(struct cdns2_device *pdev,
1823                         struct cdns2_endpoint *pep,
1824                         int value)
1825 {
1826         u8 __iomem *conf;
1827         int dir = 0;
1828
1829         if (!(pep->ep_state & EP_ENABLED))
1830                 return -EPERM;
1831
1832         if (pep->dir) {
1833                 dir = ENDPRST_IO_TX;
1834                 conf = &pdev->epx_regs->ep[pep->num - 1].txcon;
1835         } else {
1836                 conf = &pdev->epx_regs->ep[pep->num - 1].rxcon;
1837         }
1838
1839         if (!value) {
1840                 struct cdns2_trb *trb = NULL;
1841                 struct cdns2_request *preq;
1842                 struct cdns2_trb trb_tmp;
1843
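                     /*
                      * Temporarily flip the cycle bit of the first pending TRB
                      * so the DMA can't pick it up while the stall is cleared;
                      * the saved copy is restored below before re-arming.
                      */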
1844                 preq = cdns2_next_preq(&pep->pending_list);
1845                 if (preq) {
1846                         trb = preq->trb;
1847                         if (trb) {
1848                                 trb_tmp = *trb;
1849                                 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
1850                         }
1851                 }
1852
1853                 trace_cdns2_ep_halt(pep, 0, 0);
1854
1855                 /* Reset the sequence number (data toggle). */
1856                 writeb(dir | pep->num, &pdev->epx_regs->endprst);
1857                 writeb(dir | ENDPRST_TOGRST | pep->num,
1858                        &pdev->epx_regs->endprst);
1859
1860                 clear_reg_bit_8(conf, EPX_CON_STALL);
1861
1862                 pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
1863
1864                 if (preq) {
1865                         if (trb)
1866                                 *trb = trb_tmp;
1867
1868                         cdns2_rearm_transfer(pep, 1);
1869                 }
1870
1871                 cdns2_start_all_request(pdev, pep);
1872         } else {
1873                 trace_cdns2_ep_halt(pep, 1, 0);
1874                 set_reg_bit_8(conf, EPX_CON_STALL);
1875                 writeb(dir | pep->num, &pdev->epx_regs->endprst);
1876                 writeb(dir | ENDPRST_FIFORST | pep->num,
1877                        &pdev->epx_regs->endprst);
1878                 pep->ep_state |= EP_STALLED;
1879         }
1880
1881         return 0;
1882 }
1883
1884 /* Sets/clears stall on selected endpoint. */
1885 static int cdns2_gadget_ep_set_halt(struct usb_ep *ep, int value)
1886 {
1887         struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1888         struct cdns2_device *pdev = pep->pdev;
1889         struct cdns2_request *preq;
1890         unsigned long flags = 0;
1891         int ret;
1892
1893         spin_lock_irqsave(&pdev->lock, flags);
1894
1895         preq = cdns2_next_preq(&pep->pending_list);
1896         if (value && preq) {
1897                 trace_cdns2_ep_busy_try_halt_again(pep);
1898                 ret = -EAGAIN;
1899                 goto done;
1900         }
1901
1902         if (!value)
1903                 pep->ep_state &= ~EP_WEDGE;
1904
1905         ret = cdns2_halt_endpoint(pdev, pep, value);
1906
1907 done:
1908         spin_unlock_irqrestore(&pdev->lock, flags);
1909         return ret;
1910 }
1911
1912 static int cdns2_gadget_ep_set_wedge(struct usb_ep *ep)
1913 {
1914         struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
1915
1916         cdns2_gadget_ep_set_halt(ep, 1);
1917         pep->ep_state |= EP_WEDGE;
1918
1919         return 0;
1920 }
1921
1922 static struct
1923 cdns2_endpoint *cdns2_find_available_ep(struct cdns2_device *pdev,
1924                                         struct usb_endpoint_descriptor *desc)
1925 {
1926         struct cdns2_endpoint *pep;
1927         struct usb_ep *ep;
1928         int ep_correct;
1929
1930         list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
1931                 unsigned long num;
1932                 int ret;
1933                 /* Ep name is of the form epXin/epXout; only the single digit X is parsed. */
1934                 char c[2] = {ep->name[2], '\0'};
1935
1936                 ret = kstrtoul(c, 10, &num);
1937                 if (ret)
1938                         return ERR_PTR(ret);
1939                 pep = ep_to_cdns2_ep(ep);
1940
1941                 if (pep->num != num)
1942                         continue;
1943
1944                 ep_correct = (pep->endpoint.caps.dir_in &&
1945                               usb_endpoint_dir_in(desc)) ||
1946                              (pep->endpoint.caps.dir_out &&
1947                               usb_endpoint_dir_out(desc));
1948
1949                 if (ep_correct && !(pep->ep_state & EP_CLAIMED))
1950                         return pep;
1951         }
1952
1953         return ERR_PTR(-ENOENT);
1954 }
1955
1956 /*
1957  * Matches an endpoint descriptor to a hardware endpoint and records which
1958  * endpoints will be used, so that on-chip memory usage can be optimized.
1959  */
1960 static struct
1961 usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget,
1962                               struct usb_endpoint_descriptor *desc,
1963                               struct usb_ss_ep_comp_descriptor *comp_desc)
1964 {
1965         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
1966         struct cdns2_endpoint *pep;
1967         unsigned long flags;
1968
1969         pep = cdns2_find_available_ep(pdev, desc);
1970         if (IS_ERR(pep)) {
1971                 dev_err(pdev->dev, "no available ep\n");
1972                 return NULL;
1973         }
1974
1975         spin_lock_irqsave(&pdev->lock, flags);
1976
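             /* ISO endpoints get deeper on-chip buffering than bulk/interrupt. */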
1977         if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
1978                 pep->buffering = 4;
1979         else
1980                 pep->buffering = 1;
1981
1982         pep->ep_state |= EP_CLAIMED;
1983         spin_unlock_irqrestore(&pdev->lock, flags);
1984
1985         return &pep->endpoint;
1986 }
1987
1988 static const struct usb_ep_ops cdns2_gadget_ep_ops = {
1989         .enable = cdns2_gadget_ep_enable,
1990         .disable = cdns2_gadget_ep_disable,
1991         .alloc_request = cdns2_gadget_ep_alloc_request,
1992         .free_request = cdns2_gadget_ep_free_request,
1993         .queue = cdns2_gadget_ep_queue,
1994         .dequeue = cdns2_gadget_ep_dequeue,
1995         .set_halt = cdns2_gadget_ep_set_halt,
1996         .set_wedge = cdns2_gadget_ep_set_wedge,
1997 };
1998
1999 static int cdns2_gadget_get_frame(struct usb_gadget *gadget)
2000 {
2001         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2002
2003         return readw(&pdev->usb_regs->frmnr);
2004 }
2005
2006 static int cdns2_gadget_wakeup(struct usb_gadget *gadget)
2007 {
2008         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2009         unsigned long flags;
2010
2011         spin_lock_irqsave(&pdev->lock, flags);
2012         cdns2_wakeup(pdev);
2013         spin_unlock_irqrestore(&pdev->lock, flags);
2014
2015         return 0;
2016 }
2017
2018 static int cdns2_gadget_set_selfpowered(struct usb_gadget *gadget,
2019                                         int is_selfpowered)
2020 {
2021         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2022         unsigned long flags;
2023
2024         spin_lock_irqsave(&pdev->lock, flags);
2025         pdev->is_selfpowered = !!is_selfpowered;
2026         spin_unlock_irqrestore(&pdev->lock, flags);
2027         return 0;
2028 }
2029
2030 /* Disable interrupts and begin the controller halting process. */
2031 static void cdns2_quiesce(struct cdns2_device *pdev)
2032 {
2033         set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
2034
2035         /* Disable interrupts. */
2036         writeb(0, &pdev->interrupt_regs->extien);
2037         writeb(0, &pdev->interrupt_regs->usbien);
2038         writew(0, &pdev->adma_regs->ep_ien);
2039
2040         /* Clear interrupt line. */
2041         writeb(0x0, &pdev->interrupt_regs->usbirq);
2042 }
2043
2044 static void cdns2_gadget_config(struct cdns2_device *pdev)
2045 {
2046         cdns2_ep0_config(pdev);
2047
2048         /* Enable DMA interrupts for all endpoints. */
2049         writel(~0x0, &pdev->adma_regs->ep_ien);
2050         cdns2_enable_l1(pdev, 0);
2051         writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
2052         writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
2053         writel(DMA_CONF_DMULT, &pdev->adma_regs->conf);
2054 }
2055
2056 static int cdns2_gadget_pullup(struct usb_gadget *gadget, int is_on)
2057 {
2058         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2059         unsigned long flags;
2060
2061         trace_cdns2_pullup(is_on);
2062
2063         /*
2064          * Disable events handling while controller is being
2065          * enabled/disabled.
2066          */
2067         disable_irq(pdev->irq);
2068         spin_lock_irqsave(&pdev->lock, flags);
2069
2070         if (is_on) {
2071                 cdns2_gadget_config(pdev);
2072                 clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
2073         } else {
2074                 cdns2_quiesce(pdev);
2075         }
2076
2077         spin_unlock_irqrestore(&pdev->lock, flags);
2078         enable_irq(pdev->irq);
2079
2080         return 0;
2081 }
2082
2083 static int cdns2_gadget_udc_start(struct usb_gadget *gadget,
2084                                   struct usb_gadget_driver *driver)
2085 {
2086         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2087         enum usb_device_speed max_speed = driver->max_speed;
2088         unsigned long flags;
2089
2090         spin_lock_irqsave(&pdev->lock, flags);
2091         pdev->gadget_driver = driver;
2092
2093         /* Limit speed if necessary. */
2094         max_speed = min(driver->max_speed, gadget->max_speed);
2095
2096         switch (max_speed) {
2097         case USB_SPEED_FULL:
2098                 writeb(SPEEDCTRL_HSDISABLE, &pdev->usb_regs->speedctrl);
2099                 break;
2100         case USB_SPEED_HIGH:
2101                 writeb(0, &pdev->usb_regs->speedctrl);
2102                 break;
2103         default:
2104                 dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
2105                         max_speed);
2106                 fallthrough;
2107         case USB_SPEED_UNKNOWN:
2108                 /* Default to highspeed. */
2109                 max_speed = USB_SPEED_HIGH;
2110                 break;
2111         }
2112
2113         /* Reset all USB endpoints. */
2114         writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
2115         writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
2116                &pdev->usb_regs->endprst);
2117         writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->usb_regs->endprst);
2118
2119         cdns2_eps_onchip_buffer_init(pdev);
2120
2121         cdns2_gadget_config(pdev);
2122         spin_unlock_irqrestore(&pdev->lock, flags);
2123
2124         return 0;
2125 }
2126
2127 static int cdns2_gadget_udc_stop(struct usb_gadget *gadget)
2128 {
2129         struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
2130         struct cdns2_endpoint *pep;
2131         u32 bEndpointAddress;
2132         struct usb_ep *ep;
2133         int val;
2134
2135         pdev->gadget_driver = NULL;
2136         pdev->gadget.speed = USB_SPEED_UNKNOWN;
2137
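             /* Reset the DMA context of every hardware endpoint. */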
2138         list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
2139                 pep = ep_to_cdns2_ep(ep);
2140                 bEndpointAddress = pep->num | pep->dir;
2141                 cdns2_select_ep(pdev, bEndpointAddress);
2142                 writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
2143                 readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
2144                                           !(val & DMA_EP_CMD_EPRST), 1, 100);
2145         }
2146
2147         cdns2_quiesce(pdev);
2148
2149         writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
2150         writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
2151                &pdev->epx_regs->endprst);
2152         writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->epx_regs->endprst);
2153
2154         return 0;
2155 }
2156
2157 static const struct usb_gadget_ops cdns2_gadget_ops = {
2158         .get_frame = cdns2_gadget_get_frame,
2159         .wakeup = cdns2_gadget_wakeup,
2160         .set_selfpowered = cdns2_gadget_set_selfpowered,
2161         .pullup = cdns2_gadget_pullup,
2162         .udc_start = cdns2_gadget_udc_start,
2163         .udc_stop = cdns2_gadget_udc_stop,
2164         .match_ep = cdns2_gadget_match_ep,
2165 };
2166
2167 static void cdns2_free_all_eps(struct cdns2_device *pdev)
2168 {
2169         int i;
2170
2171         for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++)
2172                 cdns2_free_tr_segment(&pdev->eps[i]);
2173 }
2174
2175 /* Initializes software endpoints of gadget. */
2176 static int cdns2_init_eps(struct cdns2_device *pdev)
2177 {
2178         struct cdns2_endpoint *pep;
2179         int i;
2180
2181         for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
2182                 bool direction = !(i & 1); /* Start from OUT endpoint. */
2183                 u8 epnum = ((i + 1) >> 1);
2184
2185                 /*
2186                  * Endpoints are being held in pdev->eps[] in form:
2187                  * ep0, ep1out, ep1in ... ep15out, ep15in.
2188                  */
2189                 if (!CDNS2_IF_EP_EXIST(pdev, epnum, direction))
2190                         continue;
2191
2192                 pep = &pdev->eps[i];
2193                 pep->pdev = pdev;
2194                 pep->num = epnum;
2195                 /* USB_DIR_OUT for OUT endpoints, USB_DIR_IN for IN. */
2196                 pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
2197                 pep->idx = i;
2198
2199                 /* Ep0in and ep0out are represented by pdev->eps[0]. */
2200                 if (!epnum) {
2201                         int ret;
2202
2203                         snprintf(pep->name, sizeof(pep->name), "ep%d%s",
2204                                  epnum, "BiDir");
2205
2206                         cdns2_init_ep0(pdev, pep);
2207
2208                         ret = cdns2_alloc_tr_segment(pep);
2209                         if (ret) {
2210                                 dev_err(pdev->dev, "Failed to init ep0\n");
2211                                 return ret;
2212                         }
2213                 } else {
2214                         snprintf(pep->name, sizeof(pep->name), "ep%d%s",
2215                                  epnum, direction ? "in" : "out");
2216                         pep->endpoint.name = pep->name;
2217
2218                         usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
2219                         pep->endpoint.ops = &cdns2_gadget_ep_ops;
2220                         list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list);
2221
2222                         pep->endpoint.caps.dir_in = direction;
2223                         pep->endpoint.caps.dir_out = !direction;
2224
2225                         pep->endpoint.caps.type_iso = 1;
2226                         pep->endpoint.caps.type_bulk = 1;
2227                         pep->endpoint.caps.type_int = 1;
2228                 }
2229
2230                 pep->endpoint.name = pep->name;
2231                 pep->ep_state = 0;
2232
2233                 dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
2234                         "BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
2235                         pep->name,
2236                         (pep->endpoint.caps.type_control) ? "yes" : "no",
2237                         (pep->endpoint.caps.type_int) ? "yes" : "no",
2238                         (pep->endpoint.caps.type_bulk) ? "yes" : "no",
2239                         (pep->endpoint.caps.type_iso) ? "yes" : "no",
2240                         (pep->endpoint.caps.dir_in) ? "yes" : "no",
2241                         (pep->endpoint.caps.dir_out) ? "yes" : "no");
2242
2243                 INIT_LIST_HEAD(&pep->pending_list);
2244                 INIT_LIST_HEAD(&pep->deferred_list);
2245         }
2246
2247         return 0;
2248 }
2249
2250 static int cdns2_gadget_start(struct cdns2_device *pdev)
2251 {
2252         u32 max_speed;
2253         void *buf;
2254         int val;
2255         int ret;
2256
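             /*
              * All register blocks share a single window; only the
              * ADMA registers sit at a fixed offset within it.
              */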
2257         pdev->usb_regs = pdev->regs;
2258         pdev->ep0_regs = pdev->regs;
2259         pdev->epx_regs = pdev->regs;
2260         pdev->interrupt_regs = pdev->regs;
2261         pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;
2262
2263         /* Reset controller. */
2264         set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);
2265
2266         ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
2267                                         !(val & CPUCTRL_SW_RST), 1, 10000);
2268         if (ret) {
2269                 dev_err(pdev->dev, "Error: controller reset timed out\n");
2270                 return -EINVAL;
2271         }
2272
2273         usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);
2274
2275         device_property_read_u16(pdev->dev, "cdns,on-chip-tx-buff-size",
2276                                  &pdev->onchip_tx_buf);
2277         device_property_read_u16(pdev->dev, "cdns,on-chip-rx-buff-size",
2278                                  &pdev->onchip_rx_buf);
2279         device_property_read_u32(pdev->dev, "cdns,avail-endpoints",
2280                                  &pdev->eps_supported);
2281
2282         /*
2283          * The driver assumes that each USBHS controller has at least
2284          * one IN and one OUT non-control endpoint.
2285          */
2286         if (!pdev->onchip_tx_buf && !pdev->onchip_rx_buf) {
2287                 ret = -EINVAL;
2288                 dev_err(pdev->dev, "Invalid on-chip memory configuration\n");
2289                 goto put_gadget;
2290         }
2291
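             /*
              * Bits 0 and 16 presumably flag ep0 OUT/IN; at least one other
              * hardware endpoint must be reported as available.
              */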
2292         if (!(pdev->eps_supported & ~0x00010001)) {
2293                 ret = -EINVAL;
2294                 dev_err(pdev->dev, "No hardware endpoints available\n");
2295                 goto put_gadget;
2296         }
2297
2298         max_speed = usb_get_maximum_speed(pdev->dev);
2299
2300         switch (max_speed) {
2301         case USB_SPEED_FULL:
2302         case USB_SPEED_HIGH:
2303                 break;
2304         default:
2305                 dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
2306                         max_speed);
2307                 fallthrough;
2308         case USB_SPEED_UNKNOWN:
2309                 max_speed = USB_SPEED_HIGH;
2310                 break;
2311         }
2312
2313         pdev->gadget.max_speed = max_speed;
2314         pdev->gadget.speed = USB_SPEED_UNKNOWN;
2315         pdev->gadget.ops = &cdns2_gadget_ops;
2316         pdev->gadget.name = "usbhs-gadget";
2317         pdev->gadget.quirk_avoids_skb_reserve = 1;
2318         pdev->gadget.irq = pdev->irq;
2319
2320         spin_lock_init(&pdev->lock);
2321         INIT_WORK(&pdev->pending_status_wq, cdns2_pending_setup_status_handler);
2322
2323         /* Initialize endpoint container. */
2324         INIT_LIST_HEAD(&pdev->gadget.ep_list);
2325         pdev->eps_dma_pool = dma_pool_create("cdns2_eps_dma_pool", pdev->dev,
2326                                              TR_SEG_SIZE, 8, 0);
2327         if (!pdev->eps_dma_pool) {
2328                 dev_err(pdev->dev, "Failed to create TRB dma pool\n");
2329                 ret = -ENOMEM;
2330                 goto put_gadget;
2331         }
2332
2333         ret = cdns2_init_eps(pdev);
2334         if (ret) {
2335                 dev_err(pdev->dev, "Failed to create endpoints\n");
2336                 goto destroy_dma_pool;
2337         }
2338
2339         pdev->gadget.sg_supported = 1;
2340
2341         pdev->zlp_buf = kzalloc(CDNS2_EP_ZLP_BUF_SIZE, GFP_KERNEL);
2342         if (!pdev->zlp_buf) {
2343                 ret = -ENOMEM;
2344                 goto destroy_dma_pool;
2345         }
2346
2347         /* Allocate memory for setup packet buffer. */
2348         buf = dma_alloc_coherent(pdev->dev, 8, &pdev->ep0_preq.request.dma,
2349                                  GFP_DMA);
2350         pdev->ep0_preq.request.buf = buf;
2351
2352         if (!pdev->ep0_preq.request.buf) {
2353                 ret = -ENOMEM;
2354                 goto free_zlp_buf;
2355         }
2356
2357         /* Add USB gadget device. */
2358         ret = usb_add_gadget(&pdev->gadget);
2359         if (ret < 0) {
2360                 dev_err(pdev->dev, "Failed to add gadget\n");
2361                 goto free_ep0_buf;
2362         }
2363
2364         return 0;
2365
2366 free_ep0_buf:
2367         dma_free_coherent(pdev->dev, 8, pdev->ep0_preq.request.buf,
2368                           pdev->ep0_preq.request.dma);
2369 free_zlp_buf:
2370         kfree(pdev->zlp_buf);
2371 destroy_dma_pool:
2372         dma_pool_destroy(pdev->eps_dma_pool);
2373 put_gadget:
2374         usb_put_gadget(&pdev->gadget);
2375
2376         return ret;
2377 }
2378
2379 int cdns2_gadget_suspend(struct cdns2_device *pdev)
2380 {
2381         unsigned long flags;
2382
2383         cdns2_disconnect_gadget(pdev);
2384
2385         spin_lock_irqsave(&pdev->lock, flags);
2386         pdev->gadget.speed = USB_SPEED_UNKNOWN;
2387
2388         trace_cdns2_device_state("notattached");
2389         usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
2390         cdns2_enable_l1(pdev, 0);
2391
2392         /* Disable interrupt for device. */
2393         writeb(0, &pdev->interrupt_regs->usbien);
2394         writel(0, &pdev->adma_regs->ep_ien);
2395         spin_unlock_irqrestore(&pdev->lock, flags);
2396
2397         return 0;
2398 }
2399
2400 int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
2401 {
2402         unsigned long flags;
2403
2404         spin_lock_irqsave(&pdev->lock, flags);
2405
2406         if (!pdev->gadget_driver) {
2407                 spin_unlock_irqrestore(&pdev->lock, flags);
2408                 return 0;
2409         }
2410
2411         cdns2_gadget_config(pdev);
2412
2413         if (hibernated)
2414                 clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
2415
2416         spin_unlock_irqrestore(&pdev->lock, flags);
2417
2418         return 0;
2419 }
2420
2421 void cdns2_gadget_remove(struct cdns2_device *pdev)
2422 {
2423         pm_runtime_mark_last_busy(pdev->dev);
2424         pm_runtime_put_autosuspend(pdev->dev);
2425
2426         usb_del_gadget(&pdev->gadget);
2427         cdns2_free_all_eps(pdev);
2428
2429         dma_pool_destroy(pdev->eps_dma_pool);
2430         kfree(pdev->zlp_buf);
2431         usb_put_gadget(&pdev->gadget);
2432 }
2433
2434 int cdns2_gadget_init(struct cdns2_device *pdev)
2435 {
2436         int ret;
2437
2438         /* Ensure a 32-bit DMA mask. */
2439         ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
2440         if (ret) {
2441                 dev_err(pdev->dev, "Failed to set dma mask: %d\n", ret);
2442                 return ret;
2443         }
2444
2445         pm_runtime_get_sync(pdev->dev);
2446
2447         cdsn2_isoc_burst_opt(pdev);
2448
2449         ret = cdns2_gadget_start(pdev);
2450         if (ret) {
2451                 pm_runtime_put_sync(pdev->dev);
2452                 return ret;
2453         }
2454
2455         /*
2456          * Because the interrupt line can be shared with other components,
2457          * the driver can't use the IRQF_ONESHOT flag here.
2458          */
2459         ret = devm_request_threaded_irq(pdev->dev, pdev->irq,
2460                                         cdns2_usb_irq_handler,
2461                                         cdns2_thread_irq_handler,
2462                                         IRQF_SHARED,
2463                                         dev_name(pdev->dev),
2464                                         pdev);
2465         if (ret)
2466                 goto err0;
2467
2468         return 0;
2469
2470 err0:
2471         cdns2_gadget_remove(pdev);
2472
2473         return ret;
2474 }