1 /*
2  * Driver for the PLX NET2280 USB device controller.
3  * Specs and errata are available from <http://www.plxtech.com>.
4  *
5  * PLX Technology Inc. (formerly NetChip Technology) supported the
6  * development of this driver.
7  *
8  *
9  * CODE STATUS HIGHLIGHTS
10  *
11  * This driver should work well with most "gadget" drivers, including
12  * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
13  * as well as Gadget Zero and Gadgetfs.
14  *
15  * DMA is enabled by default.
16  *
17  * MSI is enabled by default.  The legacy IRQ is used if MSI cannot
18  * be enabled.
19  *
20  * Note that almost all the errata workarounds here are only needed for
21  * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
22  */
23
24 /*
25  * Copyright (C) 2003 David Brownell
26  * Copyright (C) 2003-2005 PLX Technology, Inc.
27  * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
28  *
29  * Modified by Seth Levy, 2005, PLX Technology, Inc., to provide
30  *      compatibility with the 2282 chip
31  *
32  * Modified by Ricardo Ribalda, Qtechnology A/S, to provide compatibility
33  *      with the USB 338x chip.  Based on the PLX driver
34  *
35  * This program is free software; you can redistribute it and/or modify
36  * it under the terms of the GNU General Public License as published by
37  * the Free Software Foundation; either version 2 of the License, or
38  * (at your option) any later version.
39  */
40
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/kernel.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/timer.h>
51 #include <linux/list.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/device.h>
55 #include <linux/usb/ch9.h>
56 #include <linux/usb/gadget.h>
57 #include <linux/prefetch.h>
58 #include <linux/io.h>
59
60 #include <asm/byteorder.h>
61 #include <asm/irq.h>
62 #include <asm/unaligned.h>
63
64 #define DRIVER_DESC             "PLX NET228x/USB338x USB Peripheral Controller"
65 #define DRIVER_VERSION          "2005 Sept 27/v3.0"
66
67 #define EP_DONTUSE              13      /* nonzero */
68
69 #define USE_RDK_LEDS            /* GPIO pins control three LEDs */
70
71
72 static const char driver_name[] = "net2280";
73 static const char driver_desc[] = DRIVER_DESC;
74
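/* Per-endpoint interrupt-enable bit positions in pciirqenb0 for the USB338x
 * register layout; legacy NET228x parts use BIT(ep->num) directly
 * (see enable_pciirqenb()).
 */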
75 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
76 static const char ep0name[] = "ep0";
77
78 #define EP_INFO(_name, _caps) \
79         { \
80                 .name = _name, \
81                 .caps = _caps, \
82         }
83
84 static const struct {
85         const char *name;
86         const struct usb_ep_caps caps;
87 } ep_info_dft[] = { /* Default endpoint configuration */
88         EP_INFO(ep0name,
89                 USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
90         EP_INFO("ep-a",
91                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
92         EP_INFO("ep-b",
93                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
94         EP_INFO("ep-c",
95                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
96         EP_INFO("ep-d",
97                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
98         EP_INFO("ep-e",
99                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
100         EP_INFO("ep-f",
101                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
102         EP_INFO("ep-g",
103                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
104         EP_INFO("ep-h",
105                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
106 }, ep_info_adv[] = { /* Endpoints for usb3380 advanced mode */
107         EP_INFO(ep0name,
108                 USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
109         EP_INFO("ep1in",
110                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
111         EP_INFO("ep2out",
112                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
113         EP_INFO("ep3in",
114                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
115         EP_INFO("ep4out",
116                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
117         EP_INFO("ep1out",
118                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
119         EP_INFO("ep2in",
120                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
121         EP_INFO("ep3out",
122                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
123         EP_INFO("ep4in",
124                 USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
125 };
126
127 #undef EP_INFO
128
129 /* mode 0 == ep-{a,b,c,d} 1K fifo each
130  * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
131  * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
132  */
133 static ushort fifo_mode;
134
135 /* "modprobe net2280 fifo_mode=1" etc */
136 module_param(fifo_mode, ushort, 0644);
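/* Because the parameter is registered with mode 0644, fifo_mode can also be
 * inspected and changed at runtime via
 * /sys/module/net2280/parameters/fifo_mode.
 */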
137
138 /* enable_suspend -- When enabled, the driver will respond to
139  * USB suspend requests by powering down the NET2280.  Otherwise,
140  * USB suspend requests will be ignored.  This is acceptable for
141  * self-powered devices.
142  */
143 static bool enable_suspend;
144
145 /* "modprobe net2280 enable_suspend=1" etc */
146 module_param(enable_suspend, bool, 0444);
147
148 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
149
150 static char *type_string(u8 bmAttributes)
151 {
152         switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
153         case USB_ENDPOINT_XFER_BULK:    return "bulk";
154         case USB_ENDPOINT_XFER_ISOC:    return "iso";
155         case USB_ENDPOINT_XFER_INT:     return "intr";
156         }
157         return "control";
158 }
159
160 #include "net2280.h"
161
162 #define valid_bit       cpu_to_le32(BIT(VALID_BIT))
163 #define dma_done_ie     cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
164
165 static void ep_clear_seqnum(struct net2280_ep *ep);
166 static void stop_activity(struct net2280 *dev,
167                                         struct usb_gadget_driver *driver);
168 static void ep0_start(struct net2280 *dev);
169
170 /*-------------------------------------------------------------------------*/
171 static inline void enable_pciirqenb(struct net2280_ep *ep)
172 {
173         u32 tmp = readl(&ep->dev->regs->pciirqenb0);
174
175         if (ep->dev->quirks & PLX_LEGACY)
176                 tmp |= BIT(ep->num);
177         else
178                 tmp |= BIT(ep_bit[ep->num]);
179         writel(tmp, &ep->dev->regs->pciirqenb0);
180
181         return;
182 }
183
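/* Configure and enable one endpoint according to the given descriptor:
 * validate the request, program type/direction/maxpacket into ep_cfg, apply
 * the chip-specific errata workarounds, and enable either the per-packet
 * (PIO) or per-request (DMA) interrupts for it.
 */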
184 static int
185 net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
186 {
187         struct net2280          *dev;
188         struct net2280_ep       *ep;
189         u32                     max;
190         u32 tmp = 0;
191         u32 type;
192         unsigned long           flags;
193         static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
194         int ret = 0;
195
196         ep = container_of(_ep, struct net2280_ep, ep);
197         if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
198                         desc->bDescriptorType != USB_DT_ENDPOINT) {
199                 pr_err("%s: failed at line=%d\n", __func__, __LINE__);
200                 return -EINVAL;
201         }
202         dev = ep->dev;
203         if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
204                 ret = -ESHUTDOWN;
205                 goto print_err;
206         }
207
208         /* erratum 0119 workaround ties up an endpoint number */
209         if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
210                 ret = -EDOM;
211                 goto print_err;
212         }
213
214         if (dev->quirks & PLX_SUPERSPEED) {
215                 if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
216                         ret = -EDOM;
217                         goto print_err;
218                 }
219                 ep->is_in = !!usb_endpoint_dir_in(desc);
220                 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
221                         ret = -EINVAL;
222                         goto print_err;
223                 }
224         }
225
226         /* sanity check ep-e/ep-f since their fifos are small */
227         max = usb_endpoint_maxp(desc) & 0x1fff;
228         if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
229                 ret = -ERANGE;
230                 goto print_err;
231         }
232
233         spin_lock_irqsave(&dev->lock, flags);
234         _ep->maxpacket = max & 0x7ff;
235         ep->desc = desc;
236
237         /* ep_reset() has already been called */
238         ep->stopped = 0;
239         ep->wedged = 0;
240         ep->out_overflow = 0;
241
242         /* set speed-dependent max packet; may kick in high bandwidth */
243         set_max_speed(ep, max);
244
245         /* set type, direction, address; reset fifo counters */
246         writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
247
248         if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
249                 tmp = readl(&ep->cfg->ep_cfg);
250                 /* If USB ep number doesn't match hardware ep number */
251                 if ((tmp & 0xf) != usb_endpoint_num(desc)) {
252                         ret = -EINVAL;
253                         spin_unlock_irqrestore(&dev->lock, flags);
254                         goto print_err;
255                 }
256                 if (ep->is_in)
257                         tmp &= ~USB3380_EP_CFG_MASK_IN;
258                 else
259                         tmp &= ~USB3380_EP_CFG_MASK_OUT;
260         }
261         type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
262         if (type == USB_ENDPOINT_XFER_INT) {
263                 /* erratum 0105 workaround prevents hs NYET */
264                 if (dev->chiprev == 0100 &&
265                                 dev->gadget.speed == USB_SPEED_HIGH &&
266                                 !(desc->bEndpointAddress & USB_DIR_IN))
267                         writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
268                                 &ep->regs->ep_rsp);
269         } else if (type == USB_ENDPOINT_XFER_BULK) {
270                 /* catch some particularly blatant driver bugs */
271                 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
272                     (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
273                     (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
274                         spin_unlock_irqrestore(&dev->lock, flags);
275                         ret = -ERANGE;
276                         goto print_err;
277                 }
278         }
279         ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
280         /* Enable this endpoint */
281         if (dev->quirks & PLX_LEGACY) {
282                 tmp |= type << ENDPOINT_TYPE;
283                 tmp |= desc->bEndpointAddress;
284                 /* default full fifo lines */
285                 tmp |= (4 << ENDPOINT_BYTE_COUNT);
286                 tmp |= BIT(ENDPOINT_ENABLE);
287                 ep->is_in = (tmp & USB_DIR_IN) != 0;
288         } else {
289                 /* In legacy (non-enhanced) mode, only the OUT endpoint config fields are used */
290                 if (dev->enhanced_mode && ep->is_in) {
291                         tmp |= type << IN_ENDPOINT_TYPE;
292                         tmp |= BIT(IN_ENDPOINT_ENABLE);
293                 } else {
294                         tmp |= type << OUT_ENDPOINT_TYPE;
295                         tmp |= BIT(OUT_ENDPOINT_ENABLE);
296                         tmp |= (ep->is_in << ENDPOINT_DIRECTION);
297                 }
298
299                 tmp |= (4 << ENDPOINT_BYTE_COUNT);
300                 if (!dev->enhanced_mode)
301                         tmp |= usb_endpoint_num(desc);
302                 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
303         }
304
305         /* Make sure all the registers are written before ep_rsp */
306         wmb();
307
308         /* for OUT transfers, block the rx fifo until a read is posted */
309         if (!ep->is_in)
310                 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
311         else if (!(dev->quirks & PLX_2280)) {
312                 /* Added for the 2282: don't use NAK packets on an IN endpoint;
313                  * this was ignored on the 2280
314                  */
315                 writel(BIT(CLEAR_NAK_OUT_PACKETS) |
316                         BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
317         }
318
319         if (dev->quirks & PLX_SUPERSPEED)
320                 ep_clear_seqnum(ep);
321         writel(tmp, &ep->cfg->ep_cfg);
322
323         /* enable irqs */
324         if (!ep->dma) {                         /* pio, per-packet */
325                 enable_pciirqenb(ep);
326
327                 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
328                         BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
329                 if (dev->quirks & PLX_2280)
330                         tmp |= readl(&ep->regs->ep_irqenb);
331                 writel(tmp, &ep->regs->ep_irqenb);
332         } else {                                /* dma, per-request */
333                 tmp = BIT((8 + ep->num));       /* completion */
334                 tmp |= readl(&dev->regs->pciirqenb1);
335                 writel(tmp, &dev->regs->pciirqenb1);
336
337                 /* for short OUT transfers, dma completions can't
338                  * advance the queue; do it pio-style, by hand.
339                  * NOTE erratum 0112 workaround #2
340                  */
341                 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
342                         tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
343                         writel(tmp, &ep->regs->ep_irqenb);
344
345                         enable_pciirqenb(ep);
346                 }
347         }
348
349         tmp = desc->bEndpointAddress;
350         ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
351                 _ep->name, tmp & 0x0f, DIR_STRING(tmp),
352                 type_string(desc->bmAttributes),
353                 ep->dma ? "dma" : "pio", max);
354
355         /* pci writes may still be posted */
356         spin_unlock_irqrestore(&dev->lock, flags);
357         return ret;
358
359 print_err:
360         dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
361         return ret;
362 }
363
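/* Busy-wait until (*ptr & mask) == done or the timeout (in microseconds)
 * expires.  An all-ones readback is treated as "device unplugged" and
 * returns -ENODEV; a timeout returns -ETIMEDOUT.
 */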
364 static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
365 {
366         u32     result;
367
368         do {
369                 result = readl(ptr);
370                 if (result == ~(u32)0)          /* "device unplugged" */
371                         return -ENODEV;
372                 result &= mask;
373                 if (result == done)
374                         return 0;
375                 udelay(1);
376                 usec--;
377         } while (usec > 0);
378         return -ETIMEDOUT;
379 }
380
381 static const struct usb_ep_ops net2280_ep_ops;
382
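/* Return an endpoint to its power-on defaults on NET228x parts: stop its DMA
 * channel, mask its interrupts, restore the default NAK-OUT behaviour, and
 * clear any stale status/FIFO state.  ep_reset_338x() below is the USB338x
 * equivalent.
 */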
383 static void ep_reset_228x(struct net2280_regs __iomem *regs,
384                           struct net2280_ep *ep)
385 {
386         u32             tmp;
387
388         ep->desc = NULL;
389         INIT_LIST_HEAD(&ep->queue);
390
391         usb_ep_set_maxpacket_limit(&ep->ep, ~0);
392         ep->ep.ops = &net2280_ep_ops;
393
394         /* disable the dma, irqs, endpoint... */
395         if (ep->dma) {
396                 writel(0, &ep->dma->dmactl);
397                 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
398                         BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
399                         BIT(DMA_ABORT),
400                         &ep->dma->dmastat);
401
402                 tmp = readl(&regs->pciirqenb0);
403                 tmp &= ~BIT(ep->num);
404                 writel(tmp, &regs->pciirqenb0);
405         } else {
406                 tmp = readl(&regs->pciirqenb1);
407                 tmp &= ~BIT((8 + ep->num));     /* completion */
408                 writel(tmp, &regs->pciirqenb1);
409         }
410         writel(0, &ep->regs->ep_irqenb);
411
412         /* init to our chosen defaults, notably so that we NAK OUT
413          * packets until the driver queues a read (+note erratum 0112)
414          */
415         if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
416                 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
417                 BIT(SET_NAK_OUT_PACKETS) |
418                 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
419                 BIT(CLEAR_INTERRUPT_MODE);
420         } else {
421                 /* added for 2282 */
422                 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
423                 BIT(CLEAR_NAK_OUT_PACKETS) |
424                 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
425                 BIT(CLEAR_INTERRUPT_MODE);
426         }
427
428         if (ep->num != 0) {
429                 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
430                         BIT(CLEAR_ENDPOINT_HALT);
431         }
432         writel(tmp, &ep->regs->ep_rsp);
433
434         /* scrub most status bits, and flush any fifo state */
435         if (ep->dev->quirks & PLX_2280)
436                 tmp = BIT(FIFO_OVERFLOW) |
437                         BIT(FIFO_UNDERFLOW);
438         else
439                 tmp = 0;
440
441         writel(tmp | BIT(TIMEOUT) |
442                 BIT(USB_STALL_SENT) |
443                 BIT(USB_IN_NAK_SENT) |
444                 BIT(USB_IN_ACK_RCVD) |
445                 BIT(USB_OUT_PING_NAK_SENT) |
446                 BIT(USB_OUT_ACK_SENT) |
447                 BIT(FIFO_FLUSH) |
448                 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
449                 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
450                 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
451                 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
452                 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
453                 BIT(DATA_IN_TOKEN_INTERRUPT),
454                 &ep->regs->ep_stat);
455
456         /* fifo size is handled separately */
457 }
458
459 static void ep_reset_338x(struct net2280_regs __iomem *regs,
460                                         struct net2280_ep *ep)
461 {
462         u32 tmp, dmastat;
463
464         ep->desc = NULL;
465         INIT_LIST_HEAD(&ep->queue);
466
467         usb_ep_set_maxpacket_limit(&ep->ep, ~0);
468         ep->ep.ops = &net2280_ep_ops;
469
470         /* disable the dma, irqs, endpoint... */
471         if (ep->dma) {
472                 writel(0, &ep->dma->dmactl);
473                 writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
474                        BIT(DMA_PAUSE_DONE_INTERRUPT) |
475                        BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
476                        BIT(DMA_TRANSACTION_DONE_INTERRUPT),
477                        /* | BIT(DMA_ABORT), */
478                        &ep->dma->dmastat);
479
480                 dmastat = readl(&ep->dma->dmastat);
481                 if (dmastat == 0x5002) {
482                         ep_warn(ep->dev, "dmastat returned %x!!\n",
483                                dmastat);
484                         writel(0x5a, &ep->dma->dmastat);
485                 }
486
487                 tmp = readl(&regs->pciirqenb0);
488                 tmp &= ~BIT(ep_bit[ep->num]);
489                 writel(tmp, &regs->pciirqenb0);
490         } else {
491                 if (ep->num < 5) {
492                         tmp = readl(&regs->pciirqenb1);
493                         tmp &= ~BIT((8 + ep->num));     /* completion */
494                         writel(tmp, &regs->pciirqenb1);
495                 }
496         }
497         writel(0, &ep->regs->ep_irqenb);
498
499         writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
500                BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
501                BIT(FIFO_OVERFLOW) |
502                BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
503                BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
504                BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
505                BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
506
507         tmp = readl(&ep->cfg->ep_cfg);
508         if (ep->is_in)
509                 tmp &= ~USB3380_EP_CFG_MASK_IN;
510         else
511                 tmp &= ~USB3380_EP_CFG_MASK_OUT;
512         writel(tmp, &ep->cfg->ep_cfg);
513 }
514
515 static void nuke(struct net2280_ep *);
516
517 static int net2280_disable(struct usb_ep *_ep)
518 {
519         struct net2280_ep       *ep;
520         unsigned long           flags;
521
522         ep = container_of(_ep, struct net2280_ep, ep);
523         if (!_ep || !ep->desc || _ep->name == ep0name) {
524                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
525                 return -EINVAL;
526         }
527         spin_lock_irqsave(&ep->dev->lock, flags);
528         nuke(ep);
529
530         if (ep->dev->quirks & PLX_SUPERSPEED)
531                 ep_reset_338x(ep->dev->regs, ep);
532         else
533                 ep_reset_228x(ep->dev->regs, ep);
534
535         ep_vdbg(ep->dev, "disabled %s %s\n",
536                         ep->dma ? "dma" : "pio", _ep->name);
537
538         /* synch memory views with the device */
539         (void)readl(&ep->cfg->ep_cfg);
540
541         if (!ep->dma && ep->num >= 1 && ep->num <= 4)
542                 ep->dma = &ep->dev->dma[ep->num - 1];
543
544         spin_unlock_irqrestore(&ep->dev->lock, flags);
545         return 0;
546 }
547
548 /*-------------------------------------------------------------------------*/
549
550 static struct usb_request
551 *net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
552 {
553         struct net2280_ep       *ep;
554         struct net2280_request  *req;
555
556         if (!_ep) {
557                 pr_err("%s: Invalid ep\n", __func__);
558                 return NULL;
559         }
560         ep = container_of(_ep, struct net2280_ep, ep);
561
562         req = kzalloc(sizeof(*req), gfp_flags);
563         if (!req)
564                 return NULL;
565
566         INIT_LIST_HEAD(&req->queue);
567
568         /* this dma descriptor may be swapped with the previous dummy */
569         if (ep->dma) {
570                 struct net2280_dma      *td;
571
572                 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
573                                 &req->td_dma);
574                 if (!td) {
575                         kfree(req);
576                         return NULL;
577                 }
578                 td->dmacount = 0;       /* not VALID */
579                 td->dmadesc = td->dmaaddr;
580                 req->td = td;
581         }
582         return &req->req;
583 }
584
585 static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
586 {
587         struct net2280_ep       *ep;
588         struct net2280_request  *req;
589
590         ep = container_of(_ep, struct net2280_ep, ep);
591         if (!_ep || !_req) {
592                 dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
593                                                         __func__, _ep, _req);
594                 return;
595         }
596
597         req = container_of(_req, struct net2280_request, req);
598         WARN_ON(!list_empty(&req->queue));
599         if (req->td)
600                 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
601         kfree(req);
602 }
603
604 /*-------------------------------------------------------------------------*/
605
606 /* load a packet into the fifo we use for usb IN transfers.
607  * works for all endpoints.
608  *
609  * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
610  * at a time, but this code is simpler because it knows it only writes
611  * one packet.  ep-a..ep-d should use dma instead.
612  */
613 static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
614 {
615         struct net2280_ep_regs  __iomem *regs = ep->regs;
616         u8                      *buf;
617         u32                     tmp;
618         unsigned                count, total;
619
620         /* INVARIANT:  fifo is currently empty. (testable) */
621
622         if (req) {
623                 buf = req->buf + req->actual;
624                 prefetch(buf);
625                 total = req->length - req->actual;
626         } else {
627                 total = 0;
628                 buf = NULL;
629         }
630
631         /* write just one packet at a time */
632         count = ep->ep.maxpacket;
633         if (count > total)      /* min() cannot be used on a bitfield */
634                 count = total;
635
636         ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
637                         ep->ep.name, count,
638                         (count != ep->ep.maxpacket) ? " (short)" : "",
639                         req);
640         while (count >= 4) {
641                 /* NOTE be careful if you try to align these. fifo lines
642                  * should normally be full (4 bytes) and successive partial
643                  * lines are ok only in certain cases.
644                  */
645                 tmp = get_unaligned((u32 *)buf);
646                 cpu_to_le32s(&tmp);
647                 writel(tmp, &regs->ep_data);
648                 buf += 4;
649                 count -= 4;
650         }
651
652         /* last fifo entry is "short" unless we wrote a full packet.
653          * also explicitly validate last word in (periodic) transfers
654          * when maxpacket is not a multiple of 4 bytes.
655          */
656         if (count || total < ep->ep.maxpacket) {
657                 tmp = count ? get_unaligned((u32 *)buf) : count;
658                 cpu_to_le32s(&tmp);
659                 set_fifo_bytecount(ep, count & 0x03);
660                 writel(tmp, &regs->ep_data);
661         }
662
663         /* pci writes may still be posted */
664 }
665
666 /* work around erratum 0106: PCI and USB race over the OUT fifo.
667  * caller guarantees chiprev 0100, out endpoint is NAKing, and
668  * there's no real data in the fifo.
669  *
670  * NOTE:  also used in cases where that erratum doesn't apply:
671  * where the host wrote "too much" data to us.
672  */
673 static void out_flush(struct net2280_ep *ep)
674 {
675         u32     __iomem *statp;
676         u32     tmp;
677
678         statp = &ep->regs->ep_stat;
679
680         tmp = readl(statp);
681         if (tmp & BIT(NAK_OUT_PACKETS)) {
682                 ep_dbg(ep->dev, "%s %s %08x !NAK\n",
683                         ep->ep.name, __func__, tmp);
684                 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
685         }
686
687         writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
688                 BIT(DATA_PACKET_RECEIVED_INTERRUPT),
689                 statp);
690         writel(BIT(FIFO_FLUSH), statp);
691         /* Make sure that statp is written */
692         mb();
693         tmp = readl(statp);
694         if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
695                         /* high speed did bulk NYET; fifo isn't filling */
696                         ep->dev->gadget.speed == USB_SPEED_FULL) {
697                 unsigned        usec;
698
699                 usec = 50;              /* 64 byte bulk/interrupt */
700                 handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
701                                 BIT(USB_OUT_PING_NAK_SENT), usec);
702                 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
703         }
704 }
705
706 /* unload packet(s) from the fifo we use for usb OUT transfers.
707  * returns true iff the request completed, either because of a short
708  * packet or because the request buffer filled with full packets.
709  *
710  * for ep-a..ep-d this will read multiple packets out when they
711  * have been accepted.
712  */
713 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
714 {
715         struct net2280_ep_regs  __iomem *regs = ep->regs;
716         u8                      *buf = req->req.buf + req->req.actual;
717         unsigned                count, tmp, is_short;
718         unsigned                cleanup = 0, prevent = 0;
719
720         /* erratum 0106 ... packets coming in during fifo reads might
721          * be incompletely rejected.  not all cases have workarounds.
722          */
723         if (ep->dev->chiprev == 0x0100 &&
724                         ep->dev->gadget.speed == USB_SPEED_FULL) {
725                 udelay(1);
726                 tmp = readl(&ep->regs->ep_stat);
727                 if ((tmp & BIT(NAK_OUT_PACKETS)))
728                         cleanup = 1;
729                 else if ((tmp & BIT(FIFO_FULL))) {
730                         start_out_naking(ep);
731                         prevent = 1;
732                 }
733                 /* else: hope we don't see the problem */
734         }
735
736         /* never overflow the rx buffer. the fifo reads packets until
737          * it sees a short one; we might not be ready for them all.
738          */
739         prefetchw(buf);
740         count = readl(&regs->ep_avail);
741         if (unlikely(count == 0)) {
742                 udelay(1);
743                 tmp = readl(&ep->regs->ep_stat);
744                 count = readl(&regs->ep_avail);
745                 /* handled that data already? */
746                 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
747                         return 0;
748         }
749
750         tmp = req->req.length - req->req.actual;
751         if (count > tmp) {
752                 /* as with DMA, data overflow gets flushed */
753                 if ((tmp % ep->ep.maxpacket) != 0) {
754                         ep_err(ep->dev,
755                                 "%s out fifo %d bytes, expected %d\n",
756                                 ep->ep.name, count, tmp);
757                         req->req.status = -EOVERFLOW;
758                         cleanup = 1;
759                         /* NAK_OUT_PACKETS will be set, so flushing is safe;
760                          * the next read will start with the next packet
761                          */
762                 } /* else it's a ZLP, no worries */
763                 count = tmp;
764         }
765         req->req.actual += count;
766
767         is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
768
769         ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
770                         ep->ep.name, count, is_short ? " (short)" : "",
771                         cleanup ? " flush" : "", prevent ? " nak" : "",
772                         req, req->req.actual, req->req.length);
773
774         while (count >= 4) {
775                 tmp = readl(&regs->ep_data);
776                 cpu_to_le32s(&tmp);
777                 put_unaligned(tmp, (u32 *)buf);
778                 buf += 4;
779                 count -= 4;
780         }
781         if (count) {
782                 tmp = readl(&regs->ep_data);
783                 /* LE conversion is implicit here: */
784                 do {
785                         *buf++ = (u8) tmp;
786                         tmp >>= 8;
787                 } while (--count);
788         }
789         if (cleanup)
790                 out_flush(ep);
791         if (prevent) {
792                 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
793                 (void) readl(&ep->regs->ep_rsp);
794         }
795
796         return is_short || ((req->req.actual == req->req.length) &&
797                         !req->req.zero);
798 }
799
800 /* fill out dma descriptor to match a given request */
801 static void fill_dma_desc(struct net2280_ep *ep,
802                                         struct net2280_request *req, int valid)
803 {
804         struct net2280_dma      *td = req->td;
805         u32                     dmacount = req->req.length;
806
807         /* don't let DMA continue after a short OUT packet,
808          * so overruns can't affect the next transfer.
809          * in case of overruns on max-size packets, we can't
810          * stop the fifo from filling but we can flush it.
811          */
812         if (ep->is_in)
813                 dmacount |= BIT(DMA_DIRECTION);
814         if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
815                                         !(ep->dev->quirks & PLX_2280))
816                 dmacount |= BIT(END_OF_CHAIN);
817
818         req->valid = valid;
819         if (valid)
820                 dmacount |= BIT(VALID_BIT);
821         dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
822
823         /* td->dmadesc = previously set by caller */
824         td->dmaaddr = cpu_to_le32 (req->req.dma);
825
826         /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
827         wmb();
828         td->dmacount = cpu_to_le32(dmacount);
829 }
830
831 static const u32 dmactl_default =
832                 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
833                 BIT(DMA_CLEAR_COUNT_ENABLE) |
834                 /* erratum 0116 workaround part 1 (use POLLING) */
835                 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
836                 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
837                 BIT(DMA_VALID_BIT_ENABLE) |
838                 BIT(DMA_SCATTER_GATHER_ENABLE) |
839                 /* erratum 0116 workaround part 2 (no AUTOSTART) */
840                 BIT(DMA_ENABLE);
841
842 static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
843 {
844         handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
845 }
846
847 static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
848 {
849         writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
850         spin_stop_dma(dma);
851 }
852
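/* Hand a descriptor chain to the DMA engine: reset dmacount/dmastat, load
 * the first descriptor address, program dmactl, then (after the erratum 0116
 * arbiter read) issue DMA_START.
 */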
853 static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
854 {
855         struct net2280_dma_regs __iomem *dma = ep->dma;
856         unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
857
858         if (!(ep->dev->quirks & PLX_2280))
859                 tmp |= BIT(END_OF_CHAIN);
860
861         writel(tmp, &dma->dmacount);
862         writel(readl(&dma->dmastat), &dma->dmastat);
863
864         writel(td_dma, &dma->dmadesc);
865         if (ep->dev->quirks & PLX_SUPERSPEED)
866                 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
867         writel(dmactl, &dma->dmactl);
868
869         /* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
870         (void) readl(&ep->dev->pci->pcimstctl);
871
872         writel(BIT(DMA_START), &dma->dmastat);
873 }
874
875 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
876 {
877         u32                     tmp;
878         struct net2280_dma_regs __iomem *dma = ep->dma;
879
880         /* FIXME can't use DMA for ZLPs */
881
882         /* on this path we "know" there's no dma active (yet) */
883         WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
884         writel(0, &ep->dma->dmactl);
885
886         /* previous OUT packet might have been short */
887         if (!ep->is_in && (readl(&ep->regs->ep_stat) &
888                                 BIT(NAK_OUT_PACKETS))) {
889                 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
890                         &ep->regs->ep_stat);
891
892                 tmp = readl(&ep->regs->ep_avail);
893                 if (tmp) {
894                         writel(readl(&dma->dmastat), &dma->dmastat);
895
896                         /* transfer all/some fifo data */
897                         writel(req->req.dma, &dma->dmaaddr);
898                         tmp = min(tmp, req->req.length);
899
900                         /* dma irq, faking scatterlist status */
901                         req->td->dmacount = cpu_to_le32(req->req.length - tmp);
902                         writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
903                                         &dma->dmacount);
904                         req->td->dmadesc = 0;
905                         req->valid = 1;
906
907                         writel(BIT(DMA_ENABLE), &dma->dmactl);
908                         writel(BIT(DMA_START), &dma->dmastat);
909                         return;
910                 }
911                 stop_out_naking(ep);
912         }
913
914         tmp = dmactl_default;
915
916         /* force packet boundaries between dma requests, but prevent the
917          * controller from automagically writing a last "short" packet
918          * (zero length) unless the driver explicitly said to do that.
919          */
920         if (ep->is_in) {
921                 if (likely((req->req.length % ep->ep.maxpacket) ||
922                                                         req->req.zero)){
923                         tmp |= BIT(DMA_FIFO_VALIDATE);
924                         ep->in_fifo_validate = 1;
925                 } else
926                         ep->in_fifo_validate = 0;
927         }
928
929         /* init req->td, pointing to the current dummy */
930         req->td->dmadesc = cpu_to_le32 (ep->td_dma);
931         fill_dma_desc(ep, req, 1);
932
933         req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
934
935         start_queue(ep, tmp, req->td_dma);
936 }
937
938 static inline void
939 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
940 {
941         struct net2280_dma      *end;
942         dma_addr_t              tmp;
943
944         /* swap new dummy for old, link; fill and maybe activate */
945         end = ep->dummy;
946         ep->dummy = req->td;
947         req->td = end;
948
949         tmp = ep->td_dma;
950         ep->td_dma = req->td_dma;
951         req->td_dma = tmp;
952
953         end->dmadesc = cpu_to_le32 (ep->td_dma);
954
955         fill_dma_desc(ep, req, valid);
956 }
957
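/* Retire one request: unlink it from the endpoint queue, record its final
 * status, unmap the DMA buffer if this endpoint uses DMA, and call the
 * gadget driver's completion callback with the device lock dropped.
 */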
958 static void
959 done(struct net2280_ep *ep, struct net2280_request *req, int status)
960 {
961         struct net2280          *dev;
962         unsigned                stopped = ep->stopped;
963
964         list_del_init(&req->queue);
965
966         if (req->req.status == -EINPROGRESS)
967                 req->req.status = status;
968         else
969                 status = req->req.status;
970
971         dev = ep->dev;
972         if (ep->dma)
973                 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
974
975         if (status && status != -ESHUTDOWN)
976                 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
977                         ep->ep.name, &req->req, status,
978                         req->req.actual, req->req.length);
979
980         /* don't modify queue heads during completion callback */
981         ep->stopped = 1;
982         spin_unlock(&dev->lock);
983         usb_gadget_giveback_request(&ep->ep, &req->req);
984         spin_lock(&dev->lock);
985         ep->stopped = stopped;
986 }
987
988 /*-------------------------------------------------------------------------*/
989
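/* Queue a request on an endpoint.  If the queue is idle the transfer is
 * started immediately (DMA kick-off, or PIO fifo fill/unblock); otherwise a
 * DMA request is chained in by swapping descriptors in queue_dma(), and PIO
 * requests simply wait for the interrupt handler to advance the queue.
 */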
990 static int
991 net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
992 {
993         struct net2280_request  *req;
994         struct net2280_ep       *ep;
995         struct net2280          *dev;
996         unsigned long           flags;
997         int ret = 0;
998
999         /* we always require a cpu-view buffer, so that we can
1000          * always use pio (as fallback or whatever).
1001          */
1002         ep = container_of(_ep, struct net2280_ep, ep);
1003         if (!_ep || (!ep->desc && ep->num != 0)) {
1004                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1005                 return -EINVAL;
1006         }
1007         req = container_of(_req, struct net2280_request, req);
1008         if (!_req || !_req->complete || !_req->buf ||
1009                                 !list_empty(&req->queue)) {
1010                 ret = -EINVAL;
1011                 goto print_err;
1012         }
1013         if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
1014                 ret = -EDOM;
1015                 goto print_err;
1016         }
1017         dev = ep->dev;
1018         if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
1019                 ret = -ESHUTDOWN;
1020                 goto print_err;
1021         }
1022
1023         /* FIXME implement PIO fallback for ZLPs with DMA */
1024         if (ep->dma && _req->length == 0) {
1025                 ret = -EOPNOTSUPP;
1026                 goto print_err;
1027         }
1028
1029         /* set up dma mapping in case the caller didn't */
1030         if (ep->dma) {
1031                 ret = usb_gadget_map_request(&dev->gadget, _req,
1032                                 ep->is_in);
1033                 if (ret)
1034                         goto print_err;
1035         }
1036
1037         ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
1038                         _ep->name, _req, _req->length, _req->buf);
1039
1040         spin_lock_irqsave(&dev->lock, flags);
1041
1042         _req->status = -EINPROGRESS;
1043         _req->actual = 0;
1044
1045         /* kickstart this i/o queue? */
1046         if  (list_empty(&ep->queue) && !ep->stopped &&
1047                 !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
1048                   (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
1049
1050                 /* use DMA if the endpoint supports it, else pio */
1051                 if (ep->dma)
1052                         start_dma(ep, req);
1053                 else {
1054                         /* maybe there's no control data, just status ack */
1055                         if (ep->num == 0 && _req->length == 0) {
1056                                 allow_status(ep);
1057                                 done(ep, req, 0);
1058                                 ep_vdbg(dev, "%s status ack\n", ep->ep.name);
1059                                 goto done;
1060                         }
1061
1062                         /* PIO ... stuff the fifo, or unblock it.  */
1063                         if (ep->is_in)
1064                                 write_fifo(ep, _req);
1065                         else if (list_empty(&ep->queue)) {
1066                                 u32     s;
1067
1068                                 /* OUT FIFO might have packet(s) buffered */
1069                                 s = readl(&ep->regs->ep_stat);
1070                                 if ((s & BIT(FIFO_EMPTY)) == 0) {
1071                                         /* note:  _req->short_not_ok is
1072                                          * ignored here since PIO _always_
1073                                          * stops queue advance here, and
1074                                          * _req->status doesn't change for
1075                                          * short reads (only _req->actual)
1076                                          */
1077                                         if (read_fifo(ep, req) &&
1078                                                         ep->num == 0) {
1079                                                 done(ep, req, 0);
1080                                                 allow_status(ep);
1081                                                 /* don't queue it */
1082                                                 req = NULL;
1083                                         } else if (read_fifo(ep, req) &&
1084                                                         ep->num != 0) {
1085                                                 done(ep, req, 0);
1086                                                 req = NULL;
1087                                         } else
1088                                                 s = readl(&ep->regs->ep_stat);
1089                                 }
1090
1091                                 /* don't NAK, let the fifo fill */
1092                                 if (req && (s & BIT(NAK_OUT_PACKETS)))
1093                                         writel(BIT(CLEAR_NAK_OUT_PACKETS),
1094                                                         &ep->regs->ep_rsp);
1095                         }
1096                 }
1097
1098         } else if (ep->dma) {
1099                 int     valid = 1;
1100
1101                 if (ep->is_in) {
1102                         int     expect;
1103
1104                         /* preventing magic zlps is per-engine state, not
1105                          * per-transfer; irq logic must recover hiccups.
1106                          */
1107                         expect = likely(req->req.zero ||
1108                                 (req->req.length % ep->ep.maxpacket));
1109                         if (expect != ep->in_fifo_validate)
1110                                 valid = 0;
1111                 }
1112                 queue_dma(ep, req, valid);
1113
1114         } /* else the irq handler advances the queue. */
1115
1116         ep->responded = 1;
1117         if (req)
1118                 list_add_tail(&req->queue, &ep->queue);
1119 done:
1120         spin_unlock_irqrestore(&dev->lock, flags);
1121
1122         /* pci writes may still be posted */
1123         return ret;
1124
1125 print_err:
1126         dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
1127         return ret;
1128 }
1129
1130 static inline void
1131 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
1132                 int status)
1133 {
1134         req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1135         done(ep, req, status);
1136 }
1137
1138 static void scan_dma_completions(struct net2280_ep *ep)
1139 {
1140         /* only look at descriptors that were "naturally" retired,
1141          * so fifo and list head state won't matter
1142          */
1143         while (!list_empty(&ep->queue)) {
1144                 struct net2280_request  *req;
1145                 u32                     tmp;
1146
1147                 req = list_entry(ep->queue.next,
1148                                 struct net2280_request, queue);
1149                 if (!req->valid)
1150                         break;
1151                 rmb();
1152                 tmp = le32_to_cpup(&req->td->dmacount);
1153                 if ((tmp & BIT(VALID_BIT)) != 0)
1154                         break;
1155
1156                 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1157                  * cases where DMA must be aborted; this code handles
1158                  * all non-abort DMA completions.
1159                  */
1160                 if (unlikely(req->td->dmadesc == 0)) {
1161                         /* paranoia */
1162                         tmp = readl(&ep->dma->dmacount);
1163                         if (tmp & DMA_BYTE_COUNT_MASK)
1164                                 break;
1165                         /* single transfer mode */
1166                         dma_done(ep, req, tmp, 0);
1167                         break;
1168                 } else if (!ep->is_in &&
1169                            (req->req.length % ep->ep.maxpacket) &&
1170                            !(ep->dev->quirks & PLX_SUPERSPEED)) {
1171
1172                         tmp = readl(&ep->regs->ep_stat);
1173                         /* AVOID TROUBLE HERE by not issuing short reads from
1174                          * your gadget driver.  That helps avoid errata 0121,
1175                          * 0122, and 0124; not all cases trigger the warning.
1176                          */
1177                         if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
1178                                 ep_warn(ep->dev, "%s lost packet sync!\n",
1179                                                 ep->ep.name);
1180                                 req->req.status = -EOVERFLOW;
1181                         } else {
1182                                 tmp = readl(&ep->regs->ep_avail);
1183                                 if (tmp) {
1184                                         /* fifo gets flushed later */
1185                                         ep->out_overflow = 1;
1186                                         ep_dbg(ep->dev,
1187                                                 "%s dma, discard %d len %d\n",
1188                                                 ep->ep.name, tmp,
1189                                                 req->req.length);
1190                                         req->req.status = -EOVERFLOW;
1191                                 }
1192                         }
1193                 }
1194                 dma_done(ep, req, tmp, 0);
1195         }
1196 }
1197
1198 static void restart_dma(struct net2280_ep *ep)
1199 {
1200         struct net2280_request  *req;
1201
1202         if (ep->stopped)
1203                 return;
1204         req = list_entry(ep->queue.next, struct net2280_request, queue);
1205
1206         start_dma(ep, req);
1207 }
1208
1209 static void abort_dma(struct net2280_ep *ep)
1210 {
1211         /* abort the current transfer */
1212         if (likely(!list_empty(&ep->queue))) {
1213                 /* FIXME work around errata 0121, 0122, 0124 */
1214                 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
1215                 spin_stop_dma(ep->dma);
1216         } else
1217                 stop_dma(ep->dma);
1218         scan_dma_completions(ep);
1219 }
1220
1221 /* dequeue ALL requests */
1222 static void nuke(struct net2280_ep *ep)
1223 {
1224         struct net2280_request  *req;
1225
1226         /* called with spinlock held */
1227         ep->stopped = 1;
1228         if (ep->dma)
1229                 abort_dma(ep);
1230         while (!list_empty(&ep->queue)) {
1231                 req = list_entry(ep->queue.next,
1232                                 struct net2280_request,
1233                                 queue);
1234                 done(ep, req, -ESHUTDOWN);
1235         }
1236 }
1237
1238 /* dequeue JUST ONE request */
1239 static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1240 {
1241         struct net2280_ep       *ep;
1242         struct net2280_request  *req;
1243         unsigned long           flags;
1244         u32                     dmactl;
1245         int                     stopped;
1246
1247         ep = container_of(_ep, struct net2280_ep, ep);
1248         if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
1249                 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
1250                                                 __func__, _ep, _req);
1251                 return -EINVAL;
1252         }
1253
1254         spin_lock_irqsave(&ep->dev->lock, flags);
1255         stopped = ep->stopped;
1256
1257         /* quiesce dma while we patch the queue */
1258         dmactl = 0;
1259         ep->stopped = 1;
1260         if (ep->dma) {
1261                 dmactl = readl(&ep->dma->dmactl);
1262                 /* WARNING erratum 0127 may kick in ... */
1263                 stop_dma(ep->dma);
1264                 scan_dma_completions(ep);
1265         }
1266
1267         /* make sure it's still queued on this endpoint */
1268         list_for_each_entry(req, &ep->queue, queue) {
1269                 if (&req->req == _req)
1270                         break;
1271         }
1272         if (&req->req != _req) {
1273                 ep->stopped = stopped;
1274                 spin_unlock_irqrestore(&ep->dev->lock, flags);
1275                 ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
1276                 return -EINVAL;
1277         }
1278
1279         /* queue head may be partially complete. */
1280         if (ep->queue.next == &req->queue) {
1281                 if (ep->dma) {
1282                         ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
1283                         _req->status = -ECONNRESET;
1284                         abort_dma(ep);
1285                         if (likely(ep->queue.next == &req->queue)) {
1286                                 /* NOTE: misreports single-transfer mode */
1287                                 req->td->dmacount = 0;  /* invalidate */
1288                                 dma_done(ep, req,
1289                                         readl(&ep->dma->dmacount),
1290                                         -ECONNRESET);
1291                         }
1292                 } else {
1293                         ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
1294                         done(ep, req, -ECONNRESET);
1295                 }
1296                 req = NULL;
1297         }
1298
1299         if (req)
1300                 done(ep, req, -ECONNRESET);
1301         ep->stopped = stopped;
1302
1303         if (ep->dma) {
1304                 /* turn off dma on inactive queues */
1305                 if (list_empty(&ep->queue))
1306                         stop_dma(ep->dma);
1307                 else if (!ep->stopped) {
1308                         /* resume current request, or start new one */
1309                         if (req)
1310                                 writel(dmactl, &ep->dma->dmactl);
1311                         else
1312                                 start_dma(ep, list_entry(ep->queue.next,
1313                                         struct net2280_request, queue));
1314                 }
1315         }
1316
1317         spin_unlock_irqrestore(&ep->dev->lock, flags);
1318         return 0;
1319 }
1320
1321 /*-------------------------------------------------------------------------*/
1322
1323 static int net2280_fifo_status(struct usb_ep *_ep);
1324
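/* Set or clear an endpoint halt (stall).  For ep0 this only flags a protocol
 * stall for the current control transfer.  A wedged endpoint stays halted
 * from the gadget's point of view even if the host clears the halt feature.
 */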
1325 static int
1326 net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1327 {
1328         struct net2280_ep       *ep;
1329         unsigned long           flags;
1330         int                     retval = 0;
1331
1332         ep = container_of(_ep, struct net2280_ep, ep);
1333         if (!_ep || (!ep->desc && ep->num != 0)) {
1334                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1335                 return -EINVAL;
1336         }
1337         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1338                 retval = -ESHUTDOWN;
1339                 goto print_err;
1340         }
1341         if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1342                                                 == USB_ENDPOINT_XFER_ISOC) {
1343                 retval = -EINVAL;
1344                 goto print_err;
1345         }
1346
1347         spin_lock_irqsave(&ep->dev->lock, flags);
1348         if (!list_empty(&ep->queue)) {
1349                 retval = -EAGAIN;
1350                 goto print_unlock;
1351         } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
1352                 retval = -EAGAIN;
1353                 goto print_unlock;
1354         } else {
1355                 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
1356                                 value ? "set" : "clear",
1357                                 wedged ? "wedge" : "halt");
1358                 /* set/clear, then synch memory views with the device */
1359                 if (value) {
1360                         if (ep->num == 0)
1361                                 ep->dev->protocol_stall = 1;
1362                         else
1363                                 set_halt(ep);
1364                         if (wedged)
1365                                 ep->wedged = 1;
1366                 } else {
1367                         clear_halt(ep);
1368                         if (ep->dev->quirks & PLX_SUPERSPEED &&
1369                                 !list_empty(&ep->queue) && ep->td_dma)
1370                                         restart_dma(ep);
1371                         ep->wedged = 0;
1372                 }
1373                 (void) readl(&ep->regs->ep_rsp);
1374         }
1375         spin_unlock_irqrestore(&ep->dev->lock, flags);
1376
1377         return retval;
1378
1379 print_unlock:
1380         spin_unlock_irqrestore(&ep->dev->lock, flags);
1381 print_err:
1382         dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
1383         return retval;
1384 }
1385
1386 static int net2280_set_halt(struct usb_ep *_ep, int value)
1387 {
1388         return net2280_set_halt_and_wedge(_ep, value, 0);
1389 }
1390
1391 static int net2280_set_wedge(struct usb_ep *_ep)
1392 {
1393         if (!_ep || _ep->name == ep0name) {
1394                 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
1395                 return -EINVAL;
1396         }
1397         return net2280_set_halt_and_wedge(_ep, 1, 1);
1398 }
1399
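/* Report how many bytes currently sit in the endpoint FIFO, as
 * usb_ep_fifo_status() expects.  Judging from the math below, EP_AVAIL
 * counts readable bytes on OUT endpoints but free space on IN endpoints,
 * so the IN case is reported as fifo_size minus that value.
 */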
1400 static int net2280_fifo_status(struct usb_ep *_ep)
1401 {
1402         struct net2280_ep       *ep;
1403         u32                     avail;
1404
1405         ep = container_of(_ep, struct net2280_ep, ep);
1406         if (!_ep || (!ep->desc && ep->num != 0)) {
1407                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1408                 return -ENODEV;
1409         }
1410         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1411                 dev_err(&ep->dev->pdev->dev,
1412                         "%s: Invalid driver=%p or speed=%d\n",
1413                         __func__, ep->dev->driver, ep->dev->gadget.speed);
1414                 return -ESHUTDOWN;
1415         }
1416
1417         avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1418         if (avail > ep->fifo_size) {
1419                 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
1420                 return -EOVERFLOW;
1421         }
1422         if (ep->is_in)
1423                 avail = ep->fifo_size - avail;
1424         return avail;
1425 }
1426
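/* Discard whatever is left in the endpoint FIFO by setting FIFO_FLUSH;
 * the trailing ep_rsp read just flushes the posted PCI write.
 */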
1427 static void net2280_fifo_flush(struct usb_ep *_ep)
1428 {
1429         struct net2280_ep       *ep;
1430
1431         ep = container_of(_ep, struct net2280_ep, ep);
1432         if (!_ep || (!ep->desc && ep->num != 0)) {
1433                 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
1434                 return;
1435         }
1436         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
1437                 dev_err(&ep->dev->pdev->dev,
1438                         "%s: Invalid driver=%p or speed=%d\n",
1439                         __func__, ep->dev->driver, ep->dev->gadget.speed);
1440                 return;
1441         }
1442
1443         writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1444         (void) readl(&ep->regs->ep_rsp);
1445 }
1446
1447 static const struct usb_ep_ops net2280_ep_ops = {
1448         .enable         = net2280_enable,
1449         .disable        = net2280_disable,
1450
1451         .alloc_request  = net2280_alloc_request,
1452         .free_request   = net2280_free_request,
1453
1454         .queue          = net2280_queue,
1455         .dequeue        = net2280_dequeue,
1456
1457         .set_halt       = net2280_set_halt,
1458         .set_wedge      = net2280_set_wedge,
1459         .fifo_status    = net2280_fifo_status,
1460         .fifo_flush     = net2280_fifo_flush,
1461 };
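/* Gadget drivers never call these ops directly; they go through the
 * usb_ep_*() wrappers.  A minimal sketch (names are illustrative only):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * which lands in net2280_alloc_request() and net2280_queue() above.
 */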
1462
1463 /*-------------------------------------------------------------------------*/
1464
1465 static int net2280_get_frame(struct usb_gadget *_gadget)
1466 {
1467         struct net2280          *dev;
1468         unsigned long           flags;
1469         u16                     retval;
1470
1471         if (!_gadget)
1472                 return -ENODEV;
1473         dev = container_of(_gadget, struct net2280, gadget);
1474         spin_lock_irqsave(&dev->lock, flags);
1475         retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1476         spin_unlock_irqrestore(&dev->lock, flags);
1477         return retval;
1478 }
1479
1480 static int net2280_wakeup(struct usb_gadget *_gadget)
1481 {
1482         struct net2280          *dev;
1483         u32                     tmp;
1484         unsigned long           flags;
1485
1486         if (!_gadget)
1487                 return 0;
1488         dev = container_of(_gadget, struct net2280, gadget);
1489
1490         spin_lock_irqsave(&dev->lock, flags);
1491         tmp = readl(&dev->usb->usbctl);
1492         if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1493                 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1494         spin_unlock_irqrestore(&dev->lock, flags);
1495
1496         /* pci writes may still be posted */
1497         return 0;
1498 }
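/* Note: net2280_wakeup() only drives resume signalling when the host has
 * previously armed remote wakeup (DEVICE_REMOTE_WAKEUP_ENABLE); otherwise
 * the register write is skipped and the call still returns success.
 */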
1499
1500 static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1501 {
1502         struct net2280          *dev;
1503         u32                     tmp;
1504         unsigned long           flags;
1505
1506         if (!_gadget)
1507                 return 0;
1508         dev = container_of(_gadget, struct net2280, gadget);
1509
1510         spin_lock_irqsave(&dev->lock, flags);
1511         tmp = readl(&dev->usb->usbctl);
1512         if (value) {
1513                 tmp |= BIT(SELF_POWERED_STATUS);
1514                 _gadget->is_selfpowered = 1;
1515         } else {
1516                 tmp &= ~BIT(SELF_POWERED_STATUS);
1517                 _gadget->is_selfpowered = 0;
1518         }
1519         writel(tmp, &dev->usb->usbctl);
1520         spin_unlock_irqrestore(&dev->lock, flags);
1521
1522         return 0;
1523 }
1524
1525 static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1526 {
1527         struct net2280  *dev;
1528         u32             tmp;
1529         unsigned long   flags;
1530
1531         if (!_gadget)
1532                 return -ENODEV;
1533         dev = container_of(_gadget, struct net2280, gadget);
1534
1535         spin_lock_irqsave(&dev->lock, flags);
1536         tmp = readl(&dev->usb->usbctl);
1537         dev->softconnect = (is_on != 0);
1538         if (is_on) {
1539                 ep0_start(dev);
1540                 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1541         } else {
1542                 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1543                 stop_activity(dev, NULL);
1544         }
1545
1546         spin_unlock_irqrestore(&dev->lock, flags);
1547
1548         if (!is_on && dev->driver)
1549                 dev->driver->disconnect(&dev->gadget);
1550
1551         return 0;
1552 }
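/* net2280_pullup() gates USB_DETECT_ENABLE, i.e. whether the chip presents
 * itself on the bus at all.  The disconnect callback runs after the lock is
 * dropped, presumably because gadget drivers dequeue requests (and so
 * re-enter this driver) from inside it.
 */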
1553
1554 static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
1555                 struct usb_endpoint_descriptor *desc,
1556                 struct usb_ss_ep_comp_descriptor *ep_comp)
1557 {
1558         char name[8];
1559         struct usb_ep *ep;
1560
1561         if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
1562                 /* ep-e, ep-f are PIO with only 64 byte fifos */
1563                 ep = gadget_find_ep_by_name(_gadget, "ep-e");
1564                 if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
1565                         return ep;
1566                 ep = gadget_find_ep_by_name(_gadget, "ep-f");
1567                 if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
1568                         return ep;
1569         }
1570
1571         /* USB3380: use same address for usb and hardware endpoints */
1572         snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
1573                         usb_endpoint_dir_in(desc) ? "in" : "out");
1574         ep = gadget_find_ep_by_name(_gadget, name);
1575         if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
1576                 return ep;
1577
1578         return NULL;
1579 }
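/* Illustration of the policy above: a bulk IN descriptor with
 * bEndpointAddress 0x82 skips the interrupt-only ep-e/ep-f preference and
 * is matched against the hardware endpoint named "ep2in" (USB338x
 * enhanced-mode naming).
 */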
1580
1581 static int net2280_start(struct usb_gadget *_gadget,
1582                 struct usb_gadget_driver *driver);
1583 static int net2280_stop(struct usb_gadget *_gadget);
1584
1585 static const struct usb_gadget_ops net2280_ops = {
1586         .get_frame      = net2280_get_frame,
1587         .wakeup         = net2280_wakeup,
1588         .set_selfpowered = net2280_set_selfpowered,
1589         .pullup         = net2280_pullup,
1590         .udc_start      = net2280_start,
1591         .udc_stop       = net2280_stop,
1592         .match_ep       = net2280_match_ep,
1593 };
1594
1595 /*-------------------------------------------------------------------------*/
1596
1597 #ifdef  CONFIG_USB_GADGET_DEBUG_FILES
1598
1599 /* FIXME move these into procfs, and use seq_file.
1600  * Sysfs _still_ doesn't behave for arbitrarily sized files,
1601  * and also doesn't help products using this with 2.4 kernels.
1602  */
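/* These attributes hang off the controller's PCI device (the "function"
 * and "queues" files are created in net2280_start() below), so they show
 * up in that device's sysfs directory; the exact path depends on the PCI
 * topology.
 */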
1603
1604 /* "function" sysfs attribute */
1605 static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1606                              char *buf)
1607 {
1608         struct net2280  *dev = dev_get_drvdata(_dev);
1609
1610         if (!dev->driver || !dev->driver->function ||
1611                         strlen(dev->driver->function) > PAGE_SIZE)
1612                 return 0;
1613         return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1614 }
1615 static DEVICE_ATTR_RO(function);
1616
1617 static ssize_t registers_show(struct device *_dev,
1618                               struct device_attribute *attr, char *buf)
1619 {
1620         struct net2280          *dev;
1621         char                    *next;
1622         unsigned                size, t;
1623         unsigned long           flags;
1624         int                     i;
1625         u32                     t1, t2;
1626         const char              *s;
1627
1628         dev = dev_get_drvdata(_dev);
1629         next = buf;
1630         size = PAGE_SIZE;
1631         spin_lock_irqsave(&dev->lock, flags);
1632
1633         if (dev->driver)
1634                 s = dev->driver->driver.name;
1635         else
1636                 s = "(none)";
1637
1638         /* Main Control Registers */
1639         t = scnprintf(next, size, "%s version " DRIVER_VERSION
1640                         ", chiprev %04x\n\n"
1641                         "devinit %03x fifoctl %08x gadget '%s'\n"
1642                         "pci irqenb0 %02x irqenb1 %08x "
1643                         "irqstat0 %04x irqstat1 %08x\n",
1644                         driver_name, dev->chiprev,
1645                         readl(&dev->regs->devinit),
1646                         readl(&dev->regs->fifoctl),
1647                         s,
1648                         readl(&dev->regs->pciirqenb0),
1649                         readl(&dev->regs->pciirqenb1),
1650                         readl(&dev->regs->irqstat0),
1651                         readl(&dev->regs->irqstat1));
1652         size -= t;
1653         next += t;
1654
1655         /* USB Control Registers */
1656         t1 = readl(&dev->usb->usbctl);
1657         t2 = readl(&dev->usb->usbstat);
1658         if (t1 & BIT(VBUS_PIN)) {
1659                 if (t2 & BIT(HIGH_SPEED))
1660                         s = "high speed";
1661                 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1662                         s = "powered";
1663                 else
1664                         s = "full speed";
1665                 /* full speed bit (6) not working?? */
1666         } else
1667                 s = "not attached";
1668         t = scnprintf(next, size,
1669                         "stdrsp %08x usbctl %08x usbstat %08x "
1670                                 "addr 0x%02x (%s)\n",
1671                         readl(&dev->usb->stdrsp), t1, t2,
1672                         readl(&dev->usb->ouraddr), s);
1673         size -= t;
1674         next += t;
1675
1676         /* PCI Master Control Registers */
1677
1678         /* DMA Control Registers */
1679
1680         /* Configurable EP Control Registers */
1681         for (i = 0; i < dev->n_ep; i++) {
1682                 struct net2280_ep       *ep;
1683
1684                 ep = &dev->ep[i];
1685                 if (i && !ep->desc)
1686                         continue;
1687
1688                 t1 = readl(&ep->cfg->ep_cfg);
1689                 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1690                 t = scnprintf(next, size,
1691                                 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1692                                         "irqenb %02x\n",
1693                                 ep->ep.name, t1, t2,
1694                                 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1695                                         ? "NAK " : "",
1696                                 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1697                                         ? "hide " : "",
1698                                 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1699                                         ? "CRC " : "",
1700                                 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1701                                         ? "interrupt " : "",
1702                                 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1703                                         ? "status " : "",
1704                                 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1705                                         ? "NAKmode " : "",
1706                                 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1707                                         ? "DATA1 " : "DATA0 ",
1708                                 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1709                                         ? "HALT " : "",
1710                                 readl(&ep->regs->ep_irqenb));
1711                 size -= t;
1712                 next += t;
1713
1714                 t = scnprintf(next, size,
1715                                 "\tstat %08x avail %04x "
1716                                 "(ep%d%s-%s)%s\n",
1717                                 readl(&ep->regs->ep_stat),
1718                                 readl(&ep->regs->ep_avail),
1719                                 t1 & 0x0f, DIR_STRING(t1),
1720                                 type_string(t1 >> 8),
1721                                 ep->stopped ? "*" : "");
1722                 size -= t;
1723                 next += t;
1724
1725                 if (!ep->dma)
1726                         continue;
1727
1728                 t = scnprintf(next, size,
1729                                 "  dma\tctl %08x stat %08x count %08x\n"
1730                                 "\taddr %08x desc %08x\n",
1731                                 readl(&ep->dma->dmactl),
1732                                 readl(&ep->dma->dmastat),
1733                                 readl(&ep->dma->dmacount),
1734                                 readl(&ep->dma->dmaaddr),
1735                                 readl(&ep->dma->dmadesc));
1736                 size -= t;
1737                 next += t;
1738
1739         }
1740
1741         /* Indexed Registers (none yet) */
1742
1743         /* Statistics */
1744         t = scnprintf(next, size, "\nirqs:  ");
1745         size -= t;
1746         next += t;
1747         for (i = 0; i < dev->n_ep; i++) {
1748                 struct net2280_ep       *ep;
1749
1750                 ep = &dev->ep[i];
1751                 if (i && !ep->irqs)
1752                         continue;
1753                 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1754                 size -= t;
1755                 next += t;
1756
1757         }
1758         t = scnprintf(next, size, "\n");
1759         size -= t;
1760         next += t;
1761
1762         spin_unlock_irqrestore(&dev->lock, flags);
1763
1764         return PAGE_SIZE - size;
1765 }
1766 static DEVICE_ATTR_RO(registers);
1767
1768 static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1769                            char *buf)
1770 {
1771         struct net2280          *dev;
1772         char                    *next;
1773         unsigned                size;
1774         unsigned long           flags;
1775         int                     i;
1776
1777         dev = dev_get_drvdata(_dev);
1778         next = buf;
1779         size = PAGE_SIZE;
1780         spin_lock_irqsave(&dev->lock, flags);
1781
1782         for (i = 0; i < dev->n_ep; i++) {
1783                 struct net2280_ep               *ep = &dev->ep[i];
1784                 struct net2280_request          *req;
1785                 int                             t;
1786
1787                 if (i != 0) {
1788                         const struct usb_endpoint_descriptor    *d;
1789
1790                         d = ep->desc;
1791                         if (!d)
1792                                 continue;
1793                         t = d->bEndpointAddress;
1794                         t = scnprintf(next, size,
1795                                 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1796                                 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1797                                 (t & USB_DIR_IN) ? "in" : "out",
1798                                 type_string(d->bmAttributes),
1799                                 usb_endpoint_maxp(d) & 0x1fff,
1800                                 ep->dma ? "dma" : "pio", ep->fifo_size
1801                                 );
1802                 } else /* ep0 should only have one transfer queued */
1803                         t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1804                                         ep->is_in ? "in" : "out");
1805                 if (t <= 0 || t > size)
1806                         goto done;
1807                 size -= t;
1808                 next += t;
1809
1810                 if (list_empty(&ep->queue)) {
1811                         t = scnprintf(next, size, "\t(nothing queued)\n");
1812                         if (t <= 0 || t > size)
1813                                 goto done;
1814                         size -= t;
1815                         next += t;
1816                         continue;
1817                 }
1818                 list_for_each_entry(req, &ep->queue, queue) {
1819                         if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1820                                 t = scnprintf(next, size,
1821                                         "\treq %p len %d/%d "
1822                                         "buf %p (dmacount %08x)\n",
1823                                         &req->req, req->req.actual,
1824                                         req->req.length, req->req.buf,
1825                                         readl(&ep->dma->dmacount));
1826                         else
1827                                 t = scnprintf(next, size,
1828                                         "\treq %p len %d/%d buf %p\n",
1829                                         &req->req, req->req.actual,
1830                                         req->req.length, req->req.buf);
1831                         if (t <= 0 || t > size)
1832                                 goto done;
1833                         size -= t;
1834                         next += t;
1835
1836                         if (ep->dma) {
1837                                 struct net2280_dma      *td;
1838
1839                                 td = req->td;
1840                                 t = scnprintf(next, size, "\t    td %08x "
1841                                         " count %08x buf %08x desc %08x\n",
1842                                         (u32) req->td_dma,
1843                                         le32_to_cpu(td->dmacount),
1844                                         le32_to_cpu(td->dmaaddr),
1845                                         le32_to_cpu(td->dmadesc));
1846                                 if (t <= 0 || t > size)
1847                                         goto done;
1848                                 size -= t;
1849                                 next += t;
1850                         }
1851                 }
1852         }
1853
1854 done:
1855         spin_unlock_irqrestore(&dev->lock, flags);
1856         return PAGE_SIZE - size;
1857 }
1858 static DEVICE_ATTR_RO(queues);
1859
1860
1861 #else
1862
1863 #define device_create_file(a, b)        (0)
1864 #define device_remove_file(a, b)        do { } while (0)
1865
1866 #endif
1867
1868 /*-------------------------------------------------------------------------*/
1869
1870 /* another driver-specific mode might be a request type doing dma
1871  * to/from another device fifo instead of to/from memory.
1872  */
1873
1874 static void set_fifo_mode(struct net2280 *dev, int mode)
1875 {
1876         /* keeping high bits preserves BAR2 */
1877         writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1878
1879         /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1880         INIT_LIST_HEAD(&dev->gadget.ep_list);
1881         list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1882         list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1883         switch (mode) {
1884         case 0:
1885                 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1886                 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1887                 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1888                 break;
1889         case 1:
1890                 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1891                 break;
1892         case 2:
1893                 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1894                 dev->ep[1].fifo_size = 2048;
1895                 dev->ep[2].fifo_size = 1024;
1896                 break;
1897         }
1898         /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1899         list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1900         list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1901 }
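/* FIFO partitioning summary, as implemented above (228x only):
 *	mode 0: ep-a..ep-d usable, 1K FIFO each
 *	mode 1: only ep-a/ep-b usable, 2K FIFO each
 *	mode 2: ep-a (2K), ep-b (1K) and ep-c usable; ep-d unavailable
 * ep-e and ep-f keep their fixed 64 byte FIFOs in every mode.
 */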
1902
1903 static void defect7374_disable_data_eps(struct net2280 *dev)
1904 {
1905         /*
1906          * For Defect 7374, disable data EPs (and more):
1907          *  - This phase undoes the earlier phase of the Defect 7374 workaround,
1908          *    returning ep regs back to normal.
1909          */
1910         struct net2280_ep *ep;
1911         int i;
1912         unsigned char ep_sel;
1913         u32 tmp_reg;
1914
1915         for (i = 1; i < 5; i++) {
1916                 ep = &dev->ep[i];
1917                 writel(i, &ep->cfg->ep_cfg);
1918         }
1919
1920         /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1921         for (i = 0; i < 6; i++)
1922                 writel(0, &dev->dep[i].dep_cfg);
1923
1924         for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1925                 /* Select an endpoint for subsequent operations: */
1926                 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1927                 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1928
1929                 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1930                                         ep_sel == 18 || ep_sel == 20)
1931                         continue;
1932
1933                 /* Change settings on some selected endpoints */
1934                 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
1935                 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
1936                 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1937                 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1938                 tmp_reg |= BIT(EP_INITIALIZED);
1939                 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1940         }
1941 }
1942
1943 static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1944 {
1945         u32 tmp = 0, tmp_reg;
1946         u32 scratch;
1947         int i;
1948         unsigned char ep_sel;
1949
1950         scratch = get_idx_reg(dev->regs, SCRATCH);
1951
1952         WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
1953                 == DEFECT7374_FSM_SS_CONTROL_READ);
1954
1955         scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1956
1957         ep_warn(dev, "Applying Defect 7374 workaround in software this time");
1958         ep_warn(dev, "It will run again on cold reboot and on SS connect");
1959
1960         /* GPEPs: */
1961         tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
1962                         (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1963                         ((dev->enhanced_mode) ?
1964                          BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
1965                          BIT(ENDPOINT_ENABLE)));
1966
1967         for (i = 1; i < 5; i++)
1968                 writel(tmp, &dev->ep[i].cfg->ep_cfg);
1969
1970         /* CSRIN, PCIIN, STATIN, RCIN */
1971         tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
1972         writel(tmp, &dev->dep[1].dep_cfg);
1973         writel(tmp, &dev->dep[3].dep_cfg);
1974         writel(tmp, &dev->dep[4].dep_cfg);
1975         writel(tmp, &dev->dep[5].dep_cfg);
1976
1977         /* Implemented for development and debug.
1978          * Can be refined/tuned later. */
1979         for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1980                 /* Select an endpoint for subsequent operations: */
1981                 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1982                 writel(((tmp_reg & ~0x1f) | ep_sel),
1983                                 &dev->plregs->pl_ep_ctrl);
1984
1985                 if (ep_sel == 1) {
1986                         tmp =
1987                                 (readl(&dev->plregs->pl_ep_ctrl) |
1988                                  BIT(CLEAR_ACK_ERROR_CODE) | 0);
1989                         writel(tmp, &dev->plregs->pl_ep_ctrl);
1990                         continue;
1991                 }
1992
1993                 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1994                                 ep_sel == 18  || ep_sel == 20)
1995                         continue;
1996
1997                 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
1998                                 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
1999                 writel(tmp, &dev->plregs->pl_ep_cfg_4);
2000
2001                 tmp = readl(&dev->plregs->pl_ep_ctrl) &
2002                         ~BIT(EP_INITIALIZED);
2003                 writel(tmp, &dev->plregs->pl_ep_ctrl);
2004
2005         }
2006
2007         /* Set FSM to focus on the first Control Read:
2008          * - Tip: Connection speed is known upon the first
2009          *   setup request. */
2010         scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
2011         set_idx_reg(dev->regs, SCRATCH, scratch);
2012
2013 }
2014
2015 /* keeping it simple:
2016  * - one bus driver, initted first;
2017  * - one function driver, initted second
2018  *
2019  * most of the work to support multiple net2280 controllers would
2020  * be to associate this gadget driver (yes?) with all of them, or
2021  * perhaps to bind specific drivers to specific devices.
2022  */
2023
2024 static void usb_reset_228x(struct net2280 *dev)
2025 {
2026         u32     tmp;
2027
2028         dev->gadget.speed = USB_SPEED_UNKNOWN;
2029         (void) readl(&dev->usb->usbctl);
2030
2031         net2280_led_init(dev);
2032
2033         /* disable automatic responses, and irqs */
2034         writel(0, &dev->usb->stdrsp);
2035         writel(0, &dev->regs->pciirqenb0);
2036         writel(0, &dev->regs->pciirqenb1);
2037
2038         /* clear old dma and irq state */
2039         for (tmp = 0; tmp < 4; tmp++) {
2040                 struct net2280_ep       *ep = &dev->ep[tmp + 1];
2041                 if (ep->dma)
2042                         abort_dma(ep);
2043         }
2044
2045         writel(~0, &dev->regs->irqstat0);
2046         writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
2047
2048         /* reset, and enable pci */
2049         tmp = readl(&dev->regs->devinit) |
2050                 BIT(PCI_ENABLE) |
2051                 BIT(FIFO_SOFT_RESET) |
2052                 BIT(USB_SOFT_RESET) |
2053                 BIT(M8051_RESET);
2054         writel(tmp, &dev->regs->devinit);
2055
2056         /* standard fifo and endpoint allocations */
2057         set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
2058 }
2059
2060 static void usb_reset_338x(struct net2280 *dev)
2061 {
2062         u32 tmp;
2063
2064         dev->gadget.speed = USB_SPEED_UNKNOWN;
2065         (void)readl(&dev->usb->usbctl);
2066
2067         net2280_led_init(dev);
2068
2069         if (dev->bug7734_patched) {
2070                 /* disable automatic responses, and irqs */
2071                 writel(0, &dev->usb->stdrsp);
2072                 writel(0, &dev->regs->pciirqenb0);
2073                 writel(0, &dev->regs->pciirqenb1);
2074         }
2075
2076         /* clear old dma and irq state */
2077         for (tmp = 0; tmp < 4; tmp++) {
2078                 struct net2280_ep *ep = &dev->ep[tmp + 1];
2079                 struct net2280_dma_regs __iomem *dma;
2080
2081                 if (ep->dma) {
2082                         abort_dma(ep);
2083                 } else {
2084                         dma = &dev->dma[tmp];
2085                         writel(BIT(DMA_ABORT), &dma->dmastat);
2086                         writel(0, &dma->dmactl);
2087                 }
2088         }
2089
2090         writel(~0, &dev->regs->irqstat0); writel(~0, &dev->regs->irqstat1);
2091
2092         if (dev->bug7734_patched) {
2093                 /* reset, and enable pci */
2094                 tmp = readl(&dev->regs->devinit) |
2095                     BIT(PCI_ENABLE) |
2096                     BIT(FIFO_SOFT_RESET) |
2097                     BIT(USB_SOFT_RESET) |
2098                     BIT(M8051_RESET);
2099
2100                 writel(tmp, &dev->regs->devinit);
2101         }
2102
2103         /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
2104         INIT_LIST_HEAD(&dev->gadget.ep_list);
2105
2106         for (tmp = 1; tmp < dev->n_ep; tmp++)
2107                 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
2108
2109 }
2110
2111 static void usb_reset(struct net2280 *dev)
2112 {
2113         if (dev->quirks & PLX_LEGACY)
2114                 return usb_reset_228x(dev);
2115         return usb_reset_338x(dev);
2116 }
2117
2118 static void usb_reinit_228x(struct net2280 *dev)
2119 {
2120         u32     tmp;
2121
2122         /* basic endpoint init */
2123         for (tmp = 0; tmp < 7; tmp++) {
2124                 struct net2280_ep       *ep = &dev->ep[tmp];
2125
2126                 ep->ep.name = ep_info_dft[tmp].name;
2127                 ep->ep.caps = ep_info_dft[tmp].caps;
2128                 ep->dev = dev;
2129                 ep->num = tmp;
2130
2131                 if (tmp > 0 && tmp <= 4) {
2132                         ep->fifo_size = 1024;
2133                         ep->dma = &dev->dma[tmp - 1];
2134                 } else
2135                         ep->fifo_size = 64;
2136                 ep->regs = &dev->epregs[tmp];
2137                 ep->cfg = &dev->epregs[tmp];
2138                 ep_reset_228x(dev->regs, ep);
2139         }
2140         usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
2141         usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
2142         usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
2143
2144         dev->gadget.ep0 = &dev->ep[0].ep;
2145         dev->ep[0].stopped = 0;
2146         INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2147
2148         /* we want to prevent lowlevel/insecure access from the USB host,
2149          * but erratum 0119 means this enable bit is ignored
2150          */
2151         for (tmp = 0; tmp < 5; tmp++)
2152                 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
2153 }
2154
2155 static void usb_reinit_338x(struct net2280 *dev)
2156 {
2157         int i;
2158         u32 tmp, val;
2159         static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
2160         static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
2161                                                 0x00, 0xC0, 0x00, 0xC0 };
2162
2163         /* basic endpoint init */
2164         for (i = 0; i < dev->n_ep; i++) {
2165                 struct net2280_ep *ep = &dev->ep[i];
2166
2167                 ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
2168                                                    ep_info_dft[i].name;
2169                 ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
2170                                                    ep_info_dft[i].caps;
2171                 ep->dev = dev;
2172                 ep->num = i;
2173
2174                 if (i > 0 && i <= 4)
2175                         ep->dma = &dev->dma[i - 1];
2176
2177                 if (dev->enhanced_mode) {
2178                         ep->cfg = &dev->epregs[ne[i]];
2179                         /*
2180                          * Set USB endpoint number, hardware allows same number
2181                          * in both directions.
2182                          */
2183                         if (i > 0 && i < 5)
2184                                 writel(ne[i], &ep->cfg->ep_cfg);
2185                         ep->regs = (struct net2280_ep_regs __iomem *)
2186                                 (((void __iomem *)&dev->epregs[ne[i]]) +
2187                                 ep_reg_addr[i]);
2188                 } else {
2189                         ep->cfg = &dev->epregs[i];
2190                         ep->regs = &dev->epregs[i];
2191                 }
2192
2193                 ep->fifo_size = (i != 0) ? 2048 : 512;
2194
2195                 ep_reset_338x(dev->regs, ep);
2196         }
2197         usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2198
2199         dev->gadget.ep0 = &dev->ep[0].ep;
2200         dev->ep[0].stopped = 0;
2201
2202         /* Link layer set up */
2203         if (dev->bug7734_patched) {
2204                 tmp = readl(&dev->usb_ext->usbctl2) &
2205                     ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
2206                 writel(tmp, &dev->usb_ext->usbctl2);
2207         }
2208
2209         /* Hardware Defect and Workaround */
2210         val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2211         val &= ~(0xf << TIMER_LFPS_6US);
2212         val |= 0x5 << TIMER_LFPS_6US;
2213         writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2214
2215         val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2216         val &= ~(0xffff << TIMER_LFPS_80US);
2217         val |= 0x0100 << TIMER_LFPS_80US;
2218         writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2219
2220         /*
2221          * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
2222          * Hot Reset Exit Handshake may Fail in Specific Case using
2223          * Default Register Settings. Workaround for Enumeration test.
2224          */
2225         val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2226         val &= ~(0x1f << HOT_TX_NORESET_TS2);
2227         val |= 0x10 << HOT_TX_NORESET_TS2;
2228         writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2229
2230         val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2231         val &= ~(0x1f << HOT_RX_RESET_TS2);
2232         val |= 0x3 << HOT_RX_RESET_TS2;
2233         writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2234
2235         /*
2236          * Set Recovery Idle to Recover bit:
2237          * - On SS connections, setting Recovery Idle to Recover Fmw improves
2238          *   link robustness with various hosts and hubs.
2239          * - It is safe to set for all connection speeds; all chip revisions.
2240          * - R-M-W to leave other bits undisturbed.
2241          * - Reference PLX TT-7372
2242         */
2243         val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
2244         val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
2245         writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2246
2247         INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2248
2249         /* disable dedicated endpoints */
2250         writel(0x0D, &dev->dep[0].dep_cfg);
2251         writel(0x0D, &dev->dep[1].dep_cfg);
2252         writel(0x0E, &dev->dep[2].dep_cfg);
2253         writel(0x0E, &dev->dep[3].dep_cfg);
2254         writel(0x0F, &dev->dep[4].dep_cfg);
2255         writel(0x0C, &dev->dep[5].dep_cfg);
2256 }
2257
2258 static void usb_reinit(struct net2280 *dev)
2259 {
2260         if (dev->quirks & PLX_LEGACY)
2261                 return usb_reinit_228x(dev);
2262         return usb_reinit_338x(dev);
2263 }
2264
2265 static void ep0_start_228x(struct net2280 *dev)
2266 {
2267         writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2268                 BIT(CLEAR_NAK_OUT_PACKETS) |
2269                 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2270                 &dev->epregs[0].ep_rsp);
2271
2272         /*
2273          * hardware optionally handles a bunch of standard requests
2274          * that the API hides from drivers anyway.  have it do so.
2275          * endpoint status/features are handled in software, to
2276          * help pass tests for some dubious behavior.
2277          */
2278         writel(BIT(SET_TEST_MODE) |
2279                 BIT(SET_ADDRESS) |
2280                 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2281                 BIT(GET_DEVICE_STATUS) |
2282                 BIT(GET_INTERFACE_STATUS),
2283                 &dev->usb->stdrsp);
2284         writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2285                 BIT(SELF_POWERED_USB_DEVICE) |
2286                 BIT(REMOTE_WAKEUP_SUPPORT) |
2287                 (dev->softconnect << USB_DETECT_ENABLE) |
2288                 BIT(SELF_POWERED_STATUS),
2289                 &dev->usb->usbctl);
2290
2291         /* enable irqs so we can see ep0 and general operation  */
2292         writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2293                 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2294                 &dev->regs->pciirqenb0);
2295         writel(BIT(PCI_INTERRUPT_ENABLE) |
2296                 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2297                 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2298                 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2299                 BIT(VBUS_INTERRUPT_ENABLE) |
2300                 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2301                 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2302                 &dev->regs->pciirqenb1);
2303
2304         /* don't leave any writes posted */
2305         (void) readl(&dev->usb->usbctl);
2306 }
2307
2308 static void ep0_start_338x(struct net2280 *dev)
2309 {
2310
2311         if (dev->bug7734_patched)
2312                 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2313                        BIT(SET_EP_HIDE_STATUS_PHASE),
2314                        &dev->epregs[0].ep_rsp);
2315
2316         /*
2317          * hardware optionally handles a bunch of standard requests
2318          * that the API hides from drivers anyway.  have it do so.
2319          * endpoint status/features are handled in software, to
2320          * help pass tests for some dubious behavior.
2321          */
2322         writel(BIT(SET_ISOCHRONOUS_DELAY) |
2323                BIT(SET_SEL) |
2324                BIT(SET_TEST_MODE) |
2325                BIT(SET_ADDRESS) |
2326                BIT(GET_INTERFACE_STATUS) |
2327                BIT(GET_DEVICE_STATUS),
2328                 &dev->usb->stdrsp);
2329         dev->wakeup_enable = 1;
2330         writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2331                (dev->softconnect << USB_DETECT_ENABLE) |
2332                BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2333                &dev->usb->usbctl);
2334
2335         /* enable irqs so we can see ep0 and general operation  */
2336         writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2337                BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2338                &dev->regs->pciirqenb0);
2339         writel(BIT(PCI_INTERRUPT_ENABLE) |
2340                BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2341                BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2342                BIT(VBUS_INTERRUPT_ENABLE),
2343                &dev->regs->pciirqenb1);
2344
2345         /* don't leave any writes posted */
2346         (void)readl(&dev->usb->usbctl);
2347 }
2348
2349 static void ep0_start(struct net2280 *dev)
2350 {
2351         if (dev->quirks & PLX_LEGACY)
2352                 return ep0_start_228x(dev);
2353         return ep0_start_338x(dev);
2354 }
2355
2356 /* when a driver is successfully registered, it will receive
2357  * control requests including set_configuration(), which enables
2358  * non-control requests.  then usb traffic follows until a
2359  * disconnect is reported.  then a host may connect again, or
2360  * the driver might get unbound.
2361  */
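/* Binding is driven by the UDC core: a gadget driver calls
 * usb_gadget_probe_driver(), the core picks a free UDC and invokes its
 * ->udc_start(), i.e. net2280_start() below; unbinding flows through
 * usb_gadget_unregister_driver() and ->udc_stop() the same way.
 */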
2362 static int net2280_start(struct usb_gadget *_gadget,
2363                 struct usb_gadget_driver *driver)
2364 {
2365         struct net2280          *dev;
2366         int                     retval;
2367         unsigned                i;
2368
2369         /* insist on high speed support from the driver, since
2370          * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2371          * "must not be used in normal operation"
2372          */
2373         if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2374                         !driver->setup)
2375                 return -EINVAL;
2376
2377         dev = container_of(_gadget, struct net2280, gadget);
2378
2379         for (i = 0; i < dev->n_ep; i++)
2380                 dev->ep[i].irqs = 0;
2381
2382         /* hook up the driver ... */
2383         driver->driver.bus = NULL;
2384         dev->driver = driver;
2385
2386         retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2387         if (retval)
2388                 goto err_unbind;
2389         retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2390         if (retval)
2391                 goto err_func;
2392
2393         /* enable host detection and ep0; and we're ready
2394          * for set_configuration as well as eventual disconnect.
2395          */
2396         net2280_led_active(dev, 1);
2397
2398         if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
2399                 defect7374_enable_data_eps_zero(dev);
2400
2401         ep0_start(dev);
2402
2403         /* pci writes may still be posted */
2404         return 0;
2405
2406 err_func:
2407         device_remove_file(&dev->pdev->dev, &dev_attr_function);
2408 err_unbind:
2409         dev->driver = NULL;
2410         return retval;
2411 }
2412
2413 static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2414 {
2415         int                     i;
2416
2417         /* don't disconnect if it's not connected */
2418         if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2419                 driver = NULL;
2420
2421         /* stop hardware; prevent new request submissions;
2422          * and kill any outstanding requests.
2423          */
2424         usb_reset(dev);
2425         for (i = 0; i < dev->n_ep; i++)
2426                 nuke(&dev->ep[i]);
2427
2428         /* report disconnect; the driver is already quiesced */
2429         if (driver) {
2430                 spin_unlock(&dev->lock);
2431                 driver->disconnect(&dev->gadget);
2432                 spin_lock(&dev->lock);
2433         }
2434
2435         usb_reinit(dev);
2436 }
2437
2438 static int net2280_stop(struct usb_gadget *_gadget)
2439 {
2440         struct net2280  *dev;
2441         unsigned long   flags;
2442
2443         dev = container_of(_gadget, struct net2280, gadget);
2444
2445         spin_lock_irqsave(&dev->lock, flags);
2446         stop_activity(dev, NULL);
2447         spin_unlock_irqrestore(&dev->lock, flags);
2448
2449         net2280_led_active(dev, 0);
2450
2451         device_remove_file(&dev->pdev->dev, &dev_attr_function);
2452         device_remove_file(&dev->pdev->dev, &dev_attr_queues);
2453
2454         dev->driver = NULL;
2455
2456         return 0;
2457 }
2458
2459 /*-------------------------------------------------------------------------*/
2460
2461 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2462  * also works for dma-capable endpoints, in pio mode or just
2463  * to manually advance the queue after short OUT transfers.
2464  */
2465 static void handle_ep_small(struct net2280_ep *ep)
2466 {
2467         struct net2280_request  *req;
2468         u32                     t;
2469         /* 0 error, 1 mid-data, 2 done */
2470         int                     mode = 1;
2471
2472         if (!list_empty(&ep->queue))
2473                 req = list_entry(ep->queue.next,
2474                         struct net2280_request, queue);
2475         else
2476                 req = NULL;
2477
2478         /* ack all, and handle what we care about */
2479         t = readl(&ep->regs->ep_stat);
2480         ep->irqs++;
2481
2482         ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
2483                         ep->ep.name, t, req ? &req->req : NULL);
2484
2485         if (!ep->is_in || (ep->dev->quirks & PLX_2280))
2486                 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
2487         else
2488                 /* Added for 2282 */
2489                 writel(t, &ep->regs->ep_stat);
2490
2491         /* for ep0, monitor token irqs to catch data stage length errors
2492          * and to synchronize on status.
2493          *
2494          * also, to defer reporting of protocol stalls ... here's where
2495          * data or status first appears, handling stalls here should never
2496          * cause trouble on the host side..
2497          * cause trouble on the host side.
2498          * control requests could be slightly faster without token synch for
2499          * status, but status can jam up that way.
2500          */
2501         if (unlikely(ep->num == 0)) {
2502                 if (ep->is_in) {
2503                         /* status; stop NAKing */
2504                         if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
2505                                 if (ep->dev->protocol_stall) {
2506                                         ep->stopped = 1;
2507                                         set_halt(ep);
2508                                 }
2509                                 if (!req)
2510                                         allow_status(ep);
2511                                 mode = 2;
2512                         /* reply to extra IN data tokens with a zlp */
2513                         } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2514                                 if (ep->dev->protocol_stall) {
2515                                         ep->stopped = 1;
2516                                         set_halt(ep);
2517                                         mode = 2;
2518                                 } else if (ep->responded &&
2519                                                 !req && !ep->stopped)
2520                                         write_fifo(ep, NULL);
2521                         }
2522                 } else {
2523                         /* status; stop NAKing */
2524                         if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
2525                                 if (ep->dev->protocol_stall) {
2526                                         ep->stopped = 1;
2527                                         set_halt(ep);
2528                                 }
2529                                 mode = 2;
2530                         /* an extra OUT token is an error */
2531                         } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2532                                         req &&
2533                                         req->req.actual == req->req.length) ||
2534                                         (ep->responded && !req)) {
2535                                 ep->dev->protocol_stall = 1;
2536                                 set_halt(ep);
2537                                 ep->stopped = 1;
2538                                 if (req)
2539                                         done(ep, req, -EOVERFLOW);
2540                                 req = NULL;
2541                         }
2542                 }
2543         }
2544
2545         if (unlikely(!req))
2546                 return;
2547
2548         /* manual DMA queue advance after short OUT */
2549         if (likely(ep->dma)) {
2550                 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2551                         u32     count;
2552                         int     stopped = ep->stopped;
2553
2554                         /* TRANSFERRED works around OUT_DONE erratum 0112.
2555                          * we expect (N <= maxpacket) bytes; host wrote M.
2556                          * iff (M < N) we won't ever see a DMA interrupt.
2557                          */
2558                         ep->stopped = 1;
2559                         for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
2560
2561                                 /* any preceding dma transfers must finish.
2562                                  * dma handles (M >= N), may empty the queue
2563                                  */
2564                                 scan_dma_completions(ep);
2565                                 if (unlikely(list_empty(&ep->queue) ||
2566                                                 ep->out_overflow)) {
2567                                         req = NULL;
2568                                         break;
2569                                 }
2570                                 req = list_entry(ep->queue.next,
2571                                         struct net2280_request, queue);
2572
2573                                 /* here either (M < N), a "real" short rx;
2574                                  * or (M == N) and the queue didn't empty
2575                                  */
2576                                 if (likely(t & BIT(FIFO_EMPTY))) {
2577                                         count = readl(&ep->dma->dmacount);
2578                                         count &= DMA_BYTE_COUNT_MASK;
2579                                         if (readl(&ep->dma->dmadesc)
2580                                                         != req->td_dma)
2581                                                 req = NULL;
2582                                         break;
2583                                 }
2584                                 udelay(1);
2585                         }
2586
2587                         /* stop DMA, leave ep NAKing */
2588                         writel(BIT(DMA_ABORT), &ep->dma->dmastat);
2589                         spin_stop_dma(ep->dma);
2590
2591                         if (likely(req)) {
2592                                 req->td->dmacount = 0;
2593                                 t = readl(&ep->regs->ep_avail);
2594                                 dma_done(ep, req, count,
2595                                         (ep->out_overflow || t)
2596                                                 ? -EOVERFLOW : 0);
2597                         }
2598
2599                         /* also flush to prevent erratum 0106 trouble */
2600                         if (unlikely(ep->out_overflow ||
2601                                         (ep->dev->chiprev == 0x0100 &&
2602                                         ep->dev->gadget.speed
2603                                         == USB_SPEED_FULL))) {
2604                                 out_flush(ep);
2605                                 ep->out_overflow = 0;
2606                         }
2607
2608                         /* (re)start dma if needed, stop NAKing */
2609                         ep->stopped = stopped;
2610                         if (!list_empty(&ep->queue))
2611                                 restart_dma(ep);
2612                 } else
2613                         ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
2614                                         ep->ep.name, t);
2615                 return;
2616
2617         /* data packet(s) received (in the fifo, OUT) */
2618         } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
2619                 if (read_fifo(ep, req) && ep->num != 0)
2620                         mode = 2;
2621
2622         /* data packet(s) transmitted (IN) */
2623         } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
2624                 unsigned        len;
2625
2626                 len = req->req.length - req->req.actual;
2627                 if (len > ep->ep.maxpacket)
2628                         len = ep->ep.maxpacket;
2629                 req->req.actual += len;
2630
2631                 /* if we wrote it all, we're usually done */
2632                 /* send zlps until the status stage */
2633                 if ((req->req.actual == req->req.length) &&
2634                         (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
2635                                 mode = 2;
2636
2637         /* there was nothing to do ...  */
2638         } else if (mode == 1)
2639                 return;
2640
2641         /* done */
2642         if (mode == 2) {
2643                 /* stream endpoints often resubmit/unlink in completion */
2644                 done(ep, req, 0);
2645
2646                 /* maybe advance queue to next request */
2647                 if (ep->num == 0) {
2648                         /* NOTE:  net2280 could let gadget driver start the
2649                          * status stage later. since not all controllers let
2650                          * them control that, the api doesn't (yet) allow it.
2651                          */
2652                         if (!ep->stopped)
2653                                 allow_status(ep);
2654                         req = NULL;
2655                 } else {
2656                         if (!list_empty(&ep->queue) && !ep->stopped)
2657                                 req = list_entry(ep->queue.next,
2658                                         struct net2280_request, queue);
2659                         else
2660                                 req = NULL;
2661                         if (req && !ep->is_in)
2662                                 stop_out_naking(ep);
2663                 }
2664         }
2665
2666         /* is there a buffer for the next packet?
2667          * for best streaming performance, make sure there is one.
2668          */
2669         if (req && !ep->stopped) {
2670
2671                 /* load IN fifo with next packet (may be zlp) */
2672                 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
2673                         write_fifo(ep, &req->req);
2674         }
2675 }
2676
2677 static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
2678 {
2679         struct net2280_ep       *ep;
2680
2681         if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2682                 return &dev->ep[0];
2683         list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2684                 u8      bEndpointAddress;
2685
2686                 if (!ep->desc)
2687                         continue;
2688                 bEndpointAddress = ep->desc->bEndpointAddress;
2689                 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2690                         continue;
2691                 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2692                         return ep;
2693         }
2694         return NULL;
2695 }
2696
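/* Defect 7374 state machine, pieced together from this file: the
 * DEFECT7374_FSM_FIELD bits in SCRATCH start out as
 * WAITING_FOR_CONTROL_READ (set in defect7374_enable_data_eps_zero()) and
 * move to SS_CONTROL_READ or NON_SS_CONTROL_READ once the first control
 * read of a connection is seen below, after which the data endpoints are
 * restored via defect7374_disable_data_eps().
 */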
2697 static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2698 {
2699         u32 scratch, fsmvalue;
2700         u32 ack_wait_timeout, state;
2701
2702         /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2703         scratch = get_idx_reg(dev->regs, SCRATCH);
2704         fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2705         scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2706
2707         if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2708                                 (r.bRequestType & USB_DIR_IN)))
2709                 return;
2710
2711         /* This is the first Control Read for this connection: */
2712         if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
2713                 /*
2714                  * Connection is NOT SS:
2715                  * - Connection must be FS or HS.
2716                  * - This FSM state should allow workaround software to
2717                  * run after the next USB connection.
2718                  */
2719                 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2720                 dev->bug7734_patched = 1;
2721                 goto restore_data_eps;
2722         }
2723
2724         /* Connection is SS: */
2725         for (ack_wait_timeout = 0;
2726                         ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2727                         ack_wait_timeout++) {
2728
2729                 state = readl(&dev->plregs->pl_ep_status_1)
2730                         & (0xff << STATE);
2731                 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2732                         (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2733                         scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2734                         dev->bug7734_patched = 1;
2735                         break;
2736                 }
2737
2738                 /*
2739                  * We have not yet received host's Data Phase ACK
2740                  * - Wait and try again.
2741                  */
2742                 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2743
2744                 continue;
2745         }
2746
2748         if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
2749                 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
2750                 "to detect SS host's data phase ACK.\n");
2751                 ep_err(dev, "PL_EP_STATUS_1(23:16): Expected from 0x11 to 0x16, "
2752                 "got 0x%2.2x.\n", state >> STATE);
2753         } else {
2754                 ep_warn(dev, "INFO: Defect 7374 workaround waited about "
2755                 "%d usec for Control Read Data Phase ACK\n",
2756                         DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2757         }
2758
2759 restore_data_eps:
2760         /*
2761          * Restore data EPs to their pre-workaround settings (disabled,
2762          * initialized, and other details).
2763          */
2764         defect7374_disable_data_eps(dev);
2765
2766         set_idx_reg(dev->regs, SCRATCH, scratch);
2769 }
2770
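/*
 * Clear an endpoint's SuperSpeed sequence number: select the endpoint in
 * PL_EP_CTRL (ep_pl[] maps the driver's endpoint index to the chip's
 * protocol-layer endpoint select value), then set SEQUENCE_NUMBER_RESET.
 */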
2771 static void ep_clear_seqnum(struct net2280_ep *ep)
2772 {
2773         struct net2280 *dev = ep->dev;
2774         u32 val;
2775         static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2776
2777         val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2778         val |= ep_pl[ep->num];
2779         writel(val, &dev->plregs->pl_ep_ctrl);
2780         val |= BIT(SEQUENCE_NUMBER_RESET);
2781         writel(val, &dev->plregs->pl_ep_ctrl);
2784 }
2785
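/*
 * Handle the standard control requests that need chip-level help at
 * SuperSpeed: GET_STATUS, the U1/U2/LTM and remote-wakeup feature
 * selectors, and endpoint halt (including the sequence-number reset
 * workaround).  Everything else is delegated to the gadget driver.
 */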
2786 static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2787                 struct net2280_ep *ep, struct usb_ctrlrequest r)
2788 {
2789         int tmp = 0;
2790
2791 #define w_value         le16_to_cpu(r.wValue)
2792 #define w_index         le16_to_cpu(r.wIndex)
2793 #define w_length        le16_to_cpu(r.wLength)
2794
2795         switch (r.bRequest) {
2796                 struct net2280_ep *e;
2797                 u16 status;
2798
2799         case USB_REQ_SET_CONFIGURATION:
2800                 dev->addressed_state = !w_value;
2801                 goto usb3_delegate;
2802
2803         case USB_REQ_GET_STATUS:
2804                 switch (r.bRequestType) {
2805                 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
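                        /*
                         * Build the USB 3.0 device status word: self-powered
                         * in bit 0, remote wakeup in bit 1, U1/U2/LTM enable
                         * in bits 2..4.
                         */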
2806                         status = dev->wakeup_enable ? 0x02 : 0x00;
2807                         if (dev->gadget.is_selfpowered)
2808                                 status |= BIT(0);
2809                         status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2810                                                         dev->ltm_enable << 4);
2811                         writel(0, &dev->epregs[0].ep_irqenb);
2812                         set_fifo_bytecount(ep, sizeof(status));
2813                         writel((__force u32) status, &dev->epregs[0].ep_data);
2814                         allow_status_338x(ep);
2815                         break;
2816
2817                 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2818                         e = get_ep_by_addr(dev, w_index);
2819                         if (!e)
2820                                 goto do_stall3;
2821                         status = readl(&e->regs->ep_rsp) &
2822                                                 BIT(CLEAR_ENDPOINT_HALT);
2823                         writel(0, &dev->epregs[0].ep_irqenb);
2824                         set_fifo_bytecount(ep, sizeof(status));
2825                         writel((__force u32) status, &dev->epregs[0].ep_data);
2826                         allow_status_338x(ep);
2827                         break;
2828
2829                 default:
2830                         goto usb3_delegate;
2831                 }
2832                 break;
2833
2834         case USB_REQ_CLEAR_FEATURE:
2835                 switch (r.bRequestType) {
2836                 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2837                         if (!dev->addressed_state) {
2838                                 switch (w_value) {
2839                                 case USB_DEVICE_U1_ENABLE:
2840                                         dev->u1_enable = 0;
2841                                         writel(readl(&dev->usb_ext->usbctl2) &
2842                                                 ~BIT(U1_ENABLE),
2843                                                 &dev->usb_ext->usbctl2);
2844                                         allow_status_338x(ep);
2845                                         goto next_endpoints3;
2846
2847                                 case USB_DEVICE_U2_ENABLE:
2848                                         dev->u2_enable = 0;
2849                                         writel(readl(&dev->usb_ext->usbctl2) &
2850                                                 ~BIT(U2_ENABLE),
2851                                                 &dev->usb_ext->usbctl2);
2852                                         allow_status_338x(ep);
2853                                         goto next_endpoints3;
2854
2855                                 case USB_DEVICE_LTM_ENABLE:
2856                                         dev->ltm_enable = 0;
2857                                         writel(readl(&dev->usb_ext->usbctl2) &
2858                                                 ~BIT(LTM_ENABLE),
2859                                                 &dev->usb_ext->usbctl2);
2860                                         allow_status_338x(ep);
2861                                         goto next_endpoints3;
2862
2863                                 default:
2864                                         break;
2865                                 }
2866                         }
2867                         if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2868                                 dev->wakeup_enable = 0;
2869                                 writel(readl(&dev->usb->usbctl) &
2870                                         ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2871                                         &dev->usb->usbctl);
2872                                 allow_status_338x(ep);
2873                                 break;
2874                         }
2875                         goto usb3_delegate;
2876
2877                 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2878                         e = get_ep_by_addr(dev, w_index);
2879                         if (!e)
2880                                 goto do_stall3;
2881                         if (w_value != USB_ENDPOINT_HALT)
2882                                 goto do_stall3;
2883                         ep_vdbg(dev, "%s clear halt\n", e->ep.name);
2884                         /*
2885                          * Workaround: the Endpoint Halt (Clear) bit does not
2886                          * reset the SS sequence number; reset it explicitly.
2887                          */
2888                         ep_clear_seqnum(e);
2889                         clear_halt(e);
2890                         if (!list_empty(&e->queue) && e->td_dma)
2891                                 restart_dma(e);
2892                         allow_status(ep);
2893                         ep->stopped = 1;
2894                         break;
2895
2896                 default:
2897                         goto usb3_delegate;
2898                 }
2899                 break;
2900         case USB_REQ_SET_FEATURE:
2901                 switch (r.bRequestType) {
2902                 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2903                         if (!dev->addressed_state) {
2904                                 switch (w_value) {
2905                                 case USB_DEVICE_U1_ENABLE:
2906                                         dev->u1_enable = 1;
2907                                         writel(readl(&dev->usb_ext->usbctl2) |
2908                                                 BIT(U1_ENABLE),
2909                                                 &dev->usb_ext->usbctl2);
2910                                         allow_status_338x(ep);
2911                                         goto next_endpoints3;
2912
2913                                 case USB_DEVICE_U2_ENABLE:
2914                                         dev->u2_enable = 1;
2915                                         writel(readl(&dev->usb_ext->usbctl2) |
2916                                                 BIT(U2_ENABLE),
2917                                                 &dev->usb_ext->usbctl2);
2918                                         allow_status_338x(ep);
2919                                         goto next_endpoints3;
2920
2921                                 case USB_DEVICE_LTM_ENABLE:
2922                                         dev->ltm_enable = 1;
2923                                         writel(readl(&dev->usb_ext->usbctl2) |
2924                                                 BIT(LTM_ENABLE),
2925                                                 &dev->usb_ext->usbctl2);
2926                                         allow_status_338x(ep);
2927                                         goto next_endpoints3;
2928                                 default:
2929                                         break;
2930                                 }
2931                         }
2932
2933                         if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2934                                 dev->wakeup_enable = 1;
2935                                 writel(readl(&dev->usb->usbctl) |
2936                                         BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
2937                                         &dev->usb->usbctl);
2938                                 allow_status_338x(ep);
2939                                 break;
2940                         }
2941                         goto usb3_delegate;
2942
2943                 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2944                         e = get_ep_by_addr(dev, w_index);
2945                         if (!e || (w_value != USB_ENDPOINT_HALT))
2946                                 goto do_stall3;
2947                         ep->stopped = 1;
2948                         if (ep->num == 0)
2949                                 ep->dev->protocol_stall = 1;
2950                         else {
2951                                 if (ep->dma)
2952                                         abort_dma(ep);
2953                                 set_halt(ep);
2954                         }
2955                         allow_status_338x(ep);
2956                         break;
2957
2958                 default:
2959                         goto usb3_delegate;
2960                 }
2961
2962                 break;
2963         default:
2964
2965 usb3_delegate:
2966                 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
2967                                 r.bRequestType, r.bRequest,
2968                                 w_value, w_index, w_length,
2969                                 readl(&ep->cfg->ep_cfg));
2970
2971                 ep->responded = 0;
2972                 spin_unlock(&dev->lock);
2973                 tmp = dev->driver->setup(&dev->gadget, &r);
2974                 spin_lock(&dev->lock);
2975         }
2976 do_stall3:
2977         if (tmp < 0) {
2978                 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
2979                                 r.bRequestType, r.bRequest, tmp);
2980                 dev->protocol_stall = 1;
2981                 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
2982                 set_halt(ep);
2983         }
2984
2985 next_endpoints3:
2986
2987 #undef  w_value
2988 #undef  w_index
2989 #undef  w_length
2990
2991         return;
2992 }
2993
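/*
 * In enhanced mode the per-endpoint bits in irqstat0 are not contiguous;
 * ep_bit[] maps each endpoint index to its interrupt bit.  The loop bails
 * out early once every pending bit has been serviced.
 */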
2994 static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
2995 {
2996         u32 index;
2997         u32 bit;
2998
2999         for (index = 0; index < ARRAY_SIZE(ep_bit); index++) {
3000                 bit = BIT(ep_bit[index]);
3001
3002                 if (!stat0)
3003                         break;
3004
3005                 if (!(stat0 & bit))
3006                         continue;
3007
3008                 stat0 &= ~bit;
3009
3010                 handle_ep_small(&dev->ep[index]);
3011         }
3012 }
3013
3014 static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
3015 {
3016         struct net2280_ep       *ep;
3017         u32                     num, scratch;
3018
3019         /* most of these don't need individual acks */
3020         stat &= ~BIT(INTA_ASSERTED);
3021         if (!stat)
3022                 return;
3023         /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
3024
3025         /* starting a control request? */
3026         if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
3027                 union {
3028                         u32                     raw[2];
3029                         struct usb_ctrlrequest  r;
3030                 } u;
3031                 int                             tmp;
3032                 struct net2280_request          *req;
3033
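                /*
                 * First SETUP after a connect: latch the negotiated speed
                 * from usbstat and size ep0's maxpacket accordingly.
                 */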
3034                 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
3035                         u32 val = readl(&dev->usb->usbstat);
3036                         if (val & BIT(SUPER_SPEED)) {
3037                                 dev->gadget.speed = USB_SPEED_SUPER;
3038                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3039                                                 EP0_SS_MAX_PACKET_SIZE);
3040                         } else if (val & BIT(HIGH_SPEED)) {
3041                                 dev->gadget.speed = USB_SPEED_HIGH;
3042                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3043                                                 EP0_HS_MAX_PACKET_SIZE);
3044                         } else {
3045                                 dev->gadget.speed = USB_SPEED_FULL;
3046                                 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3047                                                 EP0_HS_MAX_PACKET_SIZE);
3048                         }
3049                         net2280_led_speed(dev, dev->gadget.speed);
3050                         ep_dbg(dev, "%s\n",
3051                                         usb_speed_string(dev->gadget.speed));
3052                 }
3053
3054                 ep = &dev->ep[0];
3055                 ep->irqs++;
3056
3057                 /* make sure any leftover request state is cleared */
3058                 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
3059                 while (!list_empty(&ep->queue)) {
3060                         req = list_entry(ep->queue.next,
3061                                         struct net2280_request, queue);
3062                         done(ep, req, (req->req.actual == req->req.length)
3063                                                 ? 0 : -EPROTO);
3064                 }
3065                 ep->stopped = 0;
3066                 dev->protocol_stall = 0;
3067                 if (!(dev->quirks & PLX_SUPERSPEED)) {
3068                         if (ep->dev->quirks & PLX_2280)
3069                                 tmp = BIT(FIFO_OVERFLOW) |
3070                                     BIT(FIFO_UNDERFLOW);
3071                         else
3072                                 tmp = 0;
3073
3074                         writel(tmp | BIT(TIMEOUT) |
3075                                    BIT(USB_STALL_SENT) |
3076                                    BIT(USB_IN_NAK_SENT) |
3077                                    BIT(USB_IN_ACK_RCVD) |
3078                                    BIT(USB_OUT_PING_NAK_SENT) |
3079                                    BIT(USB_OUT_ACK_SENT) |
3080                                    BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
3081                                    BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
3082                                    BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3083                                    BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3084                                    BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3085                                    BIT(DATA_IN_TOKEN_INTERRUPT),
3086                                    &ep->regs->ep_stat);
3087                 }
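                /*
                 * Read the raw 8-byte SETUP packet; the swabs undo readl's
                 * cpu-endian conversion so u.r holds the bytes in USB
                 * (little-endian) order.
                 */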
3088                 u.raw[0] = readl(&dev->usb->setup0123);
3089                 u.raw[1] = readl(&dev->usb->setup4567);
3090
3091                 cpu_to_le32s(&u.raw[0]);
3092                 cpu_to_le32s(&u.raw[1]);
3093
3094                 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
3095                         defect7374_workaround(dev, u.r);
3096
3097                 tmp = 0;
3098
3099 #define w_value         le16_to_cpu(u.r.wValue)
3100 #define w_index         le16_to_cpu(u.r.wIndex)
3101 #define w_length        le16_to_cpu(u.r.wLength)
3102
3103                 /* ack the irq */
3104                 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
3105                 stat ^= BIT(SETUP_PACKET_INTERRUPT);
3106
3107                 /* watch control traffic at the token level, and force
3108                  * synchronization before letting the status stage happen.
3109                  * FIXME ignore tokens we'll NAK, until driver responds.
3110                  * that'll mean a lot less irqs for some drivers.
3111                  */
3112                 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
3113                 if (ep->is_in) {
3114                         scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3115                                 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3116                                 BIT(DATA_IN_TOKEN_INTERRUPT);
3117                         stop_out_naking(ep);
3118                 } else
3119                         scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3120                                 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3121                                 BIT(DATA_IN_TOKEN_INTERRUPT);
3122                 writel(scratch, &dev->epregs[0].ep_irqenb);
3123
3124                 /* we made the hardware handle most lowlevel requests;
3125                  * everything else goes uplevel to the gadget code.
3126                  */
3127                 ep->responded = 1;
3128
3129                 if (dev->gadget.speed == USB_SPEED_SUPER) {
3130                         handle_stat0_irqs_superspeed(dev, ep, u.r);
3131                         goto next_endpoints;
3132                 }
3133
3134                 switch (u.r.bRequest) {
3135                 case USB_REQ_GET_STATUS: {
3136                         struct net2280_ep       *e;
3137                         __le32                  status;
3138
3139                         /* hw handles device and interface status */
3140                         if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
3141                                 goto delegate;
3142                         e = get_ep_by_addr(dev, w_index);
3143                         if (!e || w_length > 2)
3144                                 goto do_stall;
3145
3146                         if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
3147                                 status = cpu_to_le32(1);
3148                         else
3149                                 status = cpu_to_le32(0);
3150
3151                         /* don't bother with a request object! */
3152                         writel(0, &dev->epregs[0].ep_irqenb);
3153                         set_fifo_bytecount(ep, w_length);
3154                         writel((__force u32)status, &dev->epregs[0].ep_data);
3155                         allow_status(ep);
3156                         ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
3157                         goto next_endpoints;
3158                         }
3159                         break;
3160                 case USB_REQ_CLEAR_FEATURE: {
3161                         struct net2280_ep       *e;
3162
3163                         /* hw handles device features */
3164                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3165                                 goto delegate;
3166                         if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3167                                 goto do_stall;
3168                         e = get_ep_by_addr(dev, w_index);
3169                         if (!e)
3170                                 goto do_stall;
3171                         if (e->wedged) {
3172                                 ep_vdbg(dev, "%s wedged, halt not cleared\n",
3173                                                 ep->ep.name);
3174                         } else {
3175                                 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
3176                                 clear_halt(e);
3177                                 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
3178                                         !list_empty(&e->queue) && e->td_dma)
3179                                                 restart_dma(e);
3180                         }
3181                         allow_status(ep);
3182                         goto next_endpoints;
3183                         }
3184                         break;
3185                 case USB_REQ_SET_FEATURE: {
3186                         struct net2280_ep       *e;
3187
3188                         /* hw handles device features */
3189                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3190                                 goto delegate;
3191                         if (w_value != USB_ENDPOINT_HALT || w_length != 0)
3192                                 goto do_stall;
3193                         e = get_ep_by_addr(dev, w_index);
3194                         if (!e)
3195                                 goto do_stall;
3196                         if (e->ep.name == ep0name)
3197                                 goto do_stall;
3198                         set_halt(e);
3199                         if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
3200                                 abort_dma(e);
3201                         allow_status(ep);
3202                         ep_vdbg(dev, "%s set halt\n", ep->ep.name);
3203                         goto next_endpoints;
3204                         }
3205                         break;
3206                 default:
3207 delegate:
3208                         ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
3209                                 "ep_cfg %08x\n",
3210                                 u.r.bRequestType, u.r.bRequest,
3211                                 w_value, w_index, w_length,
3212                                 readl(&ep->cfg->ep_cfg));
3213                         ep->responded = 0;
3214                         spin_unlock(&dev->lock);
3215                         tmp = dev->driver->setup(&dev->gadget, &u.r);
3216                         spin_lock(&dev->lock);
3217                 }
3218
3219                 /* stall ep0 on error */
3220                 if (tmp < 0) {
3221 do_stall:
3222                         ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
3223                                         u.r.bRequestType, u.r.bRequest, tmp);
3224                         dev->protocol_stall = 1;
3225                 }
3226
3227                 /* some in/out token irq should follow; maybe stall then.
3228                  * driver must queue a request (even zlp) or halt ep0
3229                  * before the host times out.
3230                  */
3231         }
3232
3233 #undef  w_value
3234 #undef  w_index
3235 #undef  w_length
3236
3237 next_endpoints:
3238         if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
3239                 u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
3240                         USB3380_IRQSTAT0_EP_INTR_MASK_IN |
3241                         USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
3242
3243                 if (stat & mask) {
3244                         usb338x_handle_ep_intr(dev, stat & mask);
3245                         stat &= ~mask;
3246                 }
3247         } else {
3248                 /* endpoint data irq ? */
3249                 scratch = stat & 0x7f;
3250                 stat &= ~0x7f;
3251                 for (num = 0; scratch; num++) {
3252                         u32             t;
3253
3254                         /* do this endpoint's FIFO and queue need tending? */
3255                         t = BIT(num);
3256                         if ((scratch & t) == 0)
3257                                 continue;
3258                         scratch ^= t;
3259
3260                         ep = &dev->ep[num];
3261                         handle_ep_small(ep);
3262                 }
3263         }
3264
3265         if (stat)
3266                 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
3267 }
3268
3269 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3270                 BIT(DMA_C_INTERRUPT) | \
3271                 BIT(DMA_B_INTERRUPT) | \
3272                 BIT(DMA_A_INTERRUPT))
3273 #define PCI_ERROR_INTERRUPTS ( \
3274                 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3275                 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3276                 BIT(PCI_RETRY_ABORT_INTERRUPT))
3277
3278 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3279 __releases(dev->lock)
3280 __acquires(dev->lock)
3281 {
3282         struct net2280_ep       *ep;
3283         u32                     tmp, num, mask, scratch;
3284
3285         /* after disconnect there's nothing else to do! */
3286         tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3287         mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
3288
3289         /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
3290          * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
3291          * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
3292          * only indicates a change in the reset state).
3293          */
3294         if (stat & tmp) {
3295                 bool    reset = false;
3296                 bool    disconnect = false;
3297
3298                 /*
3299                  * Ignore disconnects and resets if the speed hasn't been set.
3300                  * VBUS can bounce and there's always an initial reset.
3301                  */
3302                 writel(tmp, &dev->regs->irqstat1);
3303                 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
3304                         if ((stat & BIT(VBUS_INTERRUPT)) &&
3305                                         (readl(&dev->usb->usbctl) &
3306                                                 BIT(VBUS_PIN)) == 0) {
3307                                 disconnect = true;
3308                                 ep_dbg(dev, "disconnect %s\n",
3309                                                 dev->driver->driver.name);
3310                         } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
3311                                         (readl(&dev->usb->usbstat) & mask)
3312                                                 == 0) {
3313                                 reset = true;
3314                                 ep_dbg(dev, "reset %s\n",
3315                                                 dev->driver->driver.name);
3316                         }
3317
3318                         if (disconnect || reset) {
3319                                 stop_activity(dev, dev->driver);
3320                                 ep0_start(dev);
3321                                 spin_unlock(&dev->lock);
3322                                 if (reset)
3323                                         usb_gadget_udc_reset
3324                                                 (&dev->gadget, dev->driver);
3325                                 else
3326                                         (dev->driver->disconnect)
3327                                                 (&dev->gadget);
3328                                 spin_lock(&dev->lock);
3329                                 return;
3330                         }
3331                 }
3332                 stat &= ~tmp;
3333
3334                 /* VBUS can bounce ... one of many reasons to ignore the
3335                  * notion of hotplug events on bus connect/disconnect!
3336                  */
3337                 if (!stat)
3338                         return;
3339         }
3340
3341         /* NOTE: chip stays in PCI D0 state for now, but it could
3342          * enter D1 to save more power
3343          */
3344         tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3345         if (stat & tmp) {
3346                 writel(tmp, &dev->regs->irqstat1);
3347                 spin_unlock(&dev->lock);
3348                 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3349                         if (dev->driver->suspend)
3350                                 dev->driver->suspend(&dev->gadget);
3351                         if (!enable_suspend)
3352                                 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
3353                 } else {
3354                         if (dev->driver->resume)
3355                                 dev->driver->resume(&dev->gadget);
3356                         /* at high speed, note erratum 0133 */
3357                 }
3358                 spin_lock(&dev->lock);
3359                 stat &= ~tmp;
3360         }
3361
3362         /* clear any other status/irqs */
3363         if (stat)
3364                 writel(stat, &dev->regs->irqstat1);
3365
3366         /* some status we can just ignore */
3367         if (dev->quirks & PLX_2280)
3368                 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3369                           BIT(SUSPEND_REQUEST_INTERRUPT) |
3370                           BIT(RESUME_INTERRUPT) |
3371                           BIT(SOF_INTERRUPT));
3372         else
3373                 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3374                           BIT(RESUME_INTERRUPT) |
3375                           BIT(SOF_DOWN_INTERRUPT) |
3376                           BIT(SOF_INTERRUPT));
3377
3378         if (!stat)
3379                 return;
3380         /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
3381
3382         /* DMA status, for ep-{a,b,c,d} */
3383         scratch = stat & DMA_INTERRUPTS;
3384         stat &= ~DMA_INTERRUPTS;
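        /*
         * DMA_A..DMA_D interrupts occupy bits 9..12 of irqstat1, so after
         * the shift DMA channel N (ep-a is channel 0) sits at bit N.
         */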
3385         scratch >>= 9;
3386         for (num = 0; scratch; num++) {
3387                 struct net2280_dma_regs __iomem *dma;
3388
3389                 tmp = BIT(num);
3390                 if ((tmp & scratch) == 0)
3391                         continue;
3392                 scratch ^= tmp;
3393
3394                 ep = &dev->ep[num + 1];
3395                 dma = ep->dma;
3396
3397                 if (!dma)
3398                         continue;
3399
3400                 /* clear ep's dma status */
3401                 tmp = readl(&dma->dmastat);
3402                 writel(tmp, &dma->dmastat);
3403
3404                 /* dma sync: on 338x, skip OUT completions while dmacount is nonzero */
3405                 if (dev->quirks & PLX_SUPERSPEED) {
3406                         u32 r_dmacount = readl(&dma->dmacount);
3407                         if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3408                             (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
3409                                 continue;
3410                 }
3411
3412                 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3413                         ep_dbg(ep->dev, "%s no xact done? %08x\n",
3414                                 ep->ep.name, tmp);
3415                         continue;
3416                 }
3417                 stop_dma(ep->dma);
3418
3419                 /* OUT transfers terminate when the data from the
3420                  * host is in our memory.  Process whatever's done.
3421                  * On this path, we know transfer's last packet wasn't
3422                  * less than req->length. NAK_OUT_PACKETS may be set,
3423                  * or the FIFO may already be holding new packets.
3424                  *
3425                  * IN transfers can linger in the FIFO for a very
3426                  * long time ... we ignore that for now, accounting
3427                  * precisely (like PIO does) needs per-packet irqs
3428                  */
3429                 scan_dma_completions(ep);
3430
3431                 /* disable dma on inactive queues; else maybe restart */
3432                 if (!list_empty(&ep->queue)) {
3433                         tmp = readl(&dma->dmactl);
3434                         restart_dma(ep);
3435                 }
3436                 ep->irqs++;
3437         }
3438
3439         /* NOTE:  there are other PCI errors we might usefully notice.
3440          * if they appear very often, here's where to try recovering.
3441          */
3442         if (stat & PCI_ERROR_INTERRUPTS) {
3443                 ep_err(dev, "pci dma error; stat %08x\n", stat);
3444                 stat &= ~PCI_ERROR_INTERRUPTS;
3445                 /* these are fatal errors, but "maybe" they won't
3446                  * happen again ...
3447                  */
3448                 stop_activity(dev, dev->driver);
3449                 ep0_start(dev);
3450                 stat = 0;
3451         }
3452
3453         if (stat)
3454                 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
3455 }
3456
3457 static irqreturn_t net2280_irq(int irq, void *_dev)
3458 {
3459         struct net2280          *dev = _dev;
3460
3461         /* shared interrupt, not ours */
3462         if ((dev->quirks & PLX_LEGACY) &&
3463                 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
3464                 return IRQ_NONE;
3465
3466         spin_lock(&dev->lock);
3467
3468         /* handle disconnect, dma, and more */
3469         handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
3470
3471         /* control requests and PIO */
3472         handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
3473
3474         if (dev->quirks & PLX_SUPERSPEED) {
3475                 /* re-enable interrupt to trigger any possible new interrupt */
3476                 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3477                 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3478                 writel(pciirqenb1, &dev->regs->pciirqenb1);
3479         }
3480
3481         spin_unlock(&dev->lock);
3482
3483         return IRQ_HANDLED;
3484 }
3485
3486 /*-------------------------------------------------------------------------*/
3487
3488 static void gadget_release(struct device *_dev)
3489 {
3490         struct net2280  *dev = dev_get_drvdata(_dev);
3491
3492         kfree(dev);
3493 }
3494
3495 /* tear down the binding between this driver and the pci device */
3496
3497 static void net2280_remove(struct pci_dev *pdev)
3498 {
3499         struct net2280          *dev = pci_get_drvdata(pdev);
3500
3501         usb_del_gadget_udc(&dev->gadget);
3502
3503         BUG_ON(dev->driver);
3504
3505         /* then clean up the resources we allocated during probe() */
3506         net2280_led_shutdown(dev);
3507         if (dev->requests) {
3508                 int             i;
3509                 for (i = 1; i < 5; i++) {
3510                         if (!dev->ep[i].dummy)
3511                                 continue;
3512                         pci_pool_free(dev->requests, dev->ep[i].dummy,
3513                                         dev->ep[i].td_dma);
3514                 }
3515                 pci_pool_destroy(dev->requests);
3516         }
3517         if (dev->got_irq)
3518                 free_irq(pdev->irq, dev);
3519         if (dev->quirks & PLX_SUPERSPEED)
3520                 pci_disable_msi(pdev);
3521         if (dev->regs)
3522                 iounmap(dev->regs);
3523         if (dev->region)
3524                 release_mem_region(pci_resource_start(pdev, 0),
3525                                 pci_resource_len(pdev, 0));
3526         if (dev->enabled)
3527                 pci_disable_device(pdev);
3528         device_remove_file(&pdev->dev, &dev_attr_registers);
3529
3530         ep_info(dev, "unbind\n");
3531 }
3532
3533 /* wrap this driver around the specified device, but
3534  * don't respond over USB until a gadget driver binds to us.
3535  */
3536
3537 static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3538 {
3539         struct net2280          *dev;
3540         unsigned long           resource, len;
3541         void                    __iomem *base = NULL;
3542         int                     retval, i;
3543
3544         /* alloc, and start init */
3545         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3546         if (dev == NULL) {
3547                 retval = -ENOMEM;
3548                 goto done;
3549         }
3550
3551         pci_set_drvdata(pdev, dev);
3552         spin_lock_init(&dev->lock);
3553         dev->quirks = id->driver_data;
3554         dev->pdev = pdev;
3555         dev->gadget.ops = &net2280_ops;
3556         dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
3557                                 USB_SPEED_SUPER : USB_SPEED_HIGH;
3558
3559         /* the "gadget" abstracts/virtualizes the controller */
3560         dev->gadget.name = driver_name;
3561
3562         /* now all the pci goodies ... */
3563         if (pci_enable_device(pdev) < 0) {
3564                 retval = -ENODEV;
3565                 goto done;
3566         }
3567         dev->enabled = 1;
3568
3569         /* BAR 0 holds all the registers
3570          * BAR 1 is 8051 memory; unused here (note erratum 0103)
3571          * BAR 2 is fifo memory; unused here
3572          */
3573         resource = pci_resource_start(pdev, 0);
3574         len = pci_resource_len(pdev, 0);
3575         if (!request_mem_region(resource, len, driver_name)) {
3576                 ep_dbg(dev, "controller already in use\n");
3577                 retval = -EBUSY;
3578                 goto done;
3579         }
3580         dev->region = 1;
3581
3582         /* FIXME provide firmware download interface to put
3583          * 8051 code into the chip, e.g. to turn on PCI PM.
3584          */
3585
3586         base = ioremap_nocache(resource, len);
3587         if (base == NULL) {
3588                 ep_dbg(dev, "can't map memory\n");
3589                 retval = -EFAULT;
3590                 goto done;
3591         }
3592         dev->regs = (struct net2280_regs __iomem *) base;
3593         dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3594         dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3595         dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3596         dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3597         dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3598
3599         if (dev->quirks & PLX_SUPERSPEED) {
3600                 u32 fsmvalue;
3601                 u32 usbstat;
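                /*
                 * 338x-only register blocks beyond the classic net228x map:
                 * USB extension, link-layer (plus LFPS, TSN and "chicken"
                 * registers), and the protocol layer.
                 */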
3602                 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3603                                                         (base + 0x00b4);
3604                 dev->llregs = (struct usb338x_ll_regs __iomem *)
3605                                                         (base + 0x0700);
3606                 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3607                                                         (base + 0x0748);
3608                 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3609                                                         (base + 0x077c);
3610                 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3611                                                         (base + 0x079c);
3612                 dev->plregs = (struct usb338x_pl_regs __iomem *)
3613                                                         (base + 0x0800);
3614                 usbstat = readl(&dev->usb->usbstat);
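                /*
                 * Enhanced mode (usbstat bit 11) exposes ep0 plus eight
                 * data endpoints; legacy 338x mode exposes ep0 plus four.
                 */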
3615                 dev->enhanced_mode = !!(usbstat & BIT(11));
3616                 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3617                 /* put into initial config, link up all endpoints */
3618                 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3619                                         (0xf << DEFECT7374_FSM_FIELD);
3620                 /* See if firmware needs to set up for workaround: */
3621                 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
3622                         dev->bug7734_patched = 1;
3623                         writel(0, &dev->usb->usbctl);
3624                 } else
3625                         dev->bug7734_patched = 0;
3626         } else {
3627                 dev->enhanced_mode = 0;
3628                 dev->n_ep = 7;
3629                 /* put into initial config, link up all endpoints */
3630                 writel(0, &dev->usb->usbctl);
3631         }
3632
3633         usb_reset(dev);
3634         usb_reinit(dev);
3635
3636         /* irq setup after old hardware is cleaned up */
3637         if (!pdev->irq) {
3638                 ep_err(dev, "No IRQ.  Check PCI setup!\n");
3639                 retval = -ENODEV;
3640                 goto done;
3641         }
3642
3643         if (dev->quirks & PLX_SUPERSPEED)
3644                 if (pci_enable_msi(pdev))
3645                         ep_err(dev, "Failed to enable MSI mode\n");
3646
3647         if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3648                                                         driver_name, dev)) {
3649                 ep_err(dev, "request interrupt %d failed\n", pdev->irq);
3650                 retval = -EBUSY;
3651                 goto done;
3652         }
3653         dev->got_irq = 1;
3654
3655         /* DMA setup */
3656         /* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
3657         dev->requests = pci_pool_create("requests", pdev,
3658                 sizeof(struct net2280_dma),
3659                 0 /* no alignment requirements */,
3660                 0 /* or page-crossing issues */);
3661         if (!dev->requests) {
3662                 ep_dbg(dev, "can't get request pool\n");
3663                 retval = -ENOMEM;
3664                 goto done;
3665         }
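        /*
         * Preallocate one scratch ("dummy") descriptor from the request
         * pool for each DMA-capable endpoint (ep-a..ep-d).
         */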
3666         for (i = 1; i < 5; i++) {
3667                 struct net2280_dma      *td;
3668
3669                 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3670                                 &dev->ep[i].td_dma);
3671                 if (!td) {
3672                         ep_dbg(dev, "can't get dummy %d\n", i);
3673                         retval = -ENOMEM;
3674                         goto done;
3675                 }
3676                 td->dmacount = 0;       /* not VALID */
3677                 td->dmadesc = td->dmaaddr;
3678                 dev->ep[i].dummy = td;
3679         }
3680
3681         /* enable lower-overhead pci memory bursts during DMA */
3682         if (dev->quirks & PLX_LEGACY)
3683                 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
3684                         /*
3685                          * 256 write retries may not be enough...
3686                            BIT(PCI_RETRY_ABORT_ENABLE) |
3687                         */
3688                         BIT(DMA_READ_MULTIPLE_ENABLE) |
3689                         BIT(DMA_READ_LINE_ENABLE),
3690                         &dev->pci->pcimstctl);
3691         /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3692         pci_set_master(pdev);
3693         pci_try_set_mwi(pdev);
3694
3695         /* ... also flushes any posted pci writes */
3696         dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
3697
3698         /* done */
3699         ep_info(dev, "%s\n", driver_desc);
3700         ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
3701                         pdev->irq, base, dev->chiprev);
3702         ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
3703                 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
3704         retval = device_create_file(&pdev->dev, &dev_attr_registers);
3705         if (retval)
3706                 goto done;
3707
3708         retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3709                         gadget_release);
3710         if (retval)
3711                 goto done;
3712         return 0;
3713
3714 done:
3715         if (dev) {
3716                 net2280_remove(pdev);
3717                 kfree(dev);
3718         }
3719         return retval;
3720 }
3721
3722 /* make sure the board is quiescent; otherwise it will continue
3723  * generating IRQs across the upcoming reboot.
3724  */
3725
3726 static void net2280_shutdown(struct pci_dev *pdev)
3727 {
3728         struct net2280          *dev = pci_get_drvdata(pdev);
3729
3730         /* disable IRQs */
3731         writel(0, &dev->regs->pciirqenb0);
3732         writel(0, &dev->regs->pciirqenb1);
3733
3734         /* disable the pullup so the host will think we're gone */
3735         writel(0, &dev->usb->usbctl);
3736
3737 }
3738
3739
3740 /*-------------------------------------------------------------------------*/
3741
3742 static const struct pci_device_id pci_ids[] = { {
3743         .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3744         .class_mask =   ~0,
3745         .vendor =       PCI_VENDOR_ID_PLX_LEGACY,
3746         .device =       0x2280,
3747         .subvendor =    PCI_ANY_ID,
3748         .subdevice =    PCI_ANY_ID,
3749         .driver_data =  PLX_LEGACY | PLX_2280,
3750         }, {
3751         .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3752         .class_mask =   ~0,
3753         .vendor =       PCI_VENDOR_ID_PLX_LEGACY,
3754         .device =       0x2282,
3755         .subvendor =    PCI_ANY_ID,
3756         .subdevice =    PCI_ANY_ID,
3757         .driver_data =  PLX_LEGACY,
3758         },
3759         {
3760         .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3761         .class_mask =   ~0,
3762         .vendor =       PCI_VENDOR_ID_PLX,
3763         .device =       0x3380,
3764         .subvendor =    PCI_ANY_ID,
3765         .subdevice =    PCI_ANY_ID,
3766         .driver_data =  PLX_SUPERSPEED,
3767          },
3768         {
3769         .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3770         .class_mask =   ~0,
3771         .vendor =       PCI_VENDOR_ID_PLX,
3772         .device =       0x3382,
3773         .subvendor =    PCI_ANY_ID,
3774         .subdevice =    PCI_ANY_ID,
3775         .driver_data =  PLX_SUPERSPEED,
3776          },
3777 { /* end: all zeroes */ }
3778 };
3779 MODULE_DEVICE_TABLE(pci, pci_ids);
3780
3781 /* pci driver glue; this is a "new style" PCI driver module */
3782 static struct pci_driver net2280_pci_driver = {
3783         .name =         (char *) driver_name,
3784         .id_table =     pci_ids,
3785
3786         .probe =        net2280_probe,
3787         .remove =       net2280_remove,
3788         .shutdown =     net2280_shutdown,
3789
3790         /* FIXME add power management support */
3791 };
3792
3793 module_pci_driver(net2280_pci_driver);
3794
3795 MODULE_DESCRIPTION(DRIVER_DESC);
3796 MODULE_AUTHOR("David Brownell");
3797 MODULE_LICENSE("GPL");