// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"
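
/*
 * Send a reply on EP0 for a request the driver handles internally
 * (hub standard/class requests). The reply goes through the dedicated
 * internal EP0 request; a NULL "ptr" means the data has already been
 * placed in the EP0 buffer.
 */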
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}
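
/*
 * Variadic helper for short internal replies: each argument after
 * "len" is one byte of reply data, copied straight into the EP0
 * buffer. Callers typically go through the ast_vhub_simple_reply()
 * wrapper macro in vhub.h, e.g.:
 *
 *	ast_vhub_simple_reply(ep, 0x00, 0x00);	/- two status bytes -/
 */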
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
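
/*
 * EP0 state machine as tracked in ep->ep0.state:
 *
 *	ep0_state_token  - waiting for the next SETUP packet
 *	ep0_state_data   - IN or OUT data phase in progress
 *	ep0_state_status - waiting for the status phase ack
 *	ep0_state_stall  - stalled until the next SETUP packet
 */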
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);
	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);
	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else
		EPDBG(ep, "no gadget for request !\n");

	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
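
/*
 * Push the next chunk of an IN transfer into the EP0 buffer and arm
 * the transmitter, or complete the request and move on to the status
 * phase once everything (including an optional zero-length packet)
 * has been sent. Called when a request is queued and on each TX ack.
 */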
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	/* HW workaround: see vhub_dma_workaround() in vhub.h */
	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
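
/*
 * Copy data landed in the EP0 buffer into the current OUT request and
 * either complete it (short packet or expected length reached, moving
 * on to the status phase) or re-prime the endpoint for more.
 */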
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}
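
/*
 * Handle EP0 ack interrupts (both directions): advance the state
 * machine, kicking the next data chunk or stalling on protocol
 * errors.
 */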
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch\n");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else
		ep->ep0.state = ep0_state_token;
}
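
/*
 * Gadget-facing queue op for EP0. Only one request can be outstanding
 * at a time, and the direction comes from the SETUP packet currently
 * being processed rather than from the endpoint itself.
 */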
static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
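
/*
 * Note: EP0 is always active, so unlike the generic endpoints there
 * are no enable/disable ops here.
 */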
static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};
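
/*
 * Called on device reset: drop any in-flight request and return the
 * EP0 state machine to waiting for a SETUP token.
 */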
void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}
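
/*
 * One EP0 exists for the vHub itself (dev == NULL) and one for each
 * downstream device; they share the SETUP register and buffer layout
 * carved out of vhub->ep0_bufs.
 */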
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}