2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include "usb_trace.h"
21 #define MT_VEND_REQ_MAX_RETRY 10
22 #define MT_VEND_REQ_TOUT_MS 300
24 /* should be called with usb_ctrl_mtx locked */
/*
 * Issue a vendor-specific USB control transfer on endpoint 0, retrying
 * up to MT_VEND_REQ_MAX_RETRY times with MT_VEND_REQ_TOUT_MS per try.
 * Direction is taken from the USB_DIR_IN bit of @req_type.
 * NOTE(review): this extract is missing lines (local declarations,
 * braces, the break after the removed-check, the return) — confirm
 * against the full source before editing.
 */
25 static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
26 u8 req_type, u16 val, u16 offset,
27 void *buf, size_t len)
29 struct usb_interface *intf = to_usb_interface(dev->dev);
30 struct usb_device *udev = interface_to_usbdev(intf);
/* pick receive vs send control pipe from the request direction */
34 pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
35 : usb_sndctrlpipe(udev, 0);
36 for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
/* stop retrying once the device has been removed */
37 if (test_bit(MT76_REMOVED, &dev->state))
40 ret = usb_control_msg(udev, pipe, req, req_type, val,
41 offset, buf, len, MT_VEND_REQ_TOUT_MS);
/* presumably guarded by "if (ret == -ENODEV)" in the full source —
 * unconditionally marking the device removed here would be a bug */
43 set_bit(MT76_REMOVED, &dev->state);
44 if (ret >= 0 || ret == -ENODEV)
/* transient failure: back off 5-10ms before the next attempt */
46 usleep_range(5000, 10000);
/* all retries exhausted: report the last error */
49 dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
/*
 * Public entry point for vendor control transfers: serializes access
 * via usb_ctrl_mtx around __mt76u_vendor_request().
 * Note: trace_usb_reg_wr() is emitted for both read and write
 * directions here.
 */
54 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
55 u8 req_type, u16 val, u16 offset,
56 void *buf, size_t len)
60 mutex_lock(&dev->usb.usb_ctrl_mtx);
61 ret = __mt76u_vendor_request(dev, req, req_type,
62 val, offset, buf, len);
63 trace_usb_reg_wr(dev, offset, val);
64 mutex_unlock(&dev->usb.usb_ctrl_mtx);
68 EXPORT_SYMBOL_GPL(mt76u_vendor_request);
70 /* should be called with usb_ctrl_mtx locked */
/*
 * Read a 32-bit register over USB. The vendor request code is selected
 * from the address-type bits (EEPROM / CFG / multi-read); the register
 * offset is the address with the type bits masked off. The value is
 * read into the shared usb->data bounce buffer (hence the lock
 * requirement) and converted from little-endian.
 * NOTE(review): break statements, default labels and the return of
 * 'data' appear to be missing from this extract.
 */
71 static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
73 struct mt76_usb *usb = &dev->usb;
79 switch (addr & MT_VEND_TYPE_MASK) {
80 case MT_VEND_TYPE_EEPROM:
81 req = MT_VEND_READ_EEPROM;
83 case MT_VEND_TYPE_CFG:
84 req = MT_VEND_READ_CFG;
87 req = MT_VEND_MULTI_READ;
90 offset = addr & ~MT_VEND_TYPE_MASK;
92 ret = __mt76u_vendor_request(dev, req,
93 USB_DIR_IN | USB_TYPE_VENDOR,
94 0, offset, usb->data, sizeof(__le32));
/* only trust the value if the full 4 bytes were transferred */
95 if (ret == sizeof(__le32))
96 data = get_unaligned_le32(usb->data);
97 trace_usb_reg_rr(dev, addr, data);
/* Locked 32-bit register read wrapper around __mt76u_rr(). */
102 u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
106 mutex_lock(&dev->usb.usb_ctrl_mtx);
107 ret = __mt76u_rr(dev, addr);
108 mutex_unlock(&dev->usb.usb_ctrl_mtx);
113 /* should be called with usb_ctrl_mtx locked */
/*
 * Write a 32-bit register over USB: pick the vendor request from the
 * address-type bits (CFG / multi-write), serialize the value as
 * little-endian into the shared usb->data bounce buffer, and send it.
 * The transfer's return value is not checked here.
 */
114 static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
116 struct mt76_usb *usb = &dev->usb;
120 switch (addr & MT_VEND_TYPE_MASK) {
121 case MT_VEND_TYPE_CFG:
122 req = MT_VEND_WRITE_CFG;
125 req = MT_VEND_MULTI_WRITE;
128 offset = addr & ~MT_VEND_TYPE_MASK;
130 put_unaligned_le32(val, usb->data);
131 __mt76u_vendor_request(dev, req,
132 USB_DIR_OUT | USB_TYPE_VENDOR, 0,
133 offset, usb->data, sizeof(__le32));
134 trace_usb_reg_wr(dev, addr, val);
/* Locked 32-bit register write wrapper around __mt76u_wr(). */
137 void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
139 mutex_lock(&dev->usb.usb_ctrl_mtx);
140 __mt76u_wr(dev, addr, val);
141 mutex_unlock(&dev->usb.usb_ctrl_mtx);
/*
 * Read-modify-write: clear the bits in 'mask' and set the bits in
 * 'val', holding usb_ctrl_mtx across both accesses so the sequence is
 * atomic with respect to other register accesses.
 */
144 static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
147 mutex_lock(&dev->usb.usb_ctrl_mtx);
148 val |= __mt76u_rr(dev, addr) & ~mask;
149 __mt76u_wr(dev, addr, val);
150 mutex_unlock(&dev->usb.usb_ctrl_mtx);
/*
 * Copy a buffer to device memory one 32-bit word per control transfer,
 * reusing the shared usb->data bounce buffer under usb_ctrl_mtx.
 * Note: only len/4 full words are written — any trailing len%4 bytes
 * are silently dropped, so callers must pass 4-byte-multiple lengths.
 */
155 static void mt76u_copy(struct mt76_dev *dev, u32 offset,
156 const void *data, int len)
158 struct mt76_usb *usb = &dev->usb;
159 const u32 *val = data;
162 mutex_lock(&usb->usb_ctrl_mtx);
163 for (i = 0; i < (len / 4); i++) {
164 put_unaligned_le32(val[i], usb->data);
165 ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
166 USB_DIR_OUT | USB_TYPE_VENDOR,
167 0, offset + i * 4, usb->data,
172 mutex_unlock(&usb->usb_ctrl_mtx);
/*
 * Write a 32-bit value as two 16-bit vendor requests carried in the
 * control setup packet's wValue field (no data stage): low halfword at
 * 'offset', high halfword at 'offset + 2'. Errors are not checked.
 */
175 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
176 const u16 offset, const u32 val)
178 mutex_lock(&dev->usb.usb_ctrl_mtx);
179 __mt76u_vendor_request(dev, req,
180 USB_DIR_OUT | USB_TYPE_VENDOR,
181 val & 0xffff, offset, NULL, 0);
182 __mt76u_vendor_request(dev, req,
183 USB_DIR_OUT | USB_TYPE_VENDOR,
184 val >> 16, offset + 2, NULL, 0);
185 mutex_unlock(&dev->usb.usb_ctrl_mtx);
187 EXPORT_SYMBOL_GPL(mt76u_single_wr);
/*
 * Walk the current altsetting's endpoint descriptors and record bulk
 * IN/OUT endpoint numbers and max packet sizes into the mt76_usb
 * state, in descriptor order. Fails unless exactly __MT_EP_IN_MAX IN
 * and __MT_EP_OUT_MAX OUT endpoints were found.
 * NOTE(review): the in_ep++/out_ep++ increments and the error return
 * appear to be on lines missing from this extract.
 */
190 mt76u_set_endpoints(struct usb_interface *intf,
191 struct mt76_usb *usb)
193 struct usb_host_interface *intf_desc = intf->cur_altsetting;
194 struct usb_endpoint_descriptor *ep_desc;
195 int i, in_ep = 0, out_ep = 0;
197 for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
198 ep_desc = &intf_desc->endpoint[i].desc;
200 if (usb_endpoint_is_bulk_in(ep_desc) &&
201 in_ep < __MT_EP_IN_MAX) {
202 usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
/* last matching endpoint wins for the shared max-packet value */
203 usb->in_max_packet = usb_endpoint_maxp(ep_desc);
205 } else if (usb_endpoint_is_bulk_out(ep_desc) &&
206 out_ep < __MT_EP_OUT_MAX) {
207 usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
208 usb->out_max_packet = usb_endpoint_maxp(ep_desc);
213 if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
/*
 * Populate up to 'nsgs' scatterlist entries of an rx urb with freshly
 * allocated page fragments of 'len' bytes (sglen bytes mapped per sg
 * entry). On partial allocation failure, fragments beyond the last
 * filled entry are released. Returns the number of entries filled, or
 * -ENOMEM if none could be allocated.
 */
219 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
220 int nsgs, int len, int sglen)
222 struct urb *urb = buf->urb;
225 for (i = 0; i < nsgs; i++) {
230 data = netdev_alloc_frag(len);
/* translate the fragment back to its page + offset for sg_set_page */
234 page = virt_to_head_page(data);
235 offset = data - page_address(page);
236 sg_set_page(&urb->sg[i], page, sglen, offset);
/* allocation failed: free any previously attached fragments */
242 for (j = nsgs; j < urb->num_sgs; j++)
243 skb_free_frag(sg_virt(&urb->sg[j]));
247 urb->num_sgs = max_t(int, i, urb->num_sgs);
/* NOTE(review): trailing comma here is the C comma operator chaining
 * into the next statement — almost certainly meant to be a semicolon */
248 buf->len = urb->num_sgs * sglen,
249 sg_init_marker(urb->sg, urb->num_sgs);
/* i fragments filled, or -ENOMEM when none succeeded */
251 return i ? : -ENOMEM;
/*
 * Allocate an rx urb plus a device-managed scatterlist of 'nsgs'
 * entries, then fill it with rx fragments via mt76u_fill_rx_sg().
 * The sg array is devm-allocated, so it is freed with the device,
 * not with the urb.
 */
254 int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
255 int nsgs, int len, int sglen, gfp_t gfp)
257 buf->urb = usb_alloc_urb(0, gfp);
261 buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
266 sg_init_table(buf->urb->sg, nsgs);
269 return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
/*
 * Release an rx buffer: free every page fragment attached to the
 * urb's scatterlist, then the urb itself (the sg array is devm-owned
 * and is not freed here).
 */
273 void mt76u_buf_free(struct mt76u_buf *buf)
275 struct urb *urb = buf->urb;
276 struct scatterlist *sg;
279 for (i = 0; i < urb->num_sgs; i++) {
284 skb_free_frag(sg_virt(sg));
286 usb_free_urb(buf->urb);
288 EXPORT_SYMBOL_GPL(mt76u_buf_free);
/*
 * Fill and submit a bulk urb on the in/out endpoint selected by 'dir'
 * and 'index'. The transfer buffer is passed as NULL because the data
 * lives in the urb's scatterlist (urb->sg), set up beforehand.
 */
290 int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
291 struct mt76u_buf *buf, gfp_t gfp,
292 usb_complete_t complete_fn, void *context)
294 struct usb_interface *intf = to_usb_interface(dev->dev);
295 struct usb_device *udev = interface_to_usbdev(intf);
298 if (dir == USB_DIR_IN)
299 pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
301 pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
303 usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
304 complete_fn, context);
306 return usb_submit_urb(buf->urb, gfp);
308 EXPORT_SYMBOL_GPL(mt76u_submit_buf);
/*
 * Pop the next completed rx buffer from the queue head under the
 * queue lock, advancing head modulo ndesc. Returns NULL when no
 * entry is available (the buf assignment is presumably guarded by a
 * queued-count check on a line missing from this extract).
 */
310 static inline struct mt76u_buf
311 *mt76u_get_next_rx_entry(struct mt76_queue *q)
313 struct mt76u_buf *buf = NULL;
316 spin_lock_irqsave(&q->lock, flags);
318 buf = &q->entry[q->head].ubuf;
319 q->head = (q->head + 1) % q->ndesc;
322 spin_unlock_irqrestore(&q->lock, flags);
/*
 * Extract and sanity-check the DMA length from the first 16 LE bits
 * of an rx buffer: it must be non-zero, 4-byte aligned, and fit
 * (plus the DMA header) inside the received data.
 */
327 static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
329 u16 dma_len, min_len;
331 dma_len = get_unaligned_le16(data);
332 min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
335 if (data_len < min_len || WARN_ON(!dma_len) ||
336 WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
337 WARN_ON(dma_len & 0x3))
/*
 * Build an skb from a completed rx urb: the first sg fragment (minus
 * the DMA header) becomes the skb head via build_skb(), and any
 * remaining length is appended as paged rx frags from the following
 * sg entries. The skb is then handed to the driver's rx_skb hook.
 * NOTE(review): the loop around the nsgs frag-append section and the
 * return value are on lines missing from this extract.
 */
343 mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
345 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
346 u8 *data = sg_virt(&urb->sg[0]);
347 int data_len, len, nsgs = 1;
/* drop frames received before the device is fully initialized */
350 if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
353 len = mt76u_get_rx_entry_len(data, urb->actual_length);
357 skb = build_skb(data, q->buf_size);
361 data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
/* skip the DMA header at the front of the first fragment */
362 skb_reserve(skb, MT_DMA_HDR_LEN);
363 if (skb->tail + data_len > skb->end) {
368 __skb_put(skb, data_len);
372 data_len = min_t(int, len, urb->sg[nsgs].length);
373 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
374 sg_page(&urb->sg[nsgs]),
375 urb->sg[nsgs].offset,
376 data_len, q->buf_size);
380 dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
/*
 * rx urb completion callback (interrupt context): on success, verify
 * the completed urb matches the queue tail, advance the tail, and
 * kick the rx tasklet to process the data.
 */
385 static void mt76u_complete_rx(struct urb *urb)
387 struct mt76_dev *dev = urb->context;
388 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
391 switch (urb->status) {
397 dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
403 spin_lock_irqsave(&q->lock, flags);
/* completions must arrive in submission order; a mismatch means
 * the queue state is corrupt */
404 if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
407 q->tail = (q->tail + 1) % q->ndesc;
409 tasklet_schedule(&dev->usb.rx_tasklet);
411 spin_unlock_irqrestore(&q->lock, flags);
/*
 * rx bottom half: drain completed rx buffers, process each into skbs,
 * refill the consumed sg fragments, resubmit the urb, and finally
 * notify mac80211 via mt76_rx_poll_complete().
 */
414 static void mt76u_rx_tasklet(unsigned long data)
416 struct mt76_dev *dev = (struct mt76_dev *)data;
417 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
418 int err, nsgs, buf_len = q->buf_size;
419 struct mt76u_buf *buf;
424 buf = mt76u_get_next_rx_entry(q);
/* nsgs = number of sg fragments the entry consumed and must refill */
428 nsgs = mt76u_process_rx_entry(dev, buf->urb);
430 err = mt76u_fill_rx_sg(dev, buf, nsgs,
432 SKB_WITH_OVERHEAD(buf_len));
/* hand the refilled buffer back to the USB core */
436 mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
438 mt76u_complete_rx, dev);
440 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
/*
 * Submit every rx buffer in the main rx queue and reset head/tail to
 * zero, all under the queue lock (hence GFP_ATOMIC). Returns the
 * first submission error, if any.
 */
445 int mt76u_submit_rx_buffers(struct mt76_dev *dev)
447 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
451 spin_lock_irqsave(&q->lock, flags);
452 for (i = 0; i < q->ndesc; i++) {
453 err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
454 &q->entry[i].ubuf, GFP_ATOMIC,
455 mt76u_complete_rx, dev);
459 q->head = q->tail = 0;
461 spin_unlock_irqrestore(&q->lock, flags);
465 EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
/*
 * Set up the main rx queue: allocate MT_NUM_RX_ENTRIES entries
 * (device-managed memory), size the buffers depending on whether the
 * host controller supports scatter-gather (MT_SG_MAX_SIZE fragments
 * of MT_RX_BUF_SIZE vs a single PAGE_SIZE buffer), allocate each
 * entry's urb+fragments, then submit them all.
 */
467 static int mt76u_alloc_rx(struct mt76_dev *dev)
469 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
472 spin_lock_init(&q->lock);
473 q->entry = devm_kcalloc(dev->dev,
474 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
479 if (mt76u_check_sg(dev)) {
480 q->buf_size = MT_RX_BUF_SIZE;
481 nsgs = MT_SG_MAX_SIZE;
483 q->buf_size = PAGE_SIZE;
487 q->ndesc = MT_NUM_RX_ENTRIES;
488 for (i = 0; i < q->ndesc; i++) {
489 err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
491 SKB_WITH_OVERHEAD(q->buf_size),
497 return mt76u_submit_rx_buffers(dev);
/* Free every rx buffer (fragments + urb) in the main rx queue. */
500 static void mt76u_free_rx(struct mt76_dev *dev)
502 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
505 for (i = 0; i < q->ndesc; i++)
506 mt76u_buf_free(&q->entry[i].ubuf);
/* Cancel all in-flight rx urbs; usb_kill_urb() waits for completion. */
509 static void mt76u_stop_rx(struct mt76_dev *dev)
511 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
514 for (i = 0; i < q->ndesc; i++)
515 usb_kill_urb(q->entry[i].ubuf.urb);
/*
 * Prepend the 4-byte TXINFO DMA header (length rounded up to 4,
 * destination port, flags) and append zero padding so the total
 * transfer is 4-byte aligned plus a trailing 4-byte zero word, as the
 * layout comment below describes. Padding is added to the last skb in
 * the frag chain.
 */
518 int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
520 struct sk_buff *iter, *last = skb;
524 * | 4B | xfer len | pad | 4B |
525 * | TXINFO | pkt/cmd | zero pad to 4B | zero |
527 * length field of TXINFO should be set to 'xfer len'.
529 info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
530 FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
531 put_unaligned_le32(info, skb_push(skb, sizeof(info)));
/* pad = alignment fill + the mandatory trailing 4 zero bytes */
533 pad = round_up(skb->len, 4) + 4 - skb->len;
534 skb_walk_frags(skb, iter) {
537 skb->data_len += pad;
544 if (__skb_pad(last, pad, true))
546 __skb_put(last, pad);
550 EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
/*
 * tx bottom half: for each AC queue, reap completed tx buffers from
 * the queue head (calling the driver's tx_complete_skb hook), reset
 * any pending schedule flags, refill the queue via
 * mt76_txq_schedule(), wake waiters when the queue drains below
 * ndesc-8, schedule the tx-status work, and re-wake the mac80211
 * queue.
 */
552 static void mt76u_tx_tasklet(unsigned long data)
554 struct mt76_dev *dev = (struct mt76_dev *)data;
555 struct mt76u_buf *buf;
556 struct mt76_queue *q;
560 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
563 spin_lock_bh(&q->lock);
565 buf = &q->entry[q->head].ubuf;
/* stop at the first buffer whose urb has not completed yet */
566 if (!buf->done || !q->queued)
569 dev->drv->tx_complete_skb(dev, q,
573 if (q->entry[q->head].schedule) {
574 q->entry[q->head].schedule = false;
578 q->head = (q->head + 1) % q->ndesc;
581 mt76_txq_schedule(dev, q);
/* NOTE(review): "i < IEEE80211_NUM_ACS" is always true inside this
 * loop — the condition reduces to the queued check; verify intent */
582 wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
584 wake_up(&dev->tx_wait);
586 spin_unlock_bh(&q->lock);
/* defer status polling; MT76_READING_STATS guards re-arming */
588 if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
589 ieee80211_queue_delayed_work(dev->hw,
591 msecs_to_jiffies(10));
594 ieee80211_wake_queue(dev->hw, i);
/*
 * Delayed work polling tx status from the device via the driver's
 * tx_status_data hook until it reports no more data or the device is
 * removed; re-arms itself after 10ms while the device is running and
 * data was read, then clears MT76_READING_STATS so the tx tasklet can
 * schedule it again.
 */
598 static void mt76u_tx_status_data(struct work_struct *work)
600 struct mt76_usb *usb;
601 struct mt76_dev *dev;
605 usb = container_of(work, struct mt76_usb, stat_work.work);
606 dev = container_of(usb, struct mt76_dev, usb);
609 if (test_bit(MT76_REMOVED, &dev->state))
612 if (!dev->drv->tx_status_data(dev, &update))
617 if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
618 ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
619 msecs_to_jiffies(10));
621 clear_bit(MT76_READING_STATS, &dev->state);
/*
 * tx urb completion callback: log errors and kick the tx tasklet to
 * reap the buffer (the buf->done marking is presumably on a line
 * missing from this extract).
 */
624 static void mt76u_complete_tx(struct urb *urb)
626 struct mt76u_buf *buf = urb->context;
627 struct mt76_dev *dev = buf->dev;
629 if (mt76u_urb_error(urb))
630 dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
633 tasklet_schedule(&dev->usb.tx_tasklet);
/*
 * Map an skb (head + page frags + frag_list skbs) into the tx urb's
 * scatterlist, capped at MT_SG_MAX_SIZE entries, using
 * skb_to_sgvec_nomark() after placing the sg end marker.
 */
637 mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
/* worst case: one sg entry for the head plus one per page frag,
 * for the root skb and each skb on its frag list */
639 int nsgs = 1 + skb_shinfo(skb)->nr_frags;
640 struct sk_buff *iter;
642 skb_walk_frags(skb, iter)
643 nsgs += 1 + skb_shinfo(iter)->nr_frags;
645 memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
647 nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
648 sg_init_marker(urb->sg, nsgs);
651 return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
/*
 * Queue an skb for transmission on the USB bulk-out endpoint mapped
 * from the queue's hardware index: let the driver prepare the frame,
 * build the urb scatterlist, fill the bulk urb (NULL transfer buffer
 * — data is carried in urb->sg), and record the skb at the tail slot.
 * Actual submission happens later in mt76u_tx_kick().
 * NOTE(review): the idx assignment, error paths and return value are
 * on lines missing from this extract.
 */
655 mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
656 struct sk_buff *skb, struct mt76_wcid *wcid,
657 struct ieee80211_sta *sta)
659 struct usb_interface *intf = to_usb_interface(dev->dev);
660 struct usb_device *udev = interface_to_usbdev(intf);
661 u8 ep = q2ep(q->hw_idx);
662 struct mt76u_buf *buf;
/* queue full: nothing can be enqueued */
667 if (q->queued == q->ndesc)
670 err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
674 buf = &q->entry[idx].ubuf;
677 err = mt76u_tx_build_sg(skb, buf->urb);
681 pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
682 usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
683 mt76u_complete_tx, buf);
685 q->tail = (q->tail + 1) % q->ndesc;
686 q->entry[idx].skb = skb;
/*
 * Submit every queued-but-unsubmitted tx urb between q->first and
 * q->tail. On submit failure, -ENODEV marks the device removed
 * (guard presumably on a missing line); other errors are logged.
 */
692 static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
694 struct mt76u_buf *buf;
697 while (q->first != q->tail) {
698 buf = &q->entry[q->first].ubuf;
699 err = usb_submit_urb(buf->urb, GFP_ATOMIC);
702 set_bit(MT76_REMOVED, &dev->state);
704 dev_err(dev->dev, "tx urb submit failed:%d\n",
708 q->first = (q->first + 1) % q->ndesc;
/*
 * Set up one tx queue per AC: init lock/swq/hw index, allocate
 * MT_NUM_TX_ENTRIES entries, and give each entry a urb plus a
 * device-managed scatterlist of MT_SG_MAX_SIZE entries. All memory
 * except the urbs is devm-owned.
 */
712 static int mt76u_alloc_tx(struct mt76_dev *dev)
714 struct mt76u_buf *buf;
715 struct mt76_queue *q;
719 size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
720 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
722 spin_lock_init(&q->lock);
723 INIT_LIST_HEAD(&q->swq);
/* map AC index to the hardware queue index */
724 q->hw_idx = q2hwq(i);
726 q->entry = devm_kcalloc(dev->dev,
727 MT_NUM_TX_ENTRIES, sizeof(*q->entry),
732 q->ndesc = MT_NUM_TX_ENTRIES;
733 for (j = 0; j < q->ndesc; j++) {
734 buf = &q->entry[j].ubuf;
737 buf->urb = usb_alloc_urb(0, GFP_KERNEL);
741 buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
/* Free the tx urbs of every AC queue (sg arrays are devm-owned). */
749 static void mt76u_free_tx(struct mt76_dev *dev)
751 struct mt76_queue *q;
754 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
756 for (j = 0; j < q->ndesc; j++)
757 usb_free_urb(q->entry[j].ubuf.urb);
/* Cancel all in-flight tx urbs on every AC queue (blocking). */
761 static void mt76u_stop_tx(struct mt76_dev *dev)
763 struct mt76_queue *q;
766 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
768 for (j = 0; j < q->ndesc; j++)
769 usb_kill_urb(q->entry[j].ubuf.urb);
/*
 * Quiesce datapath processing: disable both rx and tx tasklets
 * (tasklet_disable() waits for a running tasklet to finish); the
 * rx/tx stop calls are presumably on lines missing from this extract.
 */
773 void mt76u_stop_queues(struct mt76_dev *dev)
775 tasklet_disable(&dev->usb.rx_tasklet);
776 tasklet_disable(&dev->usb.tx_tasklet);
781 EXPORT_SYMBOL_GPL(mt76u_stop_queues);
/*
 * Synchronously cancel the tx-status polling work and clear the
 * MT76_READING_STATS flag so it can be scheduled again later.
 */
783 void mt76u_stop_stat_wk(struct mt76_dev *dev)
785 cancel_delayed_work_sync(&dev->usb.stat_work);
786 clear_bit(MT76_READING_STATS, &dev->state);
788 EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
/*
 * Tear down the USB datapath: stop the queues, then (presumably, on
 * lines missing from this extract) free rx and tx resources.
 */
790 void mt76u_queues_deinit(struct mt76_dev *dev)
792 mt76u_stop_queues(dev);
797 EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
/* Allocate rx then tx queues; returns the first failure. */
799 int mt76u_alloc_queues(struct mt76_dev *dev)
803 err = mt76u_alloc_rx(dev);
807 return mt76u_alloc_tx(dev);
809 EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
/* mt76 queue operations backed by USB bulk transfers. */
811 static const struct mt76_queue_ops usb_queue_ops = {
812 .tx_queue_skb = mt76u_tx_queue_skb,
813 .kick = mt76u_tx_kick,
/*
 * Initialize the USB transport for an mt76 device: set up rx/tx
 * tasklets, the tx-status delayed work, the rx skb queue, MCU
 * completion/mutexes and the control-transfer mutex, install the
 * USB bus and queue ops, and finally discover the bulk endpoints.
 */
816 int mt76u_init(struct mt76_dev *dev,
817 struct usb_interface *intf)
819 static const struct mt76_bus_ops mt76u_ops = {
825 struct mt76_usb *usb = &dev->usb;
827 tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
828 tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
829 INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
830 skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
832 init_completion(&usb->mcu.cmpl);
833 mutex_init(&usb->mcu.mutex);
835 mutex_init(&usb->usb_ctrl_mtx);
836 dev->bus = &mt76u_ops;
837 dev->queue_ops = &usb_queue_ops;
839 return mt76u_set_endpoints(intf, usb);
841 EXPORT_SYMBOL_GPL(mt76u_init);
843 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
844 MODULE_LICENSE("Dual BSD/GPL");