/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

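/* Return the length of the 802.11 header at the start of @data, or 0 if
 * the buffer is too short to hold the header it advertises.
 */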
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

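/* Build an skb for mac80211 from a single RX segment. With paged RX only
 * a small head is allocated and the bulk of the frame is attached as a
 * page fragment; otherwise the whole frame is copied into the skb.
 */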
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		memcpy(skb_put(skb, hdr_len), data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb always has enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	memcpy(skb_put(skb, copy), data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;
bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

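/* Process one RX segment: strip the DMA header and RXWI, validate the FCE
 * descriptor from the end of the segment and pass the frame to mac80211.
 */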
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information; we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

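/* Return the total length of the next segment in the RX buffer, DMA
 * headers included, or 0 if no well-formed segment is left.
 */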
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

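/* Process all segments aggregated into a single RX URB. If the buffer
 * carries enough data to be worth keeping for page fragments, allocate a
 * replacement page up front so the current one can be handed to the stack.
 */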
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}

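/* Pop the oldest completed RX entry off the ring under rx_lock; returns
 * NULL when nothing is pending.
 */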
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

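/* RX URB completion handler. Runs in interrupt context, so it only
 * advances the ring bookkeeping and defers all real work to the RX
 * tasklet.
 */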
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	/* do not schedule the rx tasklet if the urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

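/* Bottom half of the RX path: process every pending entry and immediately
 * resubmit its buffer for further transfers.
 */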
static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

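/* TX URB completion handler: detach the skb, hand it to the TX tasklet
 * for status reporting and wake the mac80211 queue once enough ring slots
 * have drained.
 */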
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

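/* Bottom half of the TX path: kick off the delayed TX stats read and
 * report the status of completed skbs outside of tx_lock.
 */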
static void mt7601u_tx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

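/* Submit one DMA-wrapped skb on the bulk OUT pipe of endpoint @ep and
 * stop the corresponding mac80211 queue when the ring fills up.
 */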
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after the device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

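/* Entry point from the mac80211 TX path: wrap the frame with the DMA
 * descriptor and submit it on the endpoint matching its hardware queue.
 */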
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

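/* Poison all RX URBs so that in-flight transfers complete with an error
 * and no new completions are delivered while the queues are torn down.
 */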
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

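/* Set up the TX/RX tasklets and queues and start RX; on failure
 * everything allocated so far is released via mt7601u_dma_cleanup().
 */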
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}