2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/etherdevice.h>
19 #include <net/ieee80211_radiotap.h>
20 #include <linux/if_arp.h>
21 #include <linux/moduleparam.h>
23 #include <linux/ipv6.h>
25 #include <linux/prefetch.h>
31 #include "txrx_edma.h"
33 static bool rtap_include_phy_info;
34 module_param(rtap_include_phy_info, bool, 0444);
35 MODULE_PARM_DESC(rtap_include_phy_info,
36 " Include PHY info in the radiotap header, default - no");
39 module_param(rx_align_2, bool, 0444);
40 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
43 module_param(rx_large_buf, bool, 0444);
44 MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
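/* These parameters are read once at module load time and cannot be
 * changed at runtime (permissions 0444). For example, assuming the
 * standard module name, both Rx tweaks can be enabled with:
 *
 *   modprobe wil6210 rx_align_2=1 rx_large_buf=1
 */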
46 static inline uint wil_rx_snaplen(void)
48 return rx_align_2 ? 6 : 0;
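/* When rx_align_2 is set, each received frame carries an extra 6-byte
 * SNAP field between the addresses and the ethertype (see the packet
 * layout comment in wil_vring_reap_rx); wil_rx_snaplen() accounts for
 * those bytes, which are stripped with memmove()/skb_pull() before the
 * skb is handed to the network stack.
 */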
51 /* wil_ring_wmark_low - low watermark for available descriptor space */
52 static inline int wil_ring_wmark_low(struct wil_ring *ring)
54 return ring->size / 8;
57 /* wil_ring_wmark_high - high watermark for available descriptor space */
58 static inline int wil_ring_wmark_high(struct wil_ring *ring)
60 return ring->size / 4;
63 /* returns true if num avail descriptors is lower than wmark_low */
64 static inline int wil_ring_avail_low(struct wil_ring *ring)
66 return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
69 /* returns true if num avail descriptors is higher than wmark_high */
70 static inline int wil_ring_avail_high(struct wil_ring *ring)
72 return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
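/* Illustrative numbers: for a ring of 1024 descriptors,
 * wil_ring_wmark_low() is 1024 / 8 = 128 and wil_ring_wmark_high() is
 * 1024 / 4 = 256, so wil_ring_avail_low() fires once fewer than 128
 * descriptors remain free and wil_ring_avail_high() only once more than
 * 256 are free again; the 1024-entry size is just an example.
 */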
75 /* returns true when all tx vrings are empty */
76 bool wil_is_tx_idle(struct wil6210_priv *wil)
79 unsigned long data_comp_to;
80 int min_ring_id = wil_get_min_tx_ring_id(wil);
82 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
83 struct wil_ring *vring = &wil->ring_tx[i];
84 int vring_index = vring - wil->ring_tx;
85 struct wil_ring_tx_data *txdata =
86 &wil->ring_tx_data[vring_index];
88 spin_lock(&txdata->lock);
90 if (!vring->va || !txdata->enabled) {
91 spin_unlock(&txdata->lock);
95 data_comp_to = jiffies + msecs_to_jiffies(
96 WIL_DATA_COMPLETION_TO_MS);
97 if (test_bit(wil_status_napi_en, wil->status)) {
98 while (!wil_ring_is_empty(vring)) {
99 if (time_after(jiffies, data_comp_to)) {
101 "TO waiting for idle tx\n");
102 spin_unlock(&txdata->lock);
105 wil_dbg_ratelimited(wil,
106 "tx vring is not empty -> NAPI\n");
107 spin_unlock(&txdata->lock);
108 napi_synchronize(&wil->napi_tx);
110 spin_lock(&txdata->lock);
111 if (!vring->va || !txdata->enabled)
116 spin_unlock(&txdata->lock);
122 static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
124 struct device *dev = wil_to_dev(wil);
125 size_t sz = vring->size * sizeof(vring->va[0]);
128 wil_dbg_misc(wil, "vring_alloc:\n");
130 BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
134 vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
140 /* vring->va should be aligned on its size rounded up to a power of 2.
141 * This is guaranteed by dma_alloc_coherent.
143 * HW has a limitation that all vring addresses must share the same
144 * upper 16 msb of the 48-bit address. To work around that,
145 * if we are using more than 32 bit addresses, switch to a 32 bit
146 * allocation before allocating the vring memory.
148 * There's no check for the return value of dma_set_mask_and_coherent,
149 * since we assume that if we were able to set the mask during
150 * initialization on this system, it will not fail if we set it again
152 if (wil->dma_addr_size > 32)
153 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
155 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
162 if (wil->dma_addr_size > 32)
163 dma_set_mask_and_coherent(dev,
164 DMA_BIT_MASK(wil->dma_addr_size));
166 /* initially, all descriptors are SW owned
167 * For Tx and Rx, ownership bit is at the same location, thus
170 for (i = 0; i < vring->size; i++) {
171 volatile struct vring_tx_desc *_d =
172 &vring->va[i].tx.legacy;
174 _d->dma.status = TX_DMA_STATUS_DU;
177 wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
178 vring->va, &vring->pa, vring->ctx);
183 static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
186 struct vring_tx_desc *d = &desc->legacy;
187 dma_addr_t pa = wil_desc_addr(&d->dma.addr);
188 u16 dmalen = le16_to_cpu(d->dma.length);
190 switch (ctx->mapped_as) {
191 case wil_mapped_as_single:
192 dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
194 case wil_mapped_as_page:
195 dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
202 static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
204 struct device *dev = wil_to_dev(wil);
205 size_t sz = vring->size * sizeof(vring->va[0]);
207 lockdep_assert_held(&wil->mutex);
209 int vring_index = vring - wil->ring_tx;
211 wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
212 vring_index, vring->size, vring->va,
213 &vring->pa, vring->ctx);
215 wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
216 vring->size, vring->va,
217 &vring->pa, vring->ctx);
220 while (!wil_ring_is_empty(vring)) {
226 struct vring_tx_desc dd, *d = &dd;
227 volatile struct vring_tx_desc *_d =
228 &vring->va[vring->swtail].tx.legacy;
230 ctx = &vring->ctx[vring->swtail];
233 "ctx(%d) was already completed\n",
235 vring->swtail = wil_ring_next_tail(vring);
239 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
241 dev_kfree_skb_any(ctx->skb);
242 vring->swtail = wil_ring_next_tail(vring);
244 struct vring_rx_desc dd, *d = &dd;
245 volatile struct vring_rx_desc *_d =
246 &vring->va[vring->swhead].rx.legacy;
248 ctx = &vring->ctx[vring->swhead];
250 pa = wil_desc_addr(&d->dma.addr);
251 dmalen = le16_to_cpu(d->dma.length);
252 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
254 wil_ring_advance_head(vring, 1);
257 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
265 * Allocate one skb for Rx VRING
267 * Safe to call from IRQ
269 static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
272 struct device *dev = wil_to_dev(wil);
273 unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
274 struct vring_rx_desc dd, *d = &dd;
275 volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
277 struct sk_buff *skb = dev_alloc_skb(sz + headroom);
282 skb_reserve(skb, headroom);
286 * Make sure that the network stack calculates checksum for packets
287 * which failed the HW checksum calculation
289 skb->ip_summed = CHECKSUM_NONE;
291 pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
292 if (unlikely(dma_mapping_error(dev, pa))) {
297 d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
298 wil_desc_addr_set(&d->dma.addr, pa);
299 /* ip_length don't care */
301 /* error don't care */
302 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
303 d->dma.length = cpu_to_le16(sz);
305 vring->ctx[i].skb = skb;
311 * Adds radiotap header
313 * Any error indicated as "Bad FCS"
315 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
316 * - Rx descriptor: 32 bytes
319 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
322 struct wil6210_rtap {
323 struct ieee80211_radiotap_header rthdr;
324 /* fields should be in the order of bits in rthdr.it_present */
328 __le16 chnl_freq __aligned(2);
335 struct wil6210_rtap_vendor {
336 struct wil6210_rtap rtap;
338 u8 vendor_oui[3] __aligned(2);
343 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
344 struct wil6210_rtap_vendor *rtap_vendor;
345 int rtap_len = sizeof(struct wil6210_rtap);
346 int phy_length = 0; /* phy info header size, bytes */
347 static char phy_data[128];
348 struct ieee80211_channel *ch = wil->monitor_chandef.chan;
350 if (rtap_include_phy_info) {
351 rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
352 /* calculate additional length */
353 if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
355 * PHY info starts at an 8-byte boundary and consists of
356 * 8-byte lines; the last line may be partially written
357 * (HW bug), thus FW configures the last line to be
358 * excessive and the driver skips it.
360 int len = min_t(int, 8 + sizeof(phy_data),
361 wil_rxdesc_phy_length(d));
364 void *p = skb_tail_pointer(skb);
365 void *pa = PTR_ALIGN(p, 8);
367 if (skb_tailroom(skb) >= len + (pa - p)) {
368 phy_length = len - 8;
369 memcpy(phy_data, pa, phy_length);
373 rtap_len += phy_length;
376 if (skb_headroom(skb) < rtap_len &&
377 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
378 wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
382 rtap_vendor = skb_push(skb, rtap_len);
383 memset(rtap_vendor, 0, rtap_len);
385 rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
386 rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
387 rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
388 (1 << IEEE80211_RADIOTAP_FLAGS) |
389 (1 << IEEE80211_RADIOTAP_CHANNEL) |
390 (1 << IEEE80211_RADIOTAP_MCS));
391 if (d->dma.status & RX_DMA_STATUS_ERROR)
392 rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
394 rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
395 rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
397 rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
398 rtap_vendor->rtap.mcs_flags = 0;
399 rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
401 if (rtap_include_phy_info) {
402 rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
403 IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
404 /* OUI for Wilocity 04:ce:14 */
405 rtap_vendor->vendor_oui[0] = 0x04;
406 rtap_vendor->vendor_oui[1] = 0xce;
407 rtap_vendor->vendor_oui[2] = 0x14;
408 rtap_vendor->vendor_ns = 1;
409 /* Rx descriptor + PHY data */
410 rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
412 memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
413 memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
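/* The resulting radiotap header is laid out as declared in
 * struct wil6210_rtap above: the standard ieee80211_radiotap_header,
 * then FLAGS (bad-FCS indication on Rx errors), CHANNEL (frequency of
 * the monitor channel, 58320 MHz if none is set) and MCS (the MCS index
 * reported by the Rx descriptor). When rtap_include_phy_info is set, a
 * Wilocity vendor namespace is appended that carries the raw 32-byte Rx
 * descriptor followed by the captured PHY data.
 */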
418 static bool wil_is_rx_idle(struct wil6210_priv *wil)
420 struct vring_rx_desc *_d;
421 struct wil_ring *ring = &wil->ring_rx;
423 _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
424 if (_d->dma.status & RX_DMA_STATUS_DU)
431 * reap 1 frame from @swhead
433 * Rx descriptor copied to skb->cb
435 * Safe to call from IRQ
437 static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
438 struct wil_ring *vring)
440 struct device *dev = wil_to_dev(wil);
441 struct wil6210_vif *vif;
442 struct net_device *ndev;
443 volatile struct vring_rx_desc *_d;
444 struct vring_rx_desc *d;
447 unsigned int snaplen = wil_rx_snaplen();
448 unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
453 struct wil_net_stats *stats;
455 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
458 if (unlikely(wil_ring_is_empty(vring)))
461 i = (int)vring->swhead;
462 _d = &vring->va[i].rx.legacy;
463 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
464 /* it is not an error, we just reached the end of the Rx done area */
468 skb = vring->ctx[i].skb;
469 vring->ctx[i].skb = NULL;
470 wil_ring_advance_head(vring, 1);
472 wil_err(wil, "No Rx skb at [%d]\n", i);
475 d = wil_skb_rxdesc(skb);
477 pa = wil_desc_addr(&d->dma.addr);
479 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
480 dmalen = le16_to_cpu(d->dma.length);
482 trace_wil6210_rx(i, d);
483 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
484 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
485 (const void *)d, sizeof(*d), false);
487 cid = wil_rxdesc_cid(d);
488 mid = wil_rxdesc_mid(d);
489 vif = wil->vifs[mid];
491 if (unlikely(!vif)) {
492 wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
497 ndev = vif_to_ndev(vif);
498 stats = &wil->sta[cid].stats;
500 if (unlikely(dmalen > sz)) {
501 wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
502 stats->rx_large_frame++;
506 skb_trim(skb, dmalen);
510 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
511 skb->data, skb_headlen(skb), false);
513 stats->last_mcs_rx = wil_rxdesc_mcs(d);
514 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
515 stats->rx_per_mcs[stats->last_mcs_rx]++;
517 /* use radiotap header only if required */
518 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
519 wil_rx_add_radiotap_header(wil, skb);
521 /* no extra checks if in sniffer mode */
522 if (ndev->type != ARPHRD_ETHER)
524 /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR)
525 * The driver should recognize them by the frame type found in the
526 * Rx descriptor. If the type is not data, the buffer holds the 802.11 frame as is
528 ftype = wil_rxdesc_ftype(d) << 2;
529 if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
530 u8 fc1 = wil_rxdesc_fc1(d);
531 int tid = wil_rxdesc_tid(d);
532 u16 seq = wil_rxdesc_seq(d);
535 "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
536 fc1, mid, cid, tid, seq);
537 stats->rx_non_data_frame++;
538 if (wil_is_back_req(fc1)) {
540 "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
542 wil_rx_bar(wil, vif, cid, tid, seq);
544 /* print again all info. One can enable only this
545 * without overhead for printing every Rx frame
548 "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
549 fc1, mid, cid, tid, seq);
550 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
551 (const void *)d, sizeof(*d), false);
552 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
553 skb->data, skb_headlen(skb), false);
559 if (unlikely(skb->len < ETH_HLEN + snaplen)) {
560 wil_err(wil, "Short frame, len = %d\n", skb->len);
561 stats->rx_short_frame++;
566 /* L4 IDENT is set when HW calculated the checksum; check the status
567 * and in case of error drop the packet -
568 * higher stack layers will handle retransmission (if required)
570 if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
571 /* L4 protocol identified, csum calculated */
572 if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
573 skb->ip_summed = CHECKSUM_UNNECESSARY;
574 /* If HW reports a bad checksum, let the IP stack re-check it.
575 * For example, HW doesn't understand the Microsoft IP stack, which
576 * mis-calculates the TCP checksum - if it should be 0x0,
577 * it writes 0xffff in violation of RFC 1624
580 stats->rx_csum_err++;
585 * +-------+-------+---------+------------+------+
586 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
587 * +-------+-------+---------+------------+------+
588 * Need to remove SNAP, shifting SA and DA forward
590 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
591 skb_pull(skb, snaplen);
598 * Allocate and fill up to @count buffers in the rx ring;
599 * buffers are posted at @swtail.
600 * Note: we have a single RX queue for servicing all VIFs, but we
601 * allocate skbs with headroom according to the main interface only. This
602 * means it will not work with a monitor interface together with other VIFs.
603 * Currently we only support a monitor interface on its own without other VIFs,
604 * and we will need to fix this code once we add such support.
606 static int wil_rx_refill(struct wil6210_priv *wil, int count)
608 struct net_device *ndev = wil->main_ndev;
609 struct wil_ring *v = &wil->ring_rx;
612 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
613 WIL6210_RTAP_SIZE : 0;
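/* The refill loop below stops one entry short of @swhead, so the ring is
 * never filled completely; keeping one descriptor unused allows a single
 * head/tail pair to distinguish a full ring from an empty one.
 */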
615 for (; next_tail = wil_ring_next_tail(v),
616 (next_tail != v->swhead) && (count-- > 0);
617 v->swtail = next_tail) {
618 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
620 wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
626 /* make sure all writes to descriptors (shared memory) are done before
627 * committing them to HW
631 wil_w(wil, v->hwtail, v->swtail);
637 * reverse_memcmp - Compare two areas of memory, in reverse order
638 * @cs: One area of memory
639 * @ct: Another area of memory
640 * @count: The size of the area.
642 * Cut'n'paste from original memcmp (see lib/string.c)
643 * with minimal modifications
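 *
 * Example (illustrative): reverse_memcmp() compares the two areas starting
 * from their last byte. wil_rx_crypto_check() below uses it on the 6-byte
 * GCMP packet numbers and treats a result <= 0 (new PN not greater than the
 * stored one when compared last-byte-first) as a replayed frame.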
645 int reverse_memcmp(const void *cs, const void *ct, size_t count)
647 const unsigned char *su1, *su2;
650 for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
651 --su1, --su2, count--) {
659 static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
661 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
662 int cid = wil_rxdesc_cid(d);
663 int tid = wil_rxdesc_tid(d);
664 int key_id = wil_rxdesc_key_id(d);
665 int mc = wil_rxdesc_mcast(d);
666 struct wil_sta_info *s = &wil->sta[cid];
667 struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
668 &s->tid_crypto_rx[tid];
669 struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
670 const u8 *pn = (u8 *)&d->mac.pn_15_0;
673 wil_err_ratelimited(wil,
674 "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
675 cid, tid, mc, key_id);
679 if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
680 wil_err_ratelimited(wil,
681 "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
682 cid, tid, mc, key_id, pn, cc->pn);
685 memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
690 static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
691 struct wil_net_stats *stats)
693 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
695 if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
696 (d->dma.error & RX_DMA_ERROR_MIC)) {
697 stats->rx_mic_error++;
698 wil_dbg_txrx(wil, "MIC error, dropping packet\n");
705 static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
708 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
710 *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
711 *security = wil_rxdesc_security(d);
715 * Pass Rx packet to the netif. Update statistics.
716 * Called in softirq context (NAPI poll).
718 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
720 gro_result_t rc = GRO_NORMAL;
721 struct wil6210_vif *vif = ndev_to_vif(ndev);
722 struct wil6210_priv *wil = ndev_to_wil(ndev);
723 struct wireless_dev *wdev = vif_to_wdev(vif);
724 unsigned int len = skb->len;
727 struct ethhdr *eth = (void *)skb->data;
728 /* here we look at the DA, not A1, thus the Rx descriptor's 'mcast'
729 * indication is not suitable; we need to look at the data
731 int mcast = is_multicast_ether_addr(eth->h_dest);
732 struct wil_net_stats *stats;
733 struct sk_buff *xmit_skb = NULL;
734 static const char * const gro_res_str[] = {
735 [GRO_MERGED] = "GRO_MERGED",
736 [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
737 [GRO_HELD] = "GRO_HELD",
738 [GRO_NORMAL] = "GRO_NORMAL",
739 [GRO_DROP] = "GRO_DROP",
740 [GRO_CONSUMED] = "GRO_CONSUMED",
743 wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
745 stats = &wil->sta[cid].stats;
747 if (ndev->features & NETIF_F_RXHASH)
748 /* fake L4 hash to ensure it won't be re-calculated later;
749 * set the hash to any non-zero value to activate the RPS
750 * mechanism, the core will be chosen according
751 * to the user-level RPS configuration.
753 skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
757 if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
764 /* check errors reported by HW and update statistics */
765 if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
770 if (wdev->iftype == NL80211_IFTYPE_STATION) {
771 if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
772 /* mcast packet looped back to us */
777 } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
779 /* send multicast frames both to higher layers in
780 * local net stack and back to the wireless medium
782 xmit_skb = skb_copy(skb, GFP_ATOMIC);
784 int xmit_cid = wil_find_cid(wil, vif->mid,
788 /* The destination station is associated to
789 * this AP (in this VLAN), so send the frame
790 * directly to it and do not pass it to local
799 /* Send to wireless media and increase priority by 256 to
800 * keep the received priority instead of reclassifying
801 * the frame (see cfg80211_classify8021d).
803 xmit_skb->dev = ndev;
804 xmit_skb->priority += 256;
805 xmit_skb->protocol = htons(ETH_P_802_3);
806 skb_reset_network_header(xmit_skb);
807 skb_reset_mac_header(xmit_skb);
808 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
809 dev_queue_xmit(xmit_skb);
812 if (skb) { /* deliver to local stack */
813 skb->protocol = eth_type_trans(skb, ndev);
815 rc = napi_gro_receive(&wil->napi_rx, skb);
816 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
817 len, gro_res_str[rc]);
820 /* statistics. rc set to GRO_NORMAL for AP bridging */
821 if (unlikely(rc == GRO_DROP)) {
822 ndev->stats.rx_dropped++;
824 wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
826 ndev->stats.rx_packets++;
828 ndev->stats.rx_bytes += len;
829 stats->rx_bytes += len;
831 ndev->stats.multicast++;
836 * Process all completed skb's from the Rx VRING
838 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
840 void wil_rx_handle(struct wil6210_priv *wil, int *quota)
842 struct net_device *ndev = wil->main_ndev;
843 struct wireless_dev *wdev = ndev->ieee80211_ptr;
844 struct wil_ring *v = &wil->ring_rx;
847 if (unlikely(!v->va)) {
848 wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
851 wil_dbg_txrx(wil, "rx_handle\n");
852 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
855 /* monitor is currently supported on main interface only */
856 if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
858 skb_reset_mac_header(skb);
859 skb->ip_summed = CHECKSUM_UNNECESSARY;
860 skb->pkt_type = PACKET_OTHERHOST;
861 skb->protocol = htons(ETH_P_802_2);
862 wil_netif_rx_any(skb, ndev);
864 wil_rx_reorder(wil, skb);
867 wil_rx_refill(wil, v->size);
870 static void wil_rx_buf_len_init(struct wil6210_priv *wil)
872 wil->rx_buf_len = rx_large_buf ?
873 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
874 if (mtu_max > wil->rx_buf_len) {
875 /* do not allow RX buffers to be smaller than mtu_max, for
876 * backward compatibility (mtu_max parameter was also used
877 * to support receiving large packets)
879 wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
880 wil->rx_buf_len = mtu_max;
884 static int wil_rx_init(struct wil6210_priv *wil, uint order)
886 struct wil_ring *vring = &wil->ring_rx;
889 wil_dbg_misc(wil, "rx_init\n");
892 wil_err(wil, "Rx ring already allocated\n");
896 wil_rx_buf_len_init(wil);
898 vring->size = 1 << order;
900 rc = wil_vring_alloc(wil, vring);
904 rc = wmi_rx_chain_add(wil, vring);
908 rc = wil_rx_refill(wil, vring->size);
914 wil_vring_free(wil, vring);
919 static void wil_rx_fini(struct wil6210_priv *wil)
921 struct wil_ring *vring = &wil->ring_rx;
923 wil_dbg_misc(wil, "rx_fini\n");
926 wil_vring_free(wil, vring);
929 static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
930 u32 len, int vring_index)
932 struct vring_tx_desc *d = &desc->legacy;
934 wil_desc_addr_set(&d->dma.addr, pa);
935 d->dma.ip_length = 0;
936 /* 0..6: mac_length; 7: ip_version (0 - IPv6, 1 - IPv4) */
937 d->dma.b11 = 0/*14 | BIT(7)*/;
939 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
940 d->dma.length = cpu_to_le16((u16)len);
941 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
945 d->mac.ucode_cmd = 0;
946 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
947 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
948 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
953 void wil_tx_data_init(struct wil_ring_tx_data *txdata)
955 spin_lock_bh(&txdata->lock);
956 txdata->dot1x_open = 0;
959 txdata->last_idle = 0;
961 txdata->agg_wsize = 0;
962 txdata->agg_timeout = 0;
963 txdata->agg_amsdu = 0;
964 txdata->addba_in_progress = false;
965 txdata->mid = U8_MAX;
966 spin_unlock_bh(&txdata->lock);
969 static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
972 struct wil6210_priv *wil = vif_to_wil(vif);
974 struct wmi_vring_cfg_cmd cmd = {
975 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
979 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
980 .ring_size = cpu_to_le16(size),
983 .cidxtid = mk_cidxtid(cid, tid),
984 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
989 .priority = cpu_to_le16(0),
990 .timeslot_us = cpu_to_le16(0xfff),
995 struct wmi_cmd_hdr wmi;
996 struct wmi_vring_cfg_done_event cmd;
998 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1000 struct wil_ring *vring = &wil->ring_tx[id];
1001 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1003 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
1004 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1005 lockdep_assert_held(&wil->mutex);
1008 wil_err(wil, "Tx ring [%d] already allocated\n", id);
1013 wil_tx_data_init(txdata);
1014 vring->is_rx = false;
1016 rc = wil_vring_alloc(wil, vring);
1020 wil->ring2cid_tid[id][0] = cid;
1021 wil->ring2cid_tid[id][1] = tid;
1023 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1026 txdata->dot1x_open = true;
1027 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
1028 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
1032 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1033 wil_err(wil, "Tx config failed, status 0x%02x\n",
1039 spin_lock_bh(&txdata->lock);
1040 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1041 txdata->mid = vif->mid;
1042 txdata->enabled = 1;
1043 spin_unlock_bh(&txdata->lock);
1045 if (txdata->dot1x_open && (agg_wsize >= 0))
1046 wil_addba_tx_request(wil, id, agg_wsize);
1050 spin_lock_bh(&txdata->lock);
1051 txdata->dot1x_open = false;
1052 txdata->enabled = 0;
1053 spin_unlock_bh(&txdata->lock);
1054 wil_vring_free(wil, vring);
1055 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
1056 wil->ring2cid_tid[id][1] = 0;
1063 int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1065 struct wil6210_priv *wil = vif_to_wil(vif);
1067 struct wmi_bcast_vring_cfg_cmd cmd = {
1068 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
1072 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1073 .ring_size = cpu_to_le16(size),
1076 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1080 struct wmi_cmd_hdr wmi;
1081 struct wmi_vring_cfg_done_event cmd;
1082 } __packed reply = {
1083 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1085 struct wil_ring *vring = &wil->ring_tx[id];
1086 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1088 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
1089 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1090 lockdep_assert_held(&wil->mutex);
1093 wil_err(wil, "Tx ring [%d] already allocated\n", id);
1098 wil_tx_data_init(txdata);
1099 vring->is_rx = false;
1101 rc = wil_vring_alloc(wil, vring);
1105 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
1106 wil->ring2cid_tid[id][1] = 0; /* TID */
1108 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1111 txdata->dot1x_open = true;
1112 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
1114 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
1118 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1119 wil_err(wil, "Tx config failed, status 0x%02x\n",
1125 spin_lock_bh(&txdata->lock);
1126 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1127 txdata->mid = vif->mid;
1128 txdata->enabled = 1;
1129 spin_unlock_bh(&txdata->lock);
1133 spin_lock_bh(&txdata->lock);
1134 txdata->enabled = 0;
1135 txdata->dot1x_open = false;
1136 spin_unlock_bh(&txdata->lock);
1137 wil_vring_free(wil, vring);
1143 static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
1144 struct wil6210_vif *vif,
1145 struct sk_buff *skb)
1148 struct ethhdr *eth = (void *)skb->data;
1149 int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
1150 int min_ring_id = wil_get_min_tx_ring_id(wil);
1155 /* TODO: fix for multiple TID */
1156 for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
1157 if (!wil->ring_tx_data[i].dot1x_open &&
1158 skb->protocol != cpu_to_be16(ETH_P_PAE))
1160 if (wil->ring2cid_tid[i][0] == cid) {
1161 struct wil_ring *v = &wil->ring_tx[i];
1162 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1164 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
1166 if (v->va && txdata->enabled) {
1170 "find_tx_ucast: vring[%d] not valid\n",
1180 static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1181 struct wil_ring *ring, struct sk_buff *skb);
1183 static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
1184 struct wil6210_vif *vif,
1185 struct sk_buff *skb)
1187 struct wil_ring *ring;
1190 struct wil_ring_tx_data *txdata;
1191 int min_ring_id = wil_get_min_tx_ring_id(wil);
1193 /* In the STA mode, it is expected to have only 1 VRING
1194 * for the AP we connected to.
1195 * find 1-st vring eligible for this skb and use it.
1197 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1198 ring = &wil->ring_tx[i];
1199 txdata = &wil->ring_tx_data[i];
1200 if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
1203 cid = wil->ring2cid_tid[i][0];
1204 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1207 if (!wil->ring_tx_data[i].dot1x_open &&
1208 skb->protocol != cpu_to_be16(ETH_P_PAE))
1211 wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
1216 wil_dbg_txrx(wil, "Tx while no rings active?\n");
1221 /* Use one of 2 strategies:
1223 * 1. New (real broadcast):
1224 * use dedicated broadcast vring
1225 * 2. Old (pseudo-DMS):
1226 * Find 1-st vring and return it;
1227 * duplicate skb and send it to other active vrings;
1228 * in all cases override dest address to unicast peer's address
1229 * Use old strategy when new is not supported yet:
1232 static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
1233 struct wil6210_vif *vif,
1234 struct sk_buff *skb)
1237 struct wil_ring_tx_data *txdata;
1238 int i = vif->bcast_ring;
1242 v = &wil->ring_tx[i];
1243 txdata = &wil->ring_tx_data[i];
1244 if (!v->va || !txdata->enabled)
1246 if (!wil->ring_tx_data[i].dot1x_open &&
1247 skb->protocol != cpu_to_be16(ETH_P_PAE))
1253 static void wil_set_da_for_vring(struct wil6210_priv *wil,
1254 struct sk_buff *skb, int vring_index)
1256 struct ethhdr *eth = (void *)skb->data;
1257 int cid = wil->ring2cid_tid[vring_index][0];
1259 ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
1262 static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
1263 struct wil6210_vif *vif,
1264 struct sk_buff *skb)
1266 struct wil_ring *v, *v2;
1267 struct sk_buff *skb2;
1270 struct ethhdr *eth = (void *)skb->data;
1271 char *src = eth->h_source;
1272 struct wil_ring_tx_data *txdata, *txdata2;
1273 int min_ring_id = wil_get_min_tx_ring_id(wil);
1275 /* find 1-st vring eligible for data */
1276 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1277 v = &wil->ring_tx[i];
1278 txdata = &wil->ring_tx_data[i];
1279 if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1282 cid = wil->ring2cid_tid[i][0];
1283 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1285 if (!wil->ring_tx_data[i].dot1x_open &&
1286 skb->protocol != cpu_to_be16(ETH_P_PAE))
1289 /* don't Tx back to source when re-routing Rx->Tx at the AP */
1290 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1296 wil_dbg_txrx(wil, "Tx while no vrings active?\n");
1301 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
1302 wil_set_da_for_vring(wil, skb, i);
1304 /* find other active vrings and duplicate skb for each */
1305 for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
1306 v2 = &wil->ring_tx[i];
1307 txdata2 = &wil->ring_tx_data[i];
1308 if (!v2->va || txdata2->mid != vif->mid)
1310 cid = wil->ring2cid_tid[i][0];
1311 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1313 if (!wil->ring_tx_data[i].dot1x_open &&
1314 skb->protocol != cpu_to_be16(ETH_P_PAE))
1317 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1320 skb2 = skb_copy(skb, GFP_ATOMIC);
1322 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
1323 wil_set_da_for_vring(wil, skb2, i);
1324 wil_tx_ring(wil, vif, v2, skb2);
1325 /* successful call to wil_tx_ring takes skb2 ref */
1326 dev_kfree_skb_any(skb2);
1328 wil_err(wil, "skb_copy failed\n");
1336 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
1338 d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
1342 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
1343 * @skb is used to obtain the protocol and headers length.
1344 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
1345 * 2 - middle, 3 - last descriptor.
1348 static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
1349 struct sk_buff *skb,
1350 int tso_desc_type, bool is_ipv4,
1351 int tcp_hdr_len, int skb_net_hdr_len)
1353 d->dma.b11 = ETH_HLEN; /* MAC header length */
1354 d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
1356 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1357 /* L4 header len: TCP header length */
1358 d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1360 /* Setup TSO: bit and desc type */
1361 d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
1362 (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
1363 d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
1365 d->dma.ip_length = skb_net_hdr_len;
1366 /* Enable TCP/UDP checksum */
1367 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1368 /* Calculate pseudo-header */
1369 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1373 * Sets the descriptor @d up for csum. The corresponding
1374 * @skb is used to obtain the protocol and headers length.
1375 * Returns 0 on success; a non-zero return means the csum offload could
1376 * not be set up and the caller (__wil_tx_ring) drops the packet.
1378 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
1379 * is "if unrolling" to optimize the critical path.
1382 static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
1383 struct sk_buff *skb){
1386 if (skb->ip_summed != CHECKSUM_PARTIAL)
1389 d->dma.b11 = ETH_HLEN; /* MAC header length */
1391 switch (skb->protocol) {
1392 case cpu_to_be16(ETH_P_IP):
1393 protocol = ip_hdr(skb)->protocol;
1394 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
1396 case cpu_to_be16(ETH_P_IPV6):
1397 protocol = ipv6_hdr(skb)->nexthdr;
1405 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1406 /* L4 header len: TCP header length */
1408 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1411 /* L4 header len: UDP header length */
1413 (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1419 d->dma.ip_length = skb_network_header_len(skb);
1420 /* Enable TCP/UDP checksum */
1421 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1422 /* Calculate pseudo-header */
1423 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
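/* For example, for a TCPv4 skb with ip_summed == CHECKSUM_PARTIAL the
 * setup above ends up with b11 = ETH_HLEN | L3T_IPV4, the TCP L4 type
 * and TCP header length in d0, ip_length = IP header length, and the
 * TCP_UDP_CHECKSUM_EN + PSEUDO_HEADER_CALC_EN bits set, so the HW
 * fills in the transport checksum on transmit.
 */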
1428 static inline void wil_tx_last_desc(struct vring_tx_desc *d)
1430 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
1431 BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
1432 BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1435 static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
1437 d->dma.d0 |= wil_tso_type_lst <<
1438 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
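/* TSO path overview (descriptive sketch of the function below):
 * the skb headers are mapped once into a dedicated "header" descriptor
 * (wil_tso_type_hdr), and the linear remainder plus every page fragment
 * is then split into chunks of at most gso_size (mss) bytes. Each mss
 * worth of data becomes a chain of first/middle/last descriptors
 * (see wil_tx_desc_offload_setup_tso()), the per-segment descriptor
 * counts are recorded in ctx->nr_frags for tx-complete accounting, and
 * only after all shadow descriptors are filled is the header descriptor
 * published and the ring head advanced.
 */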
1441 static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1442 struct wil_ring *vring, struct sk_buff *skb)
1444 struct device *dev = wil_to_dev(wil);
1446 /* point to descriptors in shared memory */
1447 volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
1448 *_first_desc = NULL;
1450 /* pointers to shadow descriptors */
1451 struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
1452 *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
1453 *first_desc = &first_desc_mem;
1455 /* pointer to shadow descriptors' context */
1456 struct wil_ctx *hdr_ctx, *first_ctx = NULL;
1458 int descs_used = 0; /* total number of used descriptors */
1459 int sg_desc_cnt = 0; /* number of descriptors for current mss*/
1461 u32 swhead = vring->swhead;
1462 int used, avail = wil_ring_avail_tx(vring);
1463 int nr_frags = skb_shinfo(skb)->nr_frags;
1464 int min_desc_required = nr_frags + 1;
1465 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
1466 int f, len, hdrlen, headlen;
1467 int vring_index = vring - wil->ring_tx;
1468 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
1471 const skb_frag_t *frag = NULL;
1474 int hdr_compensation_need = true;
1475 int desc_tso_type = wil_tso_type_first;
1478 int skb_net_hdr_len;
1482 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
1485 if (unlikely(!txdata->enabled))
1488 /* A typical 4K page holds 3-4 payloads; we assume each fragment
1489 * is a full payload, that's how min_desc_required has been
1490 * calculated. In reality we might need more or fewer descriptors;
1491 * this is the initial check only.
1493 if (unlikely(avail < min_desc_required)) {
1494 wil_err_ratelimited(wil,
1495 "TSO: Tx ring[%2d] full. No space for %d fragments\n",
1496 vring_index, min_desc_required);
1500 /* Header Length = MAC header len + IP header len + TCP header len*/
1502 (int)skb_network_header_len(skb) +
1505 gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
1508 /* TCP v4, zero out the IP length and IPv4 checksum fields
1509 * as required by the offloading doc
1511 ip_hdr(skb)->tot_len = 0;
1512 ip_hdr(skb)->check = 0;
1516 /* TCP v6, zero out the payload length */
1517 ipv6_hdr(skb)->payload_len = 0;
1521 /* types other than TCPv4 or TCPv6 are not supported for TSO.
1522 * It is also illegal for both to be set simultaneously
1527 if (skb->ip_summed != CHECKSUM_PARTIAL)
1530 /* tcp header length and skb network header length are fixed for all
1531 * of the packet's descriptors - read them once here
1533 tcp_hdr_len = tcp_hdrlen(skb);
1534 skb_net_hdr_len = skb_network_header_len(skb);
1536 _hdr_desc = &vring->va[i].tx.legacy;
1538 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
1539 if (unlikely(dma_mapping_error(dev, pa))) {
1540 wil_err(wil, "TSO: Skb head DMA map error\n");
1544 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
1545 hdrlen, vring_index);
1546 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
1547 tcp_hdr_len, skb_net_hdr_len);
1548 wil_tx_last_desc(hdr_desc);
1550 vring->ctx[i].mapped_as = wil_mapped_as_single;
1551 hdr_ctx = &vring->ctx[i];
1554 headlen = skb_headlen(skb) - hdrlen;
1556 for (f = headlen ? -1 : 0; f < nr_frags; f++) {
1559 wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
1562 frag = &skb_shinfo(skb)->frags[f];
1564 wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
1569 "TSO: len %d, rem_data %d, descs_used %d\n",
1570 len, rem_data, descs_used);
1572 if (descs_used == avail) {
1573 wil_err_ratelimited(wil, "TSO: ring overflow\n");
1578 lenmss = min_t(int, rem_data, len);
1579 i = (swhead + descs_used) % vring->size;
1580 wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
1583 pa = skb_frag_dma_map(dev, frag,
1584 frag->size - len, lenmss,
1586 vring->ctx[i].mapped_as = wil_mapped_as_page;
1588 pa = dma_map_single(dev,
1590 skb_headlen(skb) - headlen,
1593 vring->ctx[i].mapped_as = wil_mapped_as_single;
1597 if (unlikely(dma_mapping_error(dev, pa))) {
1598 wil_err(wil, "TSO: DMA map page error\n");
1602 _desc = &vring->va[i].tx.legacy;
1605 _first_desc = _desc;
1606 first_ctx = &vring->ctx[i];
1612 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1613 pa, lenmss, vring_index);
1614 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
1615 is_ipv4, tcp_hdr_len,
1618 /* use tso_type_first only once */
1619 desc_tso_type = wil_tso_type_mid;
1621 descs_used++; /* desc used so far */
1622 sg_desc_cnt++; /* desc used for this segment */
1627 "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
1628 len, rem_data, descs_used, sg_desc_cnt);
1630 /* Close the segment if we reached the mss size or the last frag */
1631 if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
1632 if (hdr_compensation_need) {
1633 /* first segment include hdr desc for
1636 hdr_ctx->nr_frags = sg_desc_cnt;
1637 wil_tx_desc_set_nr_frags(first_desc,
1640 hdr_compensation_need = false;
1642 wil_tx_desc_set_nr_frags(first_desc,
1645 first_ctx->nr_frags = sg_desc_cnt - 1;
1647 wil_tx_last_desc(d);
1649 /* first descriptor may also be the last
1650 * for this mss - make sure not to copy
1653 if (first_desc != d)
1654 *_first_desc = *first_desc;
1656 /* last descriptor will be copied at the end
1657 * of this TSO processing
1659 if (f < nr_frags - 1 || len > 0)
1665 } else if (first_desc != d) /* update mid descriptor */
1670 /* first descriptor may also be the last.
1671 * in this case the d pointer is invalid
1673 if (_first_desc == _desc)
1676 /* Last data descriptor */
1677 wil_set_tx_desc_last_tso(d);
1680 /* Fill the total number of descriptors in first desc (hdr) */
1681 wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
1682 *_hdr_desc = *hdr_desc;
1684 /* hold reference to skb
1685 * to prevent skb release before accounting
1686 * in case of immediate "tx done"
1688 vring->ctx[i].skb = skb_get(skb);
1690 /* performance monitoring */
1691 used = wil_ring_used_tx(vring);
1692 if (wil_val_in_range(wil->ring_idle_trsh,
1693 used, used + descs_used)) {
1694 txdata->idle += get_cycles() - txdata->last_idle;
1695 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1696 vring_index, used, used + descs_used);
1699 /* Make sure to advance the head only after descriptor update is done.
1700 * This will prevent a race condition where the completion thread
1701 * will see the DU bit set from previous run and will handle the
1702 * skb before it was completed.
1706 /* advance swhead */
1707 wil_ring_advance_head(vring, descs_used);
1708 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1710 /* make sure all writes to descriptors (shared memory) are done before
1711 * committing them to HW
1715 if (wil->tx_latency)
1716 *(ktime_t *)&skb->cb = ktime_get();
1718 memset(skb->cb, 0, sizeof(ktime_t));
1720 wil_w(wil, vring->hwtail, vring->swhead);
1724 while (descs_used > 0) {
1725 struct wil_ctx *ctx;
1727 i = (swhead + descs_used - 1) % vring->size;
1728 d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
1729 _desc = &vring->va[i].tx.legacy;
1731 _desc->dma.status = TX_DMA_STATUS_DU;
1732 ctx = &vring->ctx[i];
1733 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
1734 memset(ctx, 0, sizeof(*ctx));
1741 static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1742 struct wil_ring *ring, struct sk_buff *skb)
1744 struct device *dev = wil_to_dev(wil);
1745 struct vring_tx_desc dd, *d = &dd;
1746 volatile struct vring_tx_desc *_d;
1747 u32 swhead = ring->swhead;
1748 int avail = wil_ring_avail_tx(ring);
1749 int nr_frags = skb_shinfo(skb)->nr_frags;
1751 int ring_index = ring - wil->ring_tx;
1752 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
1756 bool mcast = (ring_index == vif->bcast_ring);
1757 uint len = skb_headlen(skb);
1759 wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
1760 skb->len, ring_index, nr_frags);
1762 if (unlikely(!txdata->enabled))
1765 if (unlikely(avail < 1 + nr_frags)) {
1766 wil_err_ratelimited(wil,
1767 "Tx ring[%2d] full. No space for %d fragments\n",
1768 ring_index, 1 + nr_frags);
1771 _d = &ring->va[i].tx.legacy;
1773 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1775 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
1776 skb_headlen(skb), skb->data, &pa);
1777 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
1778 skb->data, skb_headlen(skb), false);
1780 if (unlikely(dma_mapping_error(dev, pa)))
1782 ring->ctx[i].mapped_as = wil_mapped_as_single;
1784 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
1786 if (unlikely(mcast)) {
1787 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
1788 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
1789 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
1791 /* Process TCP/UDP checksum offloading */
1792 if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
1793 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
1798 ring->ctx[i].nr_frags = nr_frags;
1799 wil_tx_desc_set_nr_frags(d, nr_frags + 1);
1801 /* middle segments */
1802 for (; f < nr_frags; f++) {
1803 const struct skb_frag_struct *frag =
1804 &skb_shinfo(skb)->frags[f];
1805 int len = skb_frag_size(frag);
1808 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1809 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1810 (const void *)d, sizeof(*d), false);
1811 i = (swhead + f + 1) % ring->size;
1812 _d = &ring->va[i].tx.legacy;
1813 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
1815 if (unlikely(dma_mapping_error(dev, pa))) {
1816 wil_err(wil, "Tx[%2d] failed to map fragment\n",
1820 ring->ctx[i].mapped_as = wil_mapped_as_page;
1821 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1822 pa, len, ring_index);
1823 /* no need to check return code -
1824 * if it succeeded for 1-st descriptor,
1825 * it will succeed here too
1827 wil_tx_desc_offload_setup(d, skb);
1829 /* for the last seg only */
1830 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
1831 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
1832 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1834 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1835 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1836 (const void *)d, sizeof(*d), false);
1838 /* hold reference to skb
1839 * to prevent skb release before accounting
1840 * in case of immediate "tx done"
1842 ring->ctx[i].skb = skb_get(skb);
1844 /* performance monitoring */
1845 used = wil_ring_used_tx(ring);
1846 if (wil_val_in_range(wil->ring_idle_trsh,
1847 used, used + nr_frags + 1)) {
1848 txdata->idle += get_cycles() - txdata->last_idle;
1849 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1850 ring_index, used, used + nr_frags + 1);
1853 /* Make sure to advance the head only after descriptor update is done.
1854 * This will prevent a race condition where the completion thread
1855 * will see the DU bit set from previous run and will handle the
1856 * skb before it was completed.
1860 /* advance swhead */
1861 wil_ring_advance_head(ring, nr_frags + 1);
1862 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
1864 trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
1866 /* make sure all writes to descriptors (shared memory) are done before
1867 * committing them to HW
1871 if (wil->tx_latency)
1872 *(ktime_t *)&skb->cb = ktime_get();
1874 memset(skb->cb, 0, sizeof(ktime_t));
1876 wil_w(wil, ring->hwtail, ring->swhead);
1880 /* unmap what we have mapped */
1881 nr_frags = f + 1; /* frags mapped + one for skb head */
1882 for (f = 0; f < nr_frags; f++) {
1883 struct wil_ctx *ctx;
1885 i = (swhead + f) % ring->size;
1886 ctx = &ring->ctx[i];
1887 _d = &ring->va[i].tx.legacy;
1889 _d->dma.status = TX_DMA_STATUS_DU;
1890 wil->txrx_ops.tx_desc_unmap(dev,
1891 (union wil_tx_desc *)d,
1894 memset(ctx, 0, sizeof(*ctx));
1900 static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1901 struct wil_ring *ring, struct sk_buff *skb)
1903 int ring_index = ring - wil->ring_tx;
1904 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
1907 spin_lock(&txdata->lock);
1909 if (test_bit(wil_status_suspending, wil->status) ||
1910 test_bit(wil_status_suspended, wil->status) ||
1911 test_bit(wil_status_resuming, wil->status)) {
1913 "suspend/resume in progress. drop packet\n");
1914 spin_unlock(&txdata->lock);
1918 rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
1919 (wil, vif, ring, skb);
1921 spin_unlock(&txdata->lock);
1927 * Check status of tx vrings and stop/wake net queues if needed
1928 * It will start/stop net queues of a specific VIF net_device.
1930 * This function does one of two checks:
1931 * In case check_stop is true, will check if net queues need to be stopped. If
1932 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
1933 * In case check_stop is false, will check if net queues need to be woken. If
1934 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
1935 * vring is the vring which is currently being modified by either adding
1936 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
1937 * be null when irrelevant (e.g. connect/disconnect events).
1939 * The implementation is to stop net queues if modified vring has low
1940 * descriptor availability. Wake if all vrings are not in low descriptor
1941 * availability and modified vring has high descriptor availability.
1943 static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1944 struct wil6210_vif *vif,
1945 struct wil_ring *ring,
1949 int min_ring_id = wil_get_min_tx_ring_id(wil);
1955 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
1956 (int)(ring - wil->ring_tx), vif->mid, check_stop,
1957 vif->net_queue_stopped);
1959 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
1960 check_stop, vif->mid, vif->net_queue_stopped);
1962 if (check_stop == vif->net_queue_stopped)
1963 /* net queues already in desired state */
1967 if (!ring || unlikely(wil_ring_avail_low(ring))) {
1968 /* not enough room in the vring */
1969 netif_tx_stop_all_queues(vif_to_ndev(vif));
1970 vif->net_queue_stopped = true;
1971 wil_dbg_txrx(wil, "netif_tx_stop called\n");
1976 /* Do not wake the queues in suspend flow */
1977 if (test_bit(wil_status_suspending, wil->status) ||
1978 test_bit(wil_status_suspended, wil->status))
1982 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1983 struct wil_ring *cur_ring = &wil->ring_tx[i];
1984 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1986 if (txdata->mid != vif->mid || !cur_ring->va ||
1987 !txdata->enabled || cur_ring == ring)
1990 if (wil_ring_avail_low(cur_ring)) {
1991 wil_dbg_txrx(wil, "ring %d full, can't wake\n",
1992 (int)(cur_ring - wil->ring_tx));
1997 if (!ring || wil_ring_avail_high(ring)) {
1998 /* enough room in the ring */
1999 wil_dbg_txrx(wil, "calling netif_tx_wake\n");
2000 netif_tx_wake_all_queues(vif_to_ndev(vif));
2001 vif->net_queue_stopped = false;
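/* The combined effect is hysteresis between the two watermarks: queues
 * are stopped as soon as the ring being filled drops below
 * wil_ring_wmark_low(), but they are woken only when that ring is back
 * above wil_ring_wmark_high() and no other enabled ring of the same VIF
 * is low, which avoids rapid stop/wake flapping around a single
 * threshold.
 */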
2005 void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
2006 struct wil_ring *ring, bool check_stop)
2008 spin_lock(&wil->net_queue_lock);
2009 __wil_update_net_queues(wil, vif, ring, check_stop);
2010 spin_unlock(&wil->net_queue_lock);
2013 void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
2014 struct wil_ring *ring, bool check_stop)
2016 spin_lock_bh(&wil->net_queue_lock);
2017 __wil_update_net_queues(wil, vif, ring, check_stop);
2018 spin_unlock_bh(&wil->net_queue_lock);
2021 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2023 struct wil6210_vif *vif = ndev_to_vif(ndev);
2024 struct wil6210_priv *wil = vif_to_wil(vif);
2025 struct ethhdr *eth = (void *)skb->data;
2026 bool bcast = is_multicast_ether_addr(eth->h_dest);
2027 struct wil_ring *ring;
2028 static bool pr_once_fw;
2031 wil_dbg_txrx(wil, "start_xmit\n");
2032 if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
2034 wil_err(wil, "FW not ready\n");
2039 if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
2040 wil_dbg_ratelimited(wil,
2041 "VIF not connected, packet dropped\n");
2044 if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
2045 wil_err(wil, "Xmit in monitor mode not supported\n");
2051 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
2052 /* in STA mode (ESS), all to same VRING (to AP) */
2053 ring = wil_find_tx_ring_sta(wil, vif, skb);
2056 /* in pbss, no bcast VRING - duplicate skb in
2057 * all stations VRINGs
2059 ring = wil_find_tx_bcast_2(wil, vif, skb);
2060 else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
2061 /* AP has a dedicated bcast VRING */
2062 ring = wil_find_tx_bcast_1(wil, vif, skb);
2064 /* unexpected combination, fall back to duplicating
2065 * the skb in all stations VRINGs
2067 ring = wil_find_tx_bcast_2(wil, vif, skb);
2069 /* unicast, find specific VRING by dest. address */
2070 ring = wil_find_tx_ucast(wil, vif, skb);
2072 if (unlikely(!ring)) {
2073 wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
2076 /* set up vring entry */
2077 rc = wil_tx_ring(wil, vif, ring, skb);
2081 /* shall we stop net queues? */
2082 wil_update_net_queues_bh(wil, vif, ring, true);
2083 /* statistics will be updated on the tx_complete */
2084 dev_kfree_skb_any(skb);
2085 return NETDEV_TX_OK;
2087 return NETDEV_TX_BUSY;
2089 break; /* goto drop; */
2092 ndev->stats.tx_dropped++;
2093 dev_kfree_skb_any(skb);
2095 return NET_XMIT_DROP;
2098 void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
2099 struct wil_sta_info *sta)
2104 if (!wil->tx_latency)
2107 if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
2110 skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
2111 bin = skb_time_us / wil->tx_latency_res;
2112 bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
2114 wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
2115 sta->tx_latency_bins[bin]++;
2116 sta->stats.tx_latency_total_us += skb_time_us;
2117 if (skb_time_us < sta->stats.tx_latency_min_us)
2118 sta->stats.tx_latency_min_us = skb_time_us;
2119 if (skb_time_us > sta->stats.tx_latency_max_us)
2120 sta->stats.tx_latency_max_us = skb_time_us;
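/* Illustrative numbers: with tx_latency_res set to, say, 1000 (microseconds
 * per bin), an skb that spent 2500us between wil_tx_ring() and tx-complete
 * falls into bin 2500 / 1000 = 2; larger delays are clamped into the last
 * bin (WIL_NUM_LATENCY_BINS - 1).
 */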
2124 * Clean up transmitted skb's from the Tx VRING
2126 * Return number of descriptors cleared
2128 * Safe to call from IRQ
2130 int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2132 struct wil6210_priv *wil = vif_to_wil(vif);
2133 struct net_device *ndev = vif_to_ndev(vif);
2134 struct device *dev = wil_to_dev(wil);
2135 struct wil_ring *vring = &wil->ring_tx[ringid];
2136 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
2138 int cid = wil->ring2cid_tid[ringid][0];
2139 struct wil_net_stats *stats = NULL;
2140 volatile struct vring_tx_desc *_d;
2141 int used_before_complete;
2144 if (unlikely(!vring->va)) {
2145 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
2149 if (unlikely(!txdata->enabled)) {
2150 wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
2154 wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
2156 used_before_complete = wil_ring_used_tx(vring);
2158 if (cid < WIL6210_MAX_CID)
2159 stats = &wil->sta[cid].stats;
2161 while (!wil_ring_is_empty(vring)) {
2163 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2165 * For a fragmented skb, HW will set the DU bit only for the
2166 * last fragment. Look for it.
2167 * In TSO the first DU will include the hdr desc
2169 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2170 /* TODO: check we are not past head */
2172 _d = &vring->va[lf].tx.legacy;
2173 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
2176 new_swtail = (lf + 1) % vring->size;
2177 while (vring->swtail != new_swtail) {
2178 struct vring_tx_desc dd, *d = &dd;
2180 struct sk_buff *skb;
2182 ctx = &vring->ctx[vring->swtail];
2184 _d = &vring->va[vring->swtail].tx.legacy;
2188 dmalen = le16_to_cpu(d->dma.length);
2189 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
2192 "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
2193 ringid, vring->swtail, dmalen,
2194 d->dma.status, d->dma.error);
2195 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
2196 (const void *)d, sizeof(*d), false);
2198 wil->txrx_ops.tx_desc_unmap(dev,
2199 (union wil_tx_desc *)d,
2203 if (likely(d->dma.error == 0)) {
2204 ndev->stats.tx_packets++;
2205 ndev->stats.tx_bytes += skb->len;
2207 stats->tx_packets++;
2208 stats->tx_bytes += skb->len;
2210 wil_tx_latency_calc(wil, skb,
2214 ndev->stats.tx_errors++;
2218 wil_consume_skb(skb, d->dma.error == 0);
2220 memset(ctx, 0, sizeof(*ctx));
2221 /* Make sure the ctx is zeroed before updating the tail
2222 * to prevent a case where wil_tx_ring will see
2223 * this descriptor as used and handle it before the ctx is zeroed
2227 /* There is no need to touch the HW descriptor:
2228 * - the status bit TX_DMA_STATUS_DU is set by design,
2229 * so hardware will not try to process this desc,
2230 * - the rest of the descriptor will be initialized on Tx.
2232 vring->swtail = wil_ring_next_tail(vring);
2237 /* performance monitoring */
2238 used_new = wil_ring_used_tx(vring);
2239 if (wil_val_in_range(wil->ring_idle_trsh,
2240 used_new, used_before_complete)) {
2241 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
2242 ringid, used_before_complete, used_new);
2243 txdata->last_idle = get_cycles();
2246 /* shall we wake net queues? */
2248 wil_update_net_queues(wil, vif, vring, false);
2253 static inline int wil_tx_init(struct wil6210_priv *wil)
2258 static inline void wil_tx_fini(struct wil6210_priv *wil) {}
2260 static void wil_get_reorder_params(struct wil6210_priv *wil,
2261 struct sk_buff *skb, int *tid, int *cid,
2262 int *mid, u16 *seq, int *mcast, int *retry)
2264 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
2266 *tid = wil_rxdesc_tid(d);
2267 *cid = wil_rxdesc_cid(d);
2268 *mid = wil_rxdesc_mid(d);
2269 *seq = wil_rxdesc_seq(d);
2270 *mcast = wil_rxdesc_mcast(d);
2271 *retry = wil_rxdesc_retry(d);
2274 void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
2276 wil->txrx_ops.configure_interrupt_moderation =
2277 wil_configure_interrupt_moderation;
2279 wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
2280 wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
2281 wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
2282 wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
2283 wil->txrx_ops.ring_fini_tx = wil_vring_free;
2284 wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
2285 wil->txrx_ops.tx_init = wil_tx_init;
2286 wil->txrx_ops.tx_fini = wil_tx_fini;
2288 wil->txrx_ops.rx_init = wil_rx_init;
2289 wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
2290 wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
2291 wil->txrx_ops.get_netif_rx_params =
2292 wil_get_netif_rx_params;
2293 wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
2294 wil->txrx_ops.rx_error_check = wil_rx_error_check;
2295 wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
2296 wil->txrx_ops.rx_fini = wil_rx_fini;