/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

static bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

static bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}
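
/* Worked example (illustrative, not part of the driver): the ring
 * accounting above is plain modular arithmetic. For a ring of size 128
 * with swhead = 5 and swtail = 120:
 *
 *	used  = (128 + 5 - 120) % 128 = 13
 *	avail = 128 - 13 - 1 = 114
 *
 * One entry is always kept unused, so a full ring
 * (wil_vring_next_tail() == swhead) can be told apart from an empty
 * one (swhead == swtail).
 */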

/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size / 8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size / 4;
}

/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_vring_avail_low(struct vring *vring)
{
	return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
}

/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_vring_avail_high(struct vring *vring)
{
	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
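
/* Illustrative sketch only: the two watermarks give the stop/wake logic
 * hysteresis. Assuming the size/8 and size/4 values above, a 1024-entry
 * ring stops the net queues when fewer than 128 descriptors are
 * available and wakes them only once more than 256 are free again:
 *
 *	avail < 128 -> netif_tx_stop_all_queues()
 *	avail > 256 -> netif_tx_wake_all_queues()
 *
 * so the queues do not flap around a single threshold. See
 * __wil_update_net_queues() below for the actual policy.
 */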

/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *vring = &wil->vring_tx[i];
		int vring_index = vring - wil->vring_tx;
		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
					WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_vring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}
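
/* Example usage (illustrative): the Tx path uses this half-open
 * interval check to detect the moment a ring crosses the idle
 * threshold. With wil->vring_idle_trsh == 16, a burst that moves the
 * used count from 10 to 20 gives
 *
 *	wil_val_in_range(16, 10, 20) == true	(16 is in [10, 20))
 *
 * and only that burst updates the idle statistics; later bursts that
 * stay above the threshold do not match.
 */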

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using 48 bit addresses switch to 32 bit allocation
	 * before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_vring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
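
/* Buffer layout sketch (illustrative): monitor mode is the only case
 * with non-zero headroom, reserved for the radiotap header:
 *
 *	dev_alloc_skb(sz + headroom)
 *	[ headroom (radiotap) | rx_buf_len + ETH_HLEN + snaplen ]
 *	                       ^ skb->data after skb_reserve()
 *
 * All sz bytes are mapped for DMA; the frame is later trimmed to the
 * real length reported in the Rx descriptor (see wil_vring_reap_rx()).
 */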

/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - PHY info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/* similar to the ieee80211_ version, but FC contains only the 1-st byte */
static inline int wil_is_back_req(u8 fc)
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
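
/* Example (illustrative): for a BlockAckReq the first FC byte is
 * 0x84 == IEEE80211_FTYPE_CTL (0x04) | IEEE80211_STYPE_BACK_REQ (0x80),
 * so wil_is_back_req(0x84) is true, while the first byte of a QoS data
 * frame (0x88) fails the type check.
 */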

bool wil_is_rx_idle(struct wil6210_priv *wil)
{
	struct vring_rx_desc *_d;
	struct vring *vring = &wil->vring_rx;

	_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
	if (_d->dma.status & RX_DMA_STATUS_DU)
		return false;

	return true;
}

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

again:
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int mid = wil_rxdesc_mid(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
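
/* Worked example (illustrative): with snaplen == 6 the 12 address
 * bytes slide right over the SNAP remnant, then skb_pull() drops the
 * now-dead bytes at the front:
 *
 *	before: | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
 *	after : | dead(6) | SA(6) | DA(6) | ETHTYPE(2) | DATA |
 *
 * memmove() (not memcpy()) is required because source and destination
 * overlap by snaplen bytes.
 */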

/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
					    rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}
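
/* The refill above follows the usual producer pattern for a
 * device-visible ring (generic sketch, not driver-specific):
 *
 *	fill descriptors in coherent memory;
 *	wmb();				  // descriptors visible first
 *	wil_w(wil, v->hwtail, v->swtail); // then publish the new tail
 *
 * Without the barrier the HW could observe the tail update before the
 * descriptor contents and DMA into a stale buffer address.
 */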

/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from original memcmp (see lib/string.c)
 * with minimal modifications
 */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}
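
/* Example (illustrative): the GCMP PN is stored LSB-first in the Rx
 * descriptor, so it must be compared from the most significant byte
 * down, which is exactly what reverse_memcmp() does:
 *
 *	u8 old_pn[6] = { 0xff, 0, 0, 0, 0, 0 };	// PN = 255
 *	u8 new_pn[6] = { 0x00, 1, 0, 0, 0, 0 };	// PN = 256
 *
 * reverse_memcmp(new_pn, old_pn, 6) > 0 (new_pn is newer), while a
 * plain memcmp() on the same buffers would report the opposite order.
 */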

static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
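
/* Replay rule sketch (illustrative): the check above enforces a
 * strictly increasing PN per {CID, TID/group, key_id}. A frame whose
 * PN merely equals the last accepted one is also rejected (<= 0),
 * matching the IEEE 802.11 GCMP replay detection requirement.
 */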

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	int security = wil_rxdesc_security(d);
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
		[GRO_CONSUMED]		= "GRO_CONSUMED",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
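
	/* Illustrative note: any non-zero value is enough to engage RPS;
	 * frames are then spread according to the user-configured CPU
	 * mask, e.g. (hypothetical interface name):
	 *
	 *	echo f > /sys/class/net/wlan0/queues/rx-0/rps_cpus
	 *
	 * Marking the hash as L4 also keeps the stack from recomputing
	 * it on the slow path.
	 */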

	skb_orphan(skb);

	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		stats->rx_replay++;
		goto stats;
	}

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}

/**
 * Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
	if (mtu_max > wil->rx_buf_len) {
		/* do not allow RX buffers to be smaller than mtu_max, for
		 * backward compatibility (mtu_max parameter was also used
		 * to support receiving large packets)
		 */
		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
		wil->rx_buf_len = mtu_max;
	}
}

int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "rx_init\n");

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	wil_rx_buf_len_init(wil);

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "rx_fini\n");

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

static inline void wil_tx_data_init(struct vring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	spin_unlock_bh(&txdata->lock);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
	wil->vring2cid_tid[id][1] = 0;
 out:
	return rc;
}

int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
 out:
	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	lockdep_assert_held(&wil->mutex);

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* napi_synchronize waits for completion of the current NAPI but will
	 * not prevent the next NAPI run.
	 * Add a memory barrier to guarantee that txdata->enabled is zeroed
	 * before napi_synchronize so that the next scheduled NAPI will not
	 * handle this vring
	 */
	wmb();
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			struct vring_tx_data *txdata = &wil->vring_tx_data[i];

			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
				     eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil,
					     "find_tx_ucast: vring[%d] not valid\n",
					     i);
				return NULL;
			}
		}
	}

	return NULL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	int cid;
	struct vring_tx_data *txdata;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}

/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	struct vring_tx_data *txdata;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	txdata = &wil->vring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct vring_tx_data *txdata;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
 * Note, if d==NULL, the function only returns the protocol result.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
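
/* Worked example (illustrative): for a TCPv4 skb with a 20-byte IP
 * header and a 32-byte TCP header (options present), the setup above
 * produces
 *
 *	d->dma.b11       = ETH_HLEN | ipv4 flag	(14 | L3T_IPV4)
 *	d->dma.ip_length = 20
 *	d->dma.d0       |= (2 << L4_TYPE) | 32	(TCP, L4 hdr len)
 *	                 | TCP_UDP_CHECKSUM_EN | PSEUDO_HEADER_CALC_EN
 *
 * (bit names abbreviated from the DMA_CFG_DESC_TX_* macros).
 */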

static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}

static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
			      struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss*/

	u32 swhead = vring->swhead;
	int used, avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical page 4K is 3-4 payloads, we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In real we might need more or less descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len*/
	hdrlen = ETH_HLEN +
		(int)skb_network_header_len(skb) +
		tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */

			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag*/
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/*last descriptor will be copied at the end
				 * of this TS processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
					*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resuming, wil->status)) {
		wil_dbg_txrx(wil,
			     "suspend/resume in progress. drop packet\n");
		spin_unlock(&txdata->lock);
		return -EINVAL;
	}

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/**
 * Check status of tx vrings and stop/wake net queues if needed
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped. If
 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
 * In case check_stop is false, will check if net queues need to be waked. If
 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
 * be null when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if modified vring has low
 * descriptor availability. Wake if all vrings are not in low descriptor
 * availability and modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct vring *vring,
					   bool check_stop)
{
	int i;

	if (vring)
		wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
			     (int)(vring - wil->vring_tx), check_stop,
			     wil->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
			     check_stop, wil->net_queue_stopped);

	if (check_stop == wil->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!vring || unlikely(wil_vring_avail_low(vring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(wil_to_ndev(wil));
			wil->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *cur_vring = &wil->vring_tx[i];
		struct vring_tx_data *txdata = &wil->vring_tx_data[i];

		if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
			continue;

		if (wil_vring_avail_low(cur_vring)) {
			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
				     (int)(cur_vring - wil->vring_tx));
			return;
		}
	}

	if (!vring || wil_vring_avail_high(vring)) {
		/* enough room in the vring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
		wil->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
			   bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
			      bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}
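
/* Usage sketch (illustrative): the two wrappers differ only in BH
 * protection, matching their callers' contexts:
 *
 *	wil_start_xmit()  -> wil_update_net_queues_bh(wil, vring, true);
 *	wil_tx_complete() -> wil_update_net_queues(wil, vring, false);
 *
 * Tx submission may stop the queues; Tx completion, which already runs
 * in softirq context, may wake them.
 */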

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else if (bcast) {
		if (wil->pbss)
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
		else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			vring = wil_find_tx_bcast_1(wil, skb);
		else
			/* unexpected combination, fallback to duplicating
			 * the skb in all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		vring = wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_vring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vring, false);

	return done;
}