/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5
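/* A refill that fails with -ENOMEM is retried after the longer
 * HTT_RX_RING_REFILL_RETRY_MS; a refill that was merely capped below the
 * fill level is rescheduled after the shorter HTT_RX_RING_REFILL_RESCHED_MS
 * (see ath10k_htt_rx_msdu_buff_replenish()).
 */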
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	return NULL;
}
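/* The skb hash table is keyed by the buffer's DMA address. It is only
 * populated when the firmware uses the full rx reorder (in-order) ring,
 * i.e. when rx_ring.in_ord_rx is set.
 */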
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);

	return ret;
}
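/* Every ring fill must run with rx_ring.lock held; the wrapper below
 * asserts that before delegating to __ath10k_htt_rx_ring_fill_n().
 */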
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
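/* Worked example of the progressive refill above: with a deficit of 150
 * buffers, min(ATH10K_HTT_MAX_NUM_REFILL, 150) buffers are posted
 * immediately and, if anything is still missing, the retry timer is
 * re-armed with HTT_RX_RING_REFILL_RESCHED_MS to top up the remainder
 * shortly after, keeping rx refill from monopolizing the CPU.
 */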
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
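/* length of the Michael MIC that TKIP appends after the MSDU payload */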
#define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
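/* In VHT-SIG-A a group ID of 0 or 63 denotes an SU (non-MU-MIMO) PPDU
 * (IEEE 802.11ac); all other group IDs indicate MU-MIMO transmissions.
 */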
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 stbc, nsts_su;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
			break;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
							"legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}
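/* Example: a 3-addr QoS data header is 26 bytes, so firmware that pads
 * nwifi-decapped headers makes the round_up() above return 28.
 */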
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	} else {
		/* MIC */
		if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len - 8);

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED &&
		    enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}
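/* Offset example for the is_first case above, assuming CCMP and 4-byte
 * decap alignment: a 26-byte QoS header rounds up to 28 and the 8-byte
 * CCMP param stays 8, placing rfc1042 36 bytes past hdr; an A-MSDU adds
 * the 14-byte subframe header on top of that.
 */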
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
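/* CHECKSUM_UNNECESSARY tells the network stack to skip software checksum
 * verification; every other case falls back to CHECKSUM_NONE.
 */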
static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
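/* Note: the regular rx indication path calls ath10k_htt_rx_h_mpdu() with
 * fill_crypt_header = true while the in-order indication path passes
 * false, selecting between the MIC/ICV-stripped and IV-stripped flag
 * combinations above.
 */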
static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_process_rx(ar, status, msdu);
	}
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	rxd = (void *)first->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate if the amsdu has a proper first subframe.
	 * There are chances a single msdu can be received as amsdu when
	 * the unauthenticated amsdu flag of a QoS header
	 * gets flipped in non-SPP AMSDU's, in such cases the first
	 * subframe has llc/snap header in place of a valid da.
	 * return false if the da matches rfc1042 pattern
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret, num_msdus;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	return num_msdus;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
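/* A trailing A-MSDU that lacks its last-msdu marker is spliced back onto
 * the list and reported as -EAGAIN; the in-order rx handler treats that
 * as fatal and marks the rx ring as confused.
 */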
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}
static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				      struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;
	int num_msdu = 0;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
		num_msdu++;
	}

	return num_msdu;
}
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret, num_msdus = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			num_msdus += skb_queue_len(&amsdu);
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}

	return num_msdus;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
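/* The fetch records are updated in place with the msdu/byte counts that
 * were actually pushed and are echoed back to the firmware via
 * ath10k_htt_tx_fetch_resp() above.
 */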
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
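	/* MS() is ath10k's mask-and-shift helper: it extracts a bitfield,
	 * roughly (value & FIELD_MASK) >> FIELD_LSB.
	 */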
	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);
	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();
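	/* The global queue state is committed above; each record below then
	 * adjusts the push allowance of one (peer_id, tid) txq.
	 */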
	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}
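/* Returns true when the caller still owns the skb and may free it, false
 * when the handler has queued the skb for deferred processing (e.g. the
 * in-order rx indications consumed later in NAPI context).
 */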
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
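	/* type is now a firmware-independent enum value; raw wire IDs differ
	 * between firmware ABIs and are remapped through t2h_msg_types[].
	 */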
	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
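		/* Only drop the pending-mgmt count when the frame was actually
		 * unreferenced; a non-zero return suggests the msdu_id lookup
		 * failed (e.g. a stale or duplicate completion).
		 */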
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
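		/* Defer the actual rx work to NAPI; the poll loop consumes
		 * num_mpdus_ready via ath10k_htt_rx_handle_amsdu().
		 */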
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects the host may be able to fix them.
		 * ath10k sends all tx frames as already inspected so this
		 * shouldn't happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
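		/* Ownership of the skb passes to rx_in_ord_compl_q here;
		 * returning false tells the caller not to free it.
		 */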
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan =
			__ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
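		/* The caller frees the original indication once this handler
		 * returns true, so a private copy is queued for the deferred
		 * processing done by ath10k_htt_rx_tx_fetch_ind().
		 */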
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, num_rx_msdus;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Since an in-ord-ind event can deliver more than one A-MSDU, process
	 * it first to utilize the full available quota.
	 */
	while (quota < budget) {
		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
			break;

		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
		if (!skb) {
			resched_napi = true;
			goto exit;
		}

		spin_lock_bh(&htt->rx_ring.lock);
		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		dev_kfree_skb_any(skb);
		if (num_rx_msdus > 0)
			quota += num_rx_msdus;
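		/* Once past the NAPI soft limit with work still queued, ask to
		 * be rescheduled instead of overrunning the budget.
		 */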
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
			resched_napi = true;
			goto exit;
		}
	}
	while (quota < budget) {
		/* no more data to receive */
		if (!atomic_read(&htt->num_mpdus_ready))
			break;

		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
		if (num_rx_msdus < 0) {
			resched_napi = true;
			goto exit;
		}

		quota += num_rx_msdus;
		atomic_dec(&htt->num_mpdus_ready);
		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
		    atomic_read(&htt->num_mpdus_ready)) {
			resched_napi = true;
			goto exit;
		}
	}
	/* From the NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;
	/* kfifo_get: called only from this NAPI poll function, so reads are
	 * neatly serialized. From the kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);
	ath10k_mac_tx_push_pending(ar);
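	/* Splice the whole tx fetch indication queue under its lock, then
	 * process the entries without holding it.
	 */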
	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}
exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget to
	 * reschedule the NAPI poll.
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);