// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}
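
/* Note: in-order RX indications reference ring buffers by DMA address, so
 * the skb hash table above is keyed on the paddr returned by
 * dma_map_single(). hash_for_each_possible() walks only the bucket that
 * paddr hashes into and the rxcb->paddr check resolves collisions within
 * it; reaching the WARN_ON_ONCE() means the target reported a buffer the
 * host never posted (or one that was already popped).
 */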

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
        return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
        return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
                                             void *vaddr)
{
        htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
                                             void *vaddr)
{
        htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
                                          dma_addr_t paddr, int idx)
{
        htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
                                          dma_addr_t paddr, int idx)
{
        htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
        htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
        htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
        return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
        return (void *)htt->rx_ring.paddrs_ring_64;
}
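
/* The _32/_64 accessor pairs above exist because targets address host
 * memory with either 32-bit or 64-bit physical addresses. The matching
 * variant is presumably selected once per target (via the htt rx ops) so
 * that the fill/pop paths can call ath10k_htt_set_paddrs_ring() and
 * friends without branching on the address width.
 */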

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct ath10k_hw_params *hw = &htt->ar->hw_params;
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
         * To keep things simple make sure ring is always half empty. This
         * guarantees there'll be no replenishment overruns possible.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
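
        /* Illustrative numbers (assuming HTT_RX_RING_SIZE == 2048): the
         * BUILD_BUG_ON above then caps HTT_RX_RING_FILL_LEVEL below 1024,
         * i.e. the ring is never more than half full. Because the size is
         * a power of two, "idx++; idx &= size_mask;" further down wraps
         * 2047 -> 0 without a division.
         */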

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        if (idx < 0 || idx >= htt->rx_ring.size) {
                ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
                idx &= htt->rx_ring.size_mask;
                ret = -ENOMEM;
                goto fail;
        }

        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
                ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                ath10k_htt_set_paddrs_ring(htt, paddr, idx);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before available buffer
         * index to avoid any potential rx ring corruption.
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting
         * as a bridge with the ath10k wlan interface. This ended up with
         * very poor performance once the host CPU was overwhelmed with RX
         * on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact tasklets are
         * processed in FIFO order. This means actual RX processing can
         * starve out refilling. If there are not enough buffers in the RX
         * ring the FW will not report RX until it is refilled with enough
         * buffers. This automatically balances load with respect to CPU
         * capacity.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}
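
/* A sketch of the progressive refill above, under assumed numbers: with a
 * deficit of 400 buffers and ATH10K_HTT_MAX_NUM_REFILL == 100, the first
 * pass posts 100 buffers and re-arms the timer with the short
 * HTT_RX_RING_REFILL_RESCHED_MS timeout; three more timer runs drain the
 * remaining deficit. Only an allocation failure (-ENOMEM) switches to the
 * longer HTT_RX_RING_REFILL_RETRY_MS timeout.
 */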

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
        struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return 0;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));

        if (ret)
                ath10k_htt_rx_ring_free(htt);

        spin_unlock_bh(&htt->rx_ring.lock);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return;

        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        skb_queue_purge(&htt->rx_msdus_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);

        spin_lock_bh(&htt->rx_ring.lock);
        ath10k_htt_rx_ring_free(htt);
        spin_unlock_bh(&htt->rx_ring.lock);

        dma_free_coherent(htt->ar->dev,
                          ath10k_htt_get_rx_ring_size(htt),
                          ath10k_htt_get_vaddr_ring(htt),
                          htt->rx_ring.base_paddr);

        ath10k_htt_config_paddrs_ring(htt, NULL);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);
        htt->rx_ring.alloc_idx.vaddr = NULL;

        kfree(htt->rx_ring.netbufs_ring);
        htt->rx_ring.netbufs_ring = NULL;
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        ath10k_htt_reset_paddrs_ring(htt, idx);

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}
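
/* Note on the two ring indices used above: alloc_idx.vaddr is the producer
 * index shared with the target (published by __ath10k_htt_rx_ring_fill_n()
 * behind a memory barrier), while sw_rd_idx.msdu_payld is a host-only
 * consumer index. fill_cnt tracks how many buffers are currently posted in
 * the ring.
 */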

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;
        struct rx_attention *rx_desc_attention;
        struct rx_frag_info_common *rx_desc_frag_info_common;
        struct rx_msdu_start_common *rx_desc_msdu_start_common;
        struct rx_msdu_end_common *rx_desc_msdu_end_common;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
                rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
                rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
                                                                              rx_desc);
                rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
                rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

                /* FIXME: we must report msdu payload since this is what caller
                 * expects now
                 */
                skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
                skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc_attention->flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc_frag_info_common->ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }
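
                /* Illustrative example: for msdu_len == 3000, the first
                 * buffer carries min(3000, ath10k_htt_rx_msdu_size(hw))
                 * bytes and each chained buffer (which carries no rx
                 * descriptor) takes up to HTT_RX_BUF_SIZE more until
                 * msdu_len is exhausted; msdu_chaining then flags the MSDU
                 * as spanning several ring buffers.
                 */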

                last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                /* FIXME: why are we skipping the first part of the rx_desc? */
                trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
                                         hw->rx_desc_ops->rx_desc_size - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}
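
/* Callers of ath10k_htt_rx_amsdu_pop() therefore see three outcomes: a
 * negative value when the ring state is inconsistent (the amsdu queue has
 * already been purged), 0 when every MSDU fit in a single ring buffer, and
 * 1 when at least one MSDU spilled into chained buffers.
 */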

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u64 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
                                               struct sk_buff *frag_list,
                                               unsigned int frag_len)
{
        skb_shinfo(skb_head)->frag_list = frag_list;
        skb_head->data_len = frag_len;
        skb_head->len += skb_head->data_len;
}
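
/* Note: hanging the fragment chain off skb_shinfo()->frag_list keeps the
 * payload scattered across the original ring buffers instead of copying it
 * into one linear skb; skb->len grows by frag_len while data_len records
 * how much of that total lives outside the linear head.
 */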

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
                                             struct sk_buff *msdu,
                                             struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        u32 paddr;
        struct sk_buff *frag_buf;
        struct sk_buff *prev_frag_buf;
        u8 last_frag;
        struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
        struct htt_rx_desc *rxd;
        int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

        rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
        trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

        skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
        skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
        skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
        amsdu_len -= msdu->len;

        last_frag = ind_desc->reserved;
        if (last_frag) {
                if (amsdu_len) {
                        ath10k_warn(ar, "invalid amsdu len %u, left %d",
                                    __le16_to_cpu(ind_desc->msdu_len),
                                    amsdu_len);
                }
                return 0;
        }

        ind_desc++;
        paddr = __le32_to_cpu(ind_desc->msdu_paddr);
        frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
        if (!frag_buf) {
                ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
                return -ENOENT;
        }

        skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
        ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

        amsdu_len -= frag_buf->len;
        prev_frag_buf = frag_buf;
        last_frag = ind_desc->reserved;
        while (!last_frag) {
                ind_desc++;
                paddr = __le32_to_cpu(ind_desc->msdu_paddr);
                frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!frag_buf) {
                        ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
                                    paddr);
                        prev_frag_buf->next = NULL;
                        return -ENOENT;
                }

                skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
                last_frag = ind_desc->reserved;
                amsdu_len -= frag_buf->len;

                prev_frag_buf->next = frag_buf;
                prev_frag_buf = frag_buf;
        }

        if (amsdu_len) {
                ath10k_warn(ar, "invalid amsdu len %u, left %d",
                            __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
        }

        *msdu_desc = ind_desc;

        prev_frag_buf->next = NULL;
        return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
                                  struct sk_buff *msdu,
                                  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        u64 paddr;
        struct sk_buff *frag_buf;
        struct sk_buff *prev_frag_buf;
        u8 last_frag;
        struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
        struct htt_rx_desc *rxd;
        int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

        rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
        trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

        skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
        skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
        skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
        amsdu_len -= msdu->len;

        last_frag = ind_desc->reserved;
        if (last_frag) {
                if (amsdu_len) {
                        ath10k_warn(ar, "invalid amsdu len %u, left %d",
                                    __le16_to_cpu(ind_desc->msdu_len),
                                    amsdu_len);
                }
                return 0;
        }

        ind_desc++;
        paddr = __le64_to_cpu(ind_desc->msdu_paddr);
        frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
        if (!frag_buf) {
                ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
                return -ENOENT;
        }

        skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
        ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

        amsdu_len -= frag_buf->len;
        prev_frag_buf = frag_buf;
        last_frag = ind_desc->reserved;
        while (!last_frag) {
                ind_desc++;
                paddr = __le64_to_cpu(ind_desc->msdu_paddr);
                frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!frag_buf) {
                        ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
                                    paddr);
                        prev_frag_buf->next = NULL;
                        return -ENOENT;
                }

                skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
                last_frag = ind_desc->reserved;
                amsdu_len -= frag_buf->len;

                prev_frag_buf->next = frag_buf;
                prev_frag_buf = frag_buf;
        }

        if (amsdu_len) {
                ath10k_warn(ar, "invalid amsdu len %u, left %d",
                            __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
        }

        *msdu_desc = ind_desc;

        prev_frag_buf->next = NULL;
        return 0;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
                                          struct htt_rx_in_ord_ind *ev,
                                          struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
        struct htt_rx_desc *rxd;
        struct rx_attention *rxd_attention;
        struct sk_buff *msdu;
        int msdu_count, ret;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                if (!is_offload && ar->monitor_arvif) {
                        ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
                                                                &msdu_desc);
                        if (ret) {
                                __skb_queue_purge(list);
                                return ret;
                        }
                        __skb_queue_tail(list, msdu);
                        msdu_desc++;
                        continue;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
                        rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

                        trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

                        skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
                        skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd_attention->flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
                                          struct htt_rx_in_ord_ind *ev,
                                          struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
        struct htt_rx_desc *rxd;
        struct rx_attention *rxd_attention;
        struct sk_buff *msdu;
        int msdu_count, ret;
        bool is_offload;
        u64 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                if (!is_offload && ar->monitor_arvif) {
                        ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
                                                                &msdu_desc);
                        if (ret) {
                                __skb_queue_purge(list);
                                return ret;
                        }
                        __skb_queue_tail(list, msdu);
                        msdu_desc++;
                        continue;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
                        rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

                        trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

                        skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
                        skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd_attention->flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr, *vaddr_ring;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return 0;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = ath10k_htt_get_rx_ring_size(htt);

        vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr_ring)
                goto err_dma_ring;

        ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        skb_queue_head_init(&htt->rx_msdus_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          ath10k_htt_get_rx_ring_size(htt),
                          vaddr_ring,
                          htt->rx_ring.base_paddr);
        ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
        htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
        return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
                return IEEE80211_CCMP_256_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return IEEE80211_GCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
                                        enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
                return IEEE80211_CCMP_256_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return IEEE80211_GCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
                                        enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
        case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
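
/* In the VHT-SIG-A field of 802.11ac, Group ID 0 (frames addressed to an
 * AP) and 63 (frames addressed to a station) denote single-user
 * transmissions; everything in between is MU-MIMO, for which
 * ath10k_htt_rx_h_rates() below can only report a best-effort rate.
 */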

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
        u8 ret = 0;

        switch (bw) {
        case 0:
                ret = RATE_INFO_BW_20;
                break;
        case 1:
                ret = RATE_INFO_BW_40;
                break;
        case 2:
                ret = RATE_INFO_BW_80;
                break;
        case 3:
                ret = RATE_INFO_BW_160;
                break;
        }

        return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct rx_attention *rxd_attention;
        struct rx_mpdu_start *rxd_mpdu_start;
        struct rx_mpdu_end *rxd_mpdu_end;
        struct rx_msdu_start_common *rxd_msdu_start_common;
        struct rx_msdu_end_common *rxd_msdu_end_common;
        struct rx_ppdu_start *rxd_ppdu_start;
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 *rxd_msdu_payload;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;
        u32 stbc, nsts_su;

        rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
        rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
        rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
        rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
        rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
        rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
        rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

        info1 = __le32_to_cpu(rxd_ppdu_start->info1);
        info2 = __le32_to_cpu(rxd_ppdu_start->info2);
        info3 = __le32_to_cpu(rxd_ppdu_start->info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* The band is required to look up the legacy rate index.
                 * Since the band can't be undefined, check that freq is
                 * non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->encoding = RX_ENC_HT;
                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
                if (bw)
                        status->bw = RATE_INFO_BW_40;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                stbc = (info2 >> 3) & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nsts_su = ((info2 >> 10) & 0x07);
                        if (stbc)
                                nss = (nsts_su >> 2) + 1;
                        else
                                nss = (nsts_su + 1);
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
                         * so it's impossible to decode MCS. Also since
                         * firmware consumes Group Id Management frames host
                         * has no knowledge regarding group/user position
                         * mapping so it's impossible to pick the correct Nsts
                         * from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rateinfo
                         * on best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd_attention->flags),
                                    __le32_to_cpu(rxd_mpdu_start->info0),
                                    __le32_to_cpu(rxd_mpdu_start->info1),
                                    __le32_to_cpu(rxd_msdu_start_common->info0),
                                    __le32_to_cpu(rxd_msdu_start_common->info1),
                                    rxd_ppdu_start->info0,
                                    __le32_to_cpu(rxd_ppdu_start->info1),
                                    __le32_to_cpu(rxd_ppdu_start->info2),
                                    __le32_to_cpu(rxd_ppdu_start->info3),
                                    __le32_to_cpu(rxd_ppdu_start->info4));

                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd_msdu_end_common->info0),
                                    __le32_to_cpu(rxd_mpdu_end->info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd_msdu_payload, 50);
                }

                status->rate_idx = mcs;
                status->nss = nss;

                if (sgi)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

                status->bw = ath10k_bw_to_mac80211_bw(bw);
                status->encoding = RX_ENC_VHT;
                break;
        default:
                break;
        }
}
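
/* Worked HT-SIG example (illustrative): with info2 == 0x8C the decode above
 * yields mcs = 0x8C & 0x1F = 12 and bw = (0x8C >> 7) & 1 = 1, i.e. HT
 * MCS 12 at 40 MHz. Note the HT branch reports the MCS via rate_idx and
 * lets mac80211 derive the stream count from it, whereas the VHT branch
 * must fill status->nss explicitly.
 */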

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct rx_attention *rxd_attention;
        struct rx_msdu_end_common *rxd_msdu_end_common;
        struct rx_mpdu_start *rxd_mpdu_start;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
        rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
        rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

        if (rxd_attention->flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd_msdu_end_common->info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (ath10k_mac_vif_chan(arvif->vif, &def))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        if (!ch)
                ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
        int i;

        for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
                status->chains &= ~BIT(i);

                if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
                        status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
                                rxd_ppdu_start->rssi_chains[i].pri20_mhz;

                        status->chains |= BIT(i);
                }
        }

        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd_ppdu_start->rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
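
/* Example (assuming ATH10K_DEFAULT_NOISE_FLOOR == -95 dBm): an rssi_comb of
 * 40 is reported as signal = -95 + 40 = -55 dBm. A per-chain pri20_mhz
 * value of 0x80 appears to be the hardware's "invalid" marker, which is why
 * such chains are left out of status->chains above.
 */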

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct rx_ppdu_end_common *rxd_ppdu_end_common;

        rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without the
         * TSF. Is it worth holding frames until end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct htt_rx_desc *rxd;
        struct rx_attention *rxd_attention;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = HTT_RX_BUF_TO_RX_DESC(hw,
                                    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

        rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

        is_first_ppdu = !!(rxd_attention->flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd_attention->flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->nss = 0;
                status->encoding = RX_ENC_LEGACY;
                status->bw = RATE_INFO_BW_20;

                status->flag &= ~RX_FLAG_MACTIME;
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
                status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
                status->ampdu_reference = ar->ampdu_reference;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu) {
                ath10k_htt_rx_h_mactime(ar, status, rxd);

                /* set ampdu last segment flag */
                status->flag |= RX_FLAG_AMPDU_IS_LAST;
                ar->ampdu_reference++;
        }
}

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};
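
/* The table above follows the 802.11 UP-to-AC mapping: TIDs 0 and 3 are
 * Best Effort, 1 and 2 Background, 4 and 5 Video, 6 and 7 Voice. It is
 * used only to pretty-print the TID in the debug output below.
 */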
1329
1330 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
1331 {
1332         u8 *qc;
1333         int tid;
1334
1335         if (!ieee80211_is_data_qos(hdr->frame_control))
1336                 return "";
1337
1338         qc = ieee80211_get_qos_ctl(hdr);
1339         tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1340         if (tid < 8)
1341                 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
1342         else
1343                 snprintf(out, size, "tid %d", tid);
1344
1345         return out;
1346 }
1347
1348 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
1349                                        struct ieee80211_rx_status *rx_status,
1350                                        struct sk_buff *skb)
1351 {
1352         struct ieee80211_rx_status *status;
1353
1354         status = IEEE80211_SKB_RXCB(skb);
1355         *status = *rx_status;
1356
1357         skb_queue_tail(&ar->htt.rx_msdus_q, skb);
1358 }
1359
1360 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
1361 {
1362         struct ieee80211_rx_status *status;
1363         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1364         char tid[32];
1365
1366         status = IEEE80211_SKB_RXCB(skb);
1367
1368         if (!(ar->filter_flags & FIF_FCSFAIL) &&
1369             status->flag & RX_FLAG_FAILED_FCS_CRC) {
1370                 ar->stats.rx_crc_err_drop++;
1371                 dev_kfree_skb_any(skb);
1372                 return;
1373         }
1374
1375         ath10k_dbg(ar, ATH10K_DBG_DATA,
1376                    "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1377                    skb,
1378                    skb->len,
1379                    ieee80211_get_SA(hdr),
1380                    ath10k_get_tid(hdr, tid, sizeof(tid)),
1381                    is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
1382                                                         "mcast" : "ucast",
1383                    IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
1384                    (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
1385                    (status->encoding == RX_ENC_HT) ? "ht" : "",
1386                    (status->encoding == RX_ENC_VHT) ? "vht" : "",
1387                    (status->bw == RATE_INFO_BW_40) ? "40" : "",
1388                    (status->bw == RATE_INFO_BW_80) ? "80" : "",
1389                    (status->bw == RATE_INFO_BW_160) ? "160" : "",
1390                    status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
1391                    status->rate_idx,
1392                    status->nss,
1393                    status->freq,
1394                    status->band, status->flag,
1395                    !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1396                    !!(status->flag & RX_FLAG_MMIC_ERROR),
1397                    !!(status->flag & RX_FLAG_AMSDU_MORE));
1398         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
1399                         skb->data, skb->len);
1400         trace_ath10k_rx_hdr(ar, skb->data, skb->len);
1401         trace_ath10k_rx_payload(ar, skb->data, skb->len);
1402
1403         ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
1404 }
1405
1406 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1407                                       struct ieee80211_hdr *hdr)
1408 {
1409         int len = ieee80211_hdrlen(hdr->frame_control);
1410
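        /* Firmware pads the decapped 802.11 header to a 4-byte boundary (e.g.
         * a 30-byte 4-addr header is delivered as 32 bytes) unless it
         * advertises that it adds no such padding.
         */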
1411         if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1412                       ar->running_fw->fw_file.fw_features))
1413                 len = round_up(len, 4);
1414
1415         return len;
1416 }
1417
1418 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1419                                         struct sk_buff *msdu,
1420                                         struct ieee80211_rx_status *status,
1421                                         enum htt_rx_mpdu_encrypt_type enctype,
1422                                         bool is_decrypted,
1423                                         const u8 first_hdr[64])
1424 {
1425         struct ieee80211_hdr *hdr;
1426         struct ath10k_hw_params *hw = &ar->hw_params;
1427         struct htt_rx_desc *rxd;
1428         struct rx_msdu_end_common *rxd_msdu_end_common;
1429         size_t hdr_len;
1430         size_t crypto_len;
1431         bool is_first;
1432         bool is_last;
1433         bool msdu_limit_err;
1434         int bytes_aligned = ar->hw_params.decap_align_bytes;
1435         u8 *qos;
1436
1437         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1438                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1439
1440         rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1441         is_first = !!(rxd_msdu_end_common->info0 &
1442                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1443         is_last = !!(rxd_msdu_end_common->info0 &
1444                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1445
1446         /* Delivered decapped frame:
1447          * [802.11 header]
1448          * [crypto param] <-- can be trimmed if !fcs_err &&
1449          *                    !decrypt_err && !peer_idx_invalid
1450          * [amsdu header] <-- only if A-MSDU
1451          * [rfc1042/llc]
1452          * [payload]
1453          * [FCS] <-- at end, needs to be trimmed
1454          */
1455
1456         /* Some hardware (QCA99x0 variants) limits the number of msdus it
1457          * deaggregates from an a-msdu so that unwanted MSDU-deaggregation is
1458          * avoided for error packets. If the limit is exceeded, hw sends all
1459          * remaining MSDUs as a single last MSDU with the msdu limit error set.
1460          */
1461         msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
1462
1463         /* If an MSDU limit error occurred, don't warn: a partial raw MSDU
1464          * without the first MSDU is expected in that case and handled below.
1465          */
1466         /* This probably shouldn't happen but warn just in case */
1467         if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
1468                 return;
1469
1470         /* This probably shouldn't happen but warn just in case */
1471         if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
1472                 return;
1473
1474         skb_trim(msdu, msdu->len - FCS_LEN);
1475
1476         /* Push original 80211 header */
1477         if (unlikely(msdu_limit_err)) {
1478                 hdr = (struct ieee80211_hdr *)first_hdr;
1479                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1480                 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1481
1482                 if (ieee80211_is_data_qos(hdr->frame_control)) {
1483                         qos = ieee80211_get_qos_ctl(hdr);
1484                         qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1485                 }
1486
1487                 if (crypto_len)
1488                         memcpy(skb_push(msdu, crypto_len),
1489                                (void *)hdr + round_up(hdr_len, bytes_aligned),
1490                                crypto_len);
1491
1492                 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1493         }
1494
1495         /* Sniffed frames will in most cases not be decrypted. It makes sense
1496          * to deliver them as-is without stripping the crypto param, which is
1497          * necessary for software based decryption.
1498          *
1499          * If there's no error then the frame is decrypted. At least that is
1500          * the case for frames that come in via fragmented rx indication.
1501          */
1502         if (!is_decrypted)
1503                 return;
1504
1505         /* The payload is decrypted so strip crypto params. Start from the
1506          * tail since hdr still points into the head and is used below.
1507          */
1508
1509         hdr = (void *)msdu->data;
1510
1511         /* Tail */
1512         if (status->flag & RX_FLAG_IV_STRIPPED) {
1513                 skb_trim(msdu, msdu->len -
1514                          ath10k_htt_rx_crypto_mic_len(ar, enctype));
1515
1516                 skb_trim(msdu, msdu->len -
1517                          ath10k_htt_rx_crypto_icv_len(ar, enctype));
1518         } else {
1519                 /* MIC */
1520                 if (status->flag & RX_FLAG_MIC_STRIPPED)
1521                         skb_trim(msdu, msdu->len -
1522                                  ath10k_htt_rx_crypto_mic_len(ar, enctype));
1523
1524                 /* ICV */
1525                 if (status->flag & RX_FLAG_ICV_STRIPPED)
1526                         skb_trim(msdu, msdu->len -
1527                                  ath10k_htt_rx_crypto_icv_len(ar, enctype));
1528         }
1529
1530         /* MMIC */
1531         if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1532             !ieee80211_has_morefrags(hdr->frame_control) &&
1533             enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1534                 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
1535
1536         /* Head */
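        /* The IV sits between the 802.11 header and the payload, so slide the
         * header forward over it and pull the now-stale bytes off the front.
         */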
1537         if (status->flag & RX_FLAG_IV_STRIPPED) {
1538                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1539                 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1540
1541                 memmove((void *)msdu->data + crypto_len,
1542                         (void *)msdu->data, hdr_len);
1543                 skb_pull(msdu, crypto_len);
1544         }
1545 }
1546
1547 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1548                                           struct sk_buff *msdu,
1549                                           struct ieee80211_rx_status *status,
1550                                           const u8 first_hdr[64],
1551                                           enum htt_rx_mpdu_encrypt_type enctype)
1552 {
1553         struct ath10k_hw_params *hw = &ar->hw_params;
1554         struct ieee80211_hdr *hdr;
1555         struct htt_rx_desc *rxd;
1556         size_t hdr_len;
1557         u8 da[ETH_ALEN];
1558         u8 sa[ETH_ALEN];
1559         int l3_pad_bytes;
1560         int bytes_aligned = ar->hw_params.decap_align_bytes;
1561
1562         /* Delivered decapped frame:
1563          * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1564          * [rfc1042/llc]
1565          *
1566          * Note: The nwifi header doesn't have QoS Control and is
1567          * (always?) a 3addr frame.
1568          *
1569          * Note2: There's no A-MSDU subframe header, even if the frame is
1570          * part of an A-MSDU.
1571          */
1572
1573         /* pull decapped header and copy SA & DA */
1574         rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
1575                                     hw->rx_desc_ops->rx_desc_size);
1576
1577         l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1578         skb_put(msdu, l3_pad_bytes);
1579
1580         hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
1581
1582         hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
1583         ether_addr_copy(da, ieee80211_get_DA(hdr));
1584         ether_addr_copy(sa, ieee80211_get_SA(hdr));
1585         skb_pull(msdu, hdr_len);
1586
1587         /* push original 802.11 header */
1588         hdr = (struct ieee80211_hdr *)first_hdr;
1589         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1590
1591         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1592                 memcpy(skb_push(msdu,
1593                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1594                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1595                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1596         }
1597
1598         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1599
1600         /* original 802.11 header has a different DA and in
1601          * case of 4addr it may also have different SA
1602          */
1603         hdr = (struct ieee80211_hdr *)msdu->data;
1604         ether_addr_copy(ieee80211_get_DA(hdr), da);
1605         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1606 }
1607
1608 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1609                                           struct sk_buff *msdu,
1610                                           enum htt_rx_mpdu_encrypt_type enctype)
1611 {
1612         struct ieee80211_hdr *hdr;
1613         struct ath10k_hw_params *hw = &ar->hw_params;
1614         struct htt_rx_desc *rxd;
1615         struct rx_msdu_end_common *rxd_msdu_end_common;
1616         u8 *rxd_rx_hdr_status;
1617         size_t hdr_len, crypto_len;
1618         void *rfc1042;
1619         bool is_first, is_last, is_amsdu;
1620         int bytes_aligned = ar->hw_params.decap_align_bytes;
1621
1622         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1623                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1624
1625         rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
1626         rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
1627         hdr = (void *)rxd_rx_hdr_status;
1628
1629         is_first = !!(rxd_msdu_end_common->info0 &
1630                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1631         is_last = !!(rxd_msdu_end_common->info0 &
1632                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1633         is_amsdu = !(is_first && is_last);
1634
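        /* rx_hdr_status holds the raw, undecapped frame: for the first msdu
         * the llc/snap header follows the (alignment-padded) 802.11 header and
         * crypto param, and within an a-msdu it additionally follows the
         * subframe header.
         */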
1635         rfc1042 = hdr;
1636
1637         if (is_first) {
1638                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1639                 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1640
1641                 rfc1042 += round_up(hdr_len, bytes_aligned) +
1642                            round_up(crypto_len, bytes_aligned);
1643         }
1644
1645         if (is_amsdu)
1646                 rfc1042 += sizeof(struct amsdu_subframe_hdr);
1647
1648         return rfc1042;
1649 }
1650
1651 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1652                                         struct sk_buff *msdu,
1653                                         struct ieee80211_rx_status *status,
1654                                         const u8 first_hdr[64],
1655                                         enum htt_rx_mpdu_encrypt_type enctype)
1656 {
1657         struct ath10k_hw_params *hw = &ar->hw_params;
1658         struct ieee80211_hdr *hdr;
1659         struct ethhdr *eth;
1660         size_t hdr_len;
1661         void *rfc1042;
1662         u8 da[ETH_ALEN];
1663         u8 sa[ETH_ALEN];
1664         int l3_pad_bytes;
1665         struct htt_rx_desc *rxd;
1666         int bytes_aligned = ar->hw_params.decap_align_bytes;
1667
1668         /* Delivered decapped frame:
1669          * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1670          * [payload]
1671          */
1672
1673         rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1674         if (WARN_ON_ONCE(!rfc1042))
1675                 return;
1676
1677         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1678                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1679
1680         l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1681         skb_put(msdu, l3_pad_bytes);
1682         skb_pull(msdu, l3_pad_bytes);
1683
1684         /* pull decapped header and copy SA & DA */
1685         eth = (struct ethhdr *)msdu->data;
1686         ether_addr_copy(da, eth->h_dest);
1687         ether_addr_copy(sa, eth->h_source);
1688         skb_pull(msdu, sizeof(struct ethhdr));
1689
1690         /* push rfc1042/llc/snap */
1691         memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1692                sizeof(struct rfc1042_hdr));
1693
1694         /* push original 802.11 header */
1695         hdr = (struct ieee80211_hdr *)first_hdr;
1696         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1697
1698         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1699                 memcpy(skb_push(msdu,
1700                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1701                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1702                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1703         }
1704
1705         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1706
1707         /* original 802.11 header has a different DA and in
1708          * case of 4addr it may also have different SA
1709          */
1710         hdr = (struct ieee80211_hdr *)msdu->data;
1711         ether_addr_copy(ieee80211_get_DA(hdr), da);
1712         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1713 }
1714
1715 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1716                                          struct sk_buff *msdu,
1717                                          struct ieee80211_rx_status *status,
1718                                          const u8 first_hdr[64],
1719                                          enum htt_rx_mpdu_encrypt_type enctype)
1720 {
1721         struct ath10k_hw_params *hw = &ar->hw_params;
1722         struct ieee80211_hdr *hdr;
1723         size_t hdr_len;
1724         int l3_pad_bytes;
1725         struct htt_rx_desc *rxd;
1726         int bytes_aligned = ar->hw_params.decap_align_bytes;
1727
1728         /* Delivered decapped frame:
1729          * [amsdu header] <-- replaced with 802.11 hdr
1730          * [rfc1042/llc]
1731          * [payload]
1732          */
1733
1734         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1735                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1736
1737         l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1738
1739         skb_put(msdu, l3_pad_bytes);
1740         skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1741
1742         hdr = (struct ieee80211_hdr *)first_hdr;
1743         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1744
1745         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1746                 memcpy(skb_push(msdu,
1747                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1748                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1749                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1750         }
1751
1752         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1753 }
1754
1755 static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1756                                     struct sk_buff *msdu,
1757                                     struct ieee80211_rx_status *status,
1758                                     u8 first_hdr[64],
1759                                     enum htt_rx_mpdu_encrypt_type enctype,
1760                                     bool is_decrypted)
1761 {
1762         struct ath10k_hw_params *hw = &ar->hw_params;
1763         struct htt_rx_desc *rxd;
1764         struct rx_msdu_start_common *rxd_msdu_start_common;
1765         enum rx_msdu_decap_format decap;
1766
1767         /* First msdu's decapped header:
1768          * [802.11 header] <-- padded to 4 bytes long
1769          * [crypto param] <-- padded to 4 bytes long
1770          * [amsdu header] <-- only if A-MSDU
1771          * [rfc1042/llc]
1772          *
1773          * Other (2nd, 3rd, ..) msdu's decapped header:
1774          * [amsdu header] <-- only if A-MSDU
1775          * [rfc1042/llc]
1776          */
1777
1778         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1779                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
1780
1781         rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1782         decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
1783                    RX_MSDU_START_INFO1_DECAP_FORMAT);
1784
1785         switch (decap) {
1786         case RX_MSDU_DECAP_RAW:
1787                 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1788                                             is_decrypted, first_hdr);
1789                 break;
1790         case RX_MSDU_DECAP_NATIVE_WIFI:
1791                 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1792                                               enctype);
1793                 break;
1794         case RX_MSDU_DECAP_ETHERNET2_DIX:
1795                 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1796                 break;
1797         case RX_MSDU_DECAP_8023_SNAP_LLC:
1798                 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1799                                              enctype);
1800                 break;
1801         }
1802 }
1803
1804 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
1805 {
1806         struct htt_rx_desc *rxd;
1807         struct rx_attention *rxd_attention;
1808         struct rx_msdu_start_common *rxd_msdu_start_common;
1809         u32 flags, info;
1810         bool is_ip4, is_ip6;
1811         bool is_tcp, is_udp;
1812         bool ip_csum_ok, tcpudp_csum_ok;
1813
1814         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1815                                     (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
1816
1817         rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1818         rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
1819         flags = __le32_to_cpu(rxd_attention->flags);
1820         info = __le32_to_cpu(rxd_msdu_start_common->info1);
1821
1822         is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1823         is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1824         is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1825         is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1826         ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1827         tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1828
1829         if (!is_ip4 && !is_ip6)
1830                 return CHECKSUM_NONE;
1831         if (!is_tcp && !is_udp)
1832                 return CHECKSUM_NONE;
1833         if (!ip_csum_ok)
1834                 return CHECKSUM_NONE;
1835         if (!tcpudp_csum_ok)
1836                 return CHECKSUM_NONE;
1837
1838         return CHECKSUM_UNNECESSARY;
1839 }
1840
1841 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
1842                                          struct sk_buff *msdu)
1843 {
1844         msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
1845 }
1846
1847 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
1848                                   enum htt_rx_mpdu_encrypt_type enctype)
1849 {
1850         struct ieee80211_hdr *hdr;
1851         u64 pn = 0;
1852         u8 *ehdr;
1853
1854         hdr = (struct ieee80211_hdr *)skb->data;
1855         ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
1856
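        /* CCMP header layout: PN0 PN1 <rsvd> <key id> PN2 PN3 PN4 PN5;
         * bytes 2 and 3 carry no PN bits and are skipped below.
         */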
1857         if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
1858                 pn = ehdr[0];
1859                 pn |= (u64)ehdr[1] << 8;
1860                 pn |= (u64)ehdr[4] << 16;
1861                 pn |= (u64)ehdr[5] << 24;
1862                 pn |= (u64)ehdr[6] << 32;
1863                 pn |= (u64)ehdr[7] << 40;
1864         }
1865         return pn;
1866 }
1867
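/* Fragmented frames must have a unicast DA: returns true when the fragment
 * may be accepted, i.e. addr1 is not a multicast address.
 */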
1868 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
1869                                                  struct sk_buff *skb)
1870 {
1871         struct ieee80211_hdr *hdr;
1872
1873         hdr = (struct ieee80211_hdr *)skb->data;
1874         return !is_multicast_ether_addr(hdr->addr1);
1875 }
1876
1877 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
1878                                           struct sk_buff *skb,
1879                                           u16 peer_id,
1880                                           enum htt_rx_mpdu_encrypt_type enctype)
1881 {
1882         struct ath10k_peer *peer;
1883         union htt_rx_pn_t *last_pn, new_pn = {0};
1884         struct ieee80211_hdr *hdr;
1885         u8 tid, frag_number;
1886         u32 seq;
1887
1888         peer = ath10k_peer_find_by_id(ar, peer_id);
1889         if (!peer) {
1890                 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
1891                 return false;
1892         }
1893
1894         hdr = (struct ieee80211_hdr *)skb->data;
1895         if (ieee80211_is_data_qos(hdr->frame_control))
1896                 tid = ieee80211_get_tid(hdr);
1897         else
1898                 tid = ATH10K_TXRX_NON_QOS_TID;
1899
1900         last_pn = &peer->frag_tids_last_pn[tid];
1901         new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
1902         frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1903         seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
1904
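        /* Fragment 0 establishes the PN and sequence number for this TID;
         * every later fragment must carry the same sequence number and a PN
         * exactly one greater than its predecessor, else it is dropped as a
         * replay.
         */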
1905         if (frag_number == 0) {
1906                 last_pn->pn48 = new_pn.pn48;
1907                 peer->frag_tids_seq[tid] = seq;
1908         } else {
1909                 if (seq != peer->frag_tids_seq[tid])
1910                         return false;
1911
1912                 if (new_pn.pn48 != last_pn->pn48 + 1)
1913                         return false;
1914
1915                 last_pn->pn48 = new_pn.pn48;
1916         }
1917
1918         return true;
1919 }
1920
1921 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1922                                  struct sk_buff_head *amsdu,
1923                                  struct ieee80211_rx_status *status,
1924                                  bool fill_crypt_header,
1925                                  u8 *rx_hdr,
1926                                  enum ath10k_pkt_rx_err *err,
1927                                  u16 peer_id,
1928                                  bool frag)
1929 {
1930         struct sk_buff *first;
1931         struct sk_buff *last;
1932         struct sk_buff *msdu, *temp;
1933         struct ath10k_hw_params *hw = &ar->hw_params;
1934         struct htt_rx_desc *rxd;
1935         struct rx_attention *rxd_attention;
1936         struct rx_mpdu_start *rxd_mpdu_start;
1937
1938         struct ieee80211_hdr *hdr;
1939         enum htt_rx_mpdu_encrypt_type enctype;
1940         u8 first_hdr[64];
1941         u8 *qos;
1942         bool has_fcs_err;
1943         bool has_crypto_err;
1944         bool has_tkip_err;
1945         bool has_peer_idx_invalid;
1946         bool is_decrypted;
1947         bool is_mgmt;
1948         u32 attention;
1949         bool frag_pn_check = true, multicast_check = true;
1950
1951         if (skb_queue_empty(amsdu))
1952                 return;
1953
1954         first = skb_peek(amsdu);
1955         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1956                                     (void *)first->data - hw->rx_desc_ops->rx_desc_size);
1957
1958         rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1959         rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
1960
1961         is_mgmt = !!(rxd_attention->flags &
1962                      __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1963
1964         enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
1965                      RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1966
1967         /* The first MSDU's rx descriptor in an A-MSDU contains the full
1968          * 802.11 decapped header. It'll be used for undecapping each MSDU.
1969          */
1970         hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
1971         memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1972
1973         if (rx_hdr)
1974                 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1975
1976         /* Each A-MSDU subframe will use the original header as the base and be
1977          * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1978          */
1979         hdr = (void *)first_hdr;
1980
1981         if (ieee80211_is_data_qos(hdr->frame_control)) {
1982                 qos = ieee80211_get_qos_ctl(hdr);
1983                 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1984         }
1985
1986         /* Some attention flags are valid only in the last MSDU. */
1987         last = skb_peek_tail(amsdu);
1988         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
1989                                     (void *)last->data - hw->rx_desc_ops->rx_desc_size);
1990
1991         rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
1992         attention = __le32_to_cpu(rxd_attention->flags);
1993
1994         has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1995         has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1996         has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1997         has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1998
1999         /* Note: If hardware captures an encrypted frame that it can't decrypt,
2000          * e.g. due to fcs error, missing peer or invalid key data, it will
2001          * report the frame as raw.
2002          */
2003         is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
2004                         !has_fcs_err &&
2005                         !has_crypto_err &&
2006                         !has_peer_idx_invalid);
2007
2008         /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
2009         status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2010                           RX_FLAG_MMIC_ERROR |
2011                           RX_FLAG_DECRYPTED |
2012                           RX_FLAG_IV_STRIPPED |
2013                           RX_FLAG_ONLY_MONITOR |
2014                           RX_FLAG_MMIC_STRIPPED);
2015
2016         if (has_fcs_err)
2017                 status->flag |= RX_FLAG_FAILED_FCS_CRC;
2018
2019         if (has_tkip_err)
2020                 status->flag |= RX_FLAG_MMIC_ERROR;
2021
2022         if (err) {
2023                 if (has_fcs_err)
2024                         *err = ATH10K_PKT_RX_ERR_FCS;
2025                 else if (has_tkip_err)
2026                         *err = ATH10K_PKT_RX_ERR_TKIP;
2027                 else if (has_crypto_err)
2028                         *err = ATH10K_PKT_RX_ERR_CRYPT;
2029                 else if (has_peer_idx_invalid)
2030                         *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
2031         }
2032
2033         /* Firmware reports all necessary management frames via WMI already.
2034          * They are not reported to monitor interfaces at all so pass the ones
2035          * coming via HTT to monitor interfaces instead. This simplifies
2036          * matters a lot.
2037          */
2038         if (is_mgmt)
2039                 status->flag |= RX_FLAG_ONLY_MONITOR;
2040
2041         if (is_decrypted) {
2042                 status->flag |= RX_FLAG_DECRYPTED;
2043
2044                 if (likely(!is_mgmt))
2045                         status->flag |= RX_FLAG_MMIC_STRIPPED;
2046
2047                 if (fill_crypt_header)
2048                         status->flag |= RX_FLAG_MIC_STRIPPED |
2049                                         RX_FLAG_ICV_STRIPPED;
2050                 else
2051                         status->flag |= RX_FLAG_IV_STRIPPED;
2052         }
2053
2054         skb_queue_walk(amsdu, msdu) {
2055                 if (frag && !fill_crypt_header && is_decrypted &&
2056                     enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
2057                         frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
2058                                                                       msdu,
2059                                                                       peer_id,
2060                                                                       enctype);
2061
2062                 if (frag)
2063                         multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
2064                                                                                msdu);
2065
2066                 if (!frag_pn_check || !multicast_check) {
2067                         /* Discard the fragment with invalid PN or multicast DA
2068                          */
2069                         temp = msdu->prev;
2070                         __skb_unlink(msdu, amsdu);
2071                         dev_kfree_skb_any(msdu);
2072                         msdu = temp;
2073                         frag_pn_check = true;
2074                         multicast_check = true;
2075                         continue;
2076                 }
2077
2078                 ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
2079
2080                 if (frag && !fill_crypt_header &&
2081                     enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2082                         status->flag &= ~RX_FLAG_MMIC_STRIPPED;
2083
2084                 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
2085                                         is_decrypted);
2086
2087                 /* Undecapping involves copying the original 802.11 header back
2088                  * to the sk_buff. If the frame is protected and hardware has
2089                  * decrypted it then remove the protected bit.
2090                  */
2091                 if (!is_decrypted)
2092                         continue;
2093                 if (is_mgmt)
2094                         continue;
2095
2096                 if (fill_crypt_header)
2097                         continue;
2098
2099                 hdr = (void *)msdu->data;
2100                 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2101
2102                 if (frag && !fill_crypt_header &&
2103                     enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
2104                         status->flag &= ~(RX_FLAG_IV_STRIPPED |
2105                                           RX_FLAG_MMIC_STRIPPED);
2106         }
2107 }
2108
2109 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
2110                                     struct sk_buff_head *amsdu,
2111                                     struct ieee80211_rx_status *status)
2112 {
2113         struct sk_buff *msdu;
2114         struct sk_buff *first_subframe;
2115
2116         first_subframe = skb_peek(amsdu);
2117
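        /* All subframes of an A-MSDU share the MPDU's PN, so allow mac80211's
         * PN check to accept a repeated PN for everything but the first
         * subframe.
         */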
2118         while ((msdu = __skb_dequeue(amsdu))) {
2119                 /* Setup per-MSDU flags */
2120                 if (skb_queue_empty(amsdu))
2121                         status->flag &= ~RX_FLAG_AMSDU_MORE;
2122                 else
2123                         status->flag |= RX_FLAG_AMSDU_MORE;
2124
2125                 if (msdu == first_subframe) {
2126                         first_subframe = NULL;
2127                         status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
2128                 } else {
2129                         status->flag |= RX_FLAG_ALLOW_SAME_PN;
2130                 }
2131
2132                 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2133         }
2134 }
2135
2136 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
2137                                unsigned long *unchain_cnt)
2138 {
2139         struct sk_buff *skb, *first;
2140         int space;
2141         int total_len = 0;
2142         int amsdu_len = skb_queue_len(amsdu);
2143
2144         /* TODO:  This might be optimized by using
2145          * skb_try_coalesce or a similar method to
2146          * decrease copying, or maybe get mac80211 to
2147          * provide a way to just receive a list of
2148          * skbs?
2149          */
2150
2151         first = __skb_dequeue(amsdu);
2152
2153         /* Allocate total length all at once. */
2154         skb_queue_walk(amsdu, skb)
2155                 total_len += skb->len;
2156
2157         space = total_len - skb_tailroom(first);
2158         if ((space > 0) &&
2159             (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
2160                 /* TODO:  bump some rx-oom error stat */
2161                 /* put it back together so we can free the
2162                  * whole list at once.
2163                  */
2164                 __skb_queue_head(amsdu, first);
2165                 return -1;
2166         }
2167
2168         /* Walk list again, copying contents into
2169          * msdu_head
2170          */
2171         while ((skb = __skb_dequeue(amsdu))) {
2172                 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
2173                                           skb->len);
2174                 dev_kfree_skb_any(skb);
2175         }
2176
2177         __skb_queue_head(amsdu, first);
2178
2179         *unchain_cnt += amsdu_len - 1;
2180
2181         return 0;
2182 }
2183
2184 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
2185                                     struct sk_buff_head *amsdu,
2186                                     unsigned long *drop_cnt,
2187                                     unsigned long *unchain_cnt)
2188 {
2189         struct sk_buff *first;
2190         struct ath10k_hw_params *hw = &ar->hw_params;
2191         struct htt_rx_desc *rxd;
2192         struct rx_msdu_start_common *rxd_msdu_start_common;
2193         struct rx_frag_info_common *rxd_frag_info;
2194         enum rx_msdu_decap_format decap;
2195
2196         first = skb_peek(amsdu);
2197         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2198                                     (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2199
2200         rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
2201         rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
2202         decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
2203                    RX_MSDU_START_INFO1_DECAP_FORMAT);
2204
2205         /* FIXME: The current unchaining logic can only handle the simple case
2206          * of raw msdu chaining. If decapping is other than raw, the chaining
2207          * may be more complex and isn't handled by the current code. Don't
2208          * even try re-constructing such frames - it'll be pretty much garbage.
2209          */
2210         if (decap != RX_MSDU_DECAP_RAW ||
2211             skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
2212                 *drop_cnt += skb_queue_len(amsdu);
2213                 __skb_queue_purge(amsdu);
2214                 return;
2215         }
2216
2217         ath10k_unchain_msdu(amsdu, unchain_cnt);
2218 }
2219
2220 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
2221                                          struct sk_buff_head *amsdu)
2222 {
2223         u8 *subframe_hdr;
2224         struct sk_buff *first;
2225         bool is_first, is_last;
2226         struct ath10k_hw_params *hw = &ar->hw_params;
2227         struct htt_rx_desc *rxd;
2228         struct rx_msdu_end_common *rxd_msdu_end_common;
2229         struct rx_mpdu_start *rxd_mpdu_start;
2230         struct ieee80211_hdr *hdr;
2231         size_t hdr_len, crypto_len;
2232         enum htt_rx_mpdu_encrypt_type enctype;
2233         int bytes_aligned = ar->hw_params.decap_align_bytes;
2234
2235         first = skb_peek(amsdu);
2236
2237         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2238                                     (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2239
2240         rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
2241         rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
2242         hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
2243
2244         is_first = !!(rxd_msdu_end_common->info0 &
2245                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
2246         is_last = !!(rxd_msdu_end_common->info0 &
2247                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
2248
2249         /* Return in case of non-aggregated msdu */
2250         if (is_first && is_last)
2251                 return true;
2252
2253         /* Reject the list if its first msdu lacks the first msdu flag */
2254         if (!is_first)
2255                 return false;
2256
2257         enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
2258                      RX_MPDU_START_INFO0_ENCRYPT_TYPE);
2259
2260         hdr_len = ieee80211_hdrlen(hdr->frame_control);
2261         crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
2262
2263         subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
2264                        crypto_len;
2265
2266         /* Validate that the amsdu has a proper first subframe.
2267          * There is a chance that a single msdu is received as an amsdu when
2268          * the unauthenticated amsdu flag of a QoS header
2269          * gets flipped in non-SPP A-MSDUs; in such cases the first
2270          * subframe has an llc/snap header in place of a valid da.
2271          * Return false if the da matches the rfc1042 pattern.
2272          */
2273         if (ether_addr_equal(subframe_hdr, rfc1042_header))
2274                 return false;
2275
2276         return true;
2277 }
2278
2279 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
2280                                         struct sk_buff_head *amsdu,
2281                                         struct ieee80211_rx_status *rx_status)
2282 {
2283         if (!rx_status->freq) {
2284                 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2285                 return false;
2286         }
2287
2288         if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2289                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2290                 return false;
2291         }
2292
2293         if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
2294                 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
2295                 return false;
2296         }
2297
2298         return true;
2299 }
2300
2301 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
2302                                    struct sk_buff_head *amsdu,
2303                                    struct ieee80211_rx_status *rx_status,
2304                                    unsigned long *drop_cnt)
2305 {
2306         if (skb_queue_empty(amsdu))
2307                 return;
2308
2309         if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2310                 return;
2311
2312         if (drop_cnt)
2313                 *drop_cnt += skb_queue_len(amsdu);
2314
2315         __skb_queue_purge(amsdu);
2316 }
2317
2318 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2319 {
2320         struct ath10k *ar = htt->ar;
2321         struct ieee80211_rx_status *rx_status = &htt->rx_status;
2322         struct sk_buff_head amsdu;
2323         int ret;
2324         unsigned long drop_cnt = 0;
2325         unsigned long unchain_cnt = 0;
2326         unsigned long drop_cnt_filter = 0;
2327         unsigned long msdus_to_queue, num_msdus;
2328         enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2329         u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2330
2331         __skb_queue_head_init(&amsdu);
2332
2333         spin_lock_bh(&htt->rx_ring.lock);
2334         if (htt->rx_confused) {
2335                 spin_unlock_bh(&htt->rx_ring.lock);
2336                 return -EIO;
2337         }
2338         ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2339         spin_unlock_bh(&htt->rx_ring.lock);
2340
2341         if (ret < 0) {
2342                 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2343                 __skb_queue_purge(&amsdu);
2344                 /* FIXME: It's probably a good idea to reboot the
2345                  * device instead of leaving it inoperable.
2346                  */
2347                 htt->rx_confused = true;
2348                 return ret;
2349         }
2350
2351         num_msdus = skb_queue_len(&amsdu);
2352
2353         ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
2354
2355         /* a return value of 1 indicates chained msdus */
2356         if (ret > 0)
2357                 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2358
2359         ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2360         ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
2361                              false);
2362         msdus_to_queue = skb_queue_len(&amsdu);
2363         ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2364
2365         ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2366                                        unchain_cnt, drop_cnt, drop_cnt_filter,
2367                                        msdus_to_queue);
2368
2369         return 0;
2370 }
2371
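/* Extract the packet number from a high-latency rx descriptor. A 48-bit PN
 * spans two descriptor words (only the low 16 bits of the upper word are
 * valid); a 24-bit PN fits entirely in the lower word.
 */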
2372 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2373                                           union htt_rx_pn_t *pn,
2374                                           int pn_len_bits)
2375 {
2376         switch (pn_len_bits) {
2377         case 48:
2378                 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2379                            ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2380                 break;
2381         case 24:
2382                 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2383                 break;
2384         }
2385 }
2386
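/* Returns true (i.e. a replay was detected) when new_pn fails to advance
 * past old_pn within the 48-bit PN space.
 */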
2387 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2388                                    union htt_rx_pn_t *old_pn)
2389 {
2390         return ((new_pn->pn48 & 0xffffffffffffULL) <=
2391                 (old_pn->pn48 & 0xffffffffffffULL));
2392 }
2393
2394 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2395                                              struct ath10k_peer *peer,
2396                                              struct htt_rx_indication_hl *rx)
2397 {
2398         bool last_pn_valid, pn_invalid = false;
2399         enum htt_txrx_sec_cast_type sec_index;
2400         enum htt_security_types sec_type;
2401         union htt_rx_pn_t new_pn = {0};
2402         struct htt_hl_rx_desc *rx_desc;
2403         union htt_rx_pn_t *last_pn;
2404         u32 rx_desc_info, tid;
2405         int num_mpdu_ranges;
2406
2407         lockdep_assert_held(&ar->data_lock);
2408
2409         if (!peer)
2410                 return false;
2411
2412         if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2413                 return false;
2414
2415         num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2416                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2417
2418         rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2419         rx_desc_info = __le32_to_cpu(rx_desc->info);
2420
2421         if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2422                 return false;
2423
2424         tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2425         last_pn_valid = peer->tids_last_pn_valid[tid];
2426         last_pn = &peer->tids_last_pn[tid];
2427
2428         if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2429                 sec_index = HTT_TXRX_SEC_MCAST;
2430         else
2431                 sec_index = HTT_TXRX_SEC_UCAST;
2432
2433         sec_type = peer->rx_pn[sec_index].sec_type;
2434         ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2435
2436         if (sec_type != HTT_SECURITY_AES_CCMP &&
2437             sec_type != HTT_SECURITY_TKIP &&
2438             sec_type != HTT_SECURITY_TKIP_NOMIC)
2439                 return false;
2440
2441         if (last_pn_valid)
2442                 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2443         else
2444                 peer->tids_last_pn_valid[tid] = true;
2445
2446         if (!pn_invalid)
2447                 last_pn->pn48 = new_pn.pn48;
2448
2449         return pn_invalid;
2450 }
2451
2452 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2453                                          struct htt_rx_indication_hl *rx,
2454                                          struct sk_buff *skb,
2455                                          enum htt_rx_pn_check_type check_pn_type,
2456                                          enum htt_rx_tkip_demic_type tkip_mic_type)
2457 {
2458         struct ath10k *ar = htt->ar;
2459         struct ath10k_peer *peer;
2460         struct htt_rx_indication_mpdu_range *mpdu_ranges;
2461         struct fw_rx_desc_hl *fw_desc;
2462         enum htt_txrx_sec_cast_type sec_index;
2463         enum htt_security_types sec_type;
2464         union htt_rx_pn_t new_pn = {0};
2465         struct htt_hl_rx_desc *rx_desc;
2466         struct ieee80211_hdr *hdr;
2467         struct ieee80211_rx_status *rx_status;
2468         u16 peer_id;
2469         u8 rx_desc_len;
2470         int num_mpdu_ranges;
2471         size_t tot_hdr_len;
2472         struct ieee80211_channel *ch;
2473         bool pn_invalid, qos, first_msdu;
2474         u32 tid, rx_desc_info;
2475
2476         peer_id = __le16_to_cpu(rx->hdr.peer_id);
2477         tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2478
2479         spin_lock_bh(&ar->data_lock);
2480         peer = ath10k_peer_find_by_id(ar, peer_id);
2481         spin_unlock_bh(&ar->data_lock);
2482         if (!peer && peer_id != HTT_INVALID_PEERID)
2483                 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2484
2485         if (!peer)
2486                 return true;
2487
2488         num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2489                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2490         mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2491         fw_desc = &rx->fw_desc;
2492         rx_desc_len = fw_desc->len;
2493
2494         if (fw_desc->u.bits.discard) {
2495                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
2496                 goto err;
2497         }
2498
2499         /* I have not yet seen any case where num_mpdu_ranges > 1.
2500          * qcacld does not seem to handle that case either, so we introduce
2501          * the same limitation here as well.
2502          */
2503         if (num_mpdu_ranges > 1)
2504                 ath10k_warn(ar,
2505                             "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2506                             num_mpdu_ranges);
2507
2508         if (mpdu_ranges->mpdu_range_status !=
2509             HTT_RX_IND_MPDU_STATUS_OK &&
2510             mpdu_ranges->mpdu_range_status !=
2511             HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2512                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2513                            mpdu_ranges->mpdu_range_status);
2514                 goto err;
2515         }
2516
2517         rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2518         rx_desc_info = __le32_to_cpu(rx_desc->info);
2519
2520         if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2521                 sec_index = HTT_TXRX_SEC_MCAST;
2522         else
2523                 sec_index = HTT_TXRX_SEC_UCAST;
2524
2525         sec_type = peer->rx_pn[sec_index].sec_type;
2526         first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2527
2528         ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2529
2530         if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2531                 spin_lock_bh(&ar->data_lock);
2532                 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2533                 spin_unlock_bh(&ar->data_lock);
2534
2535                 if (pn_invalid)
2536                         goto err;
2537         }
2538
2539         /* Strip off everything preceding the MAC header before delivering
2540          * the frame to mac80211.
2541          */
2542         tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2543                       sizeof(rx->ppdu) + sizeof(rx->prefix) +
2544                       sizeof(rx->fw_desc) +
2545                       sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2546
2547         skb_pull(skb, tot_hdr_len);
2548
2549         hdr = (struct ieee80211_hdr *)skb->data;
2550         qos = ieee80211_is_data_qos(hdr->frame_control);
2551
2552         rx_status = IEEE80211_SKB_RXCB(skb);
2553         memset(rx_status, 0, sizeof(*rx_status));
2554
2555         if (rx->ppdu.combined_rssi == 0) {
2556                 /* SDIO firmware does not provide signal */
2557                 rx_status->signal = 0;
2558                 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2559         } else {
2560                 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2561                         rx->ppdu.combined_rssi;
2562                 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2563         }
2564
2565         spin_lock_bh(&ar->data_lock);
2566         ch = ar->scan_channel;
2567         if (!ch)
2568                 ch = ar->rx_channel;
2569         if (!ch)
2570                 ch = ath10k_htt_rx_h_any_channel(ar);
2571         if (!ch)
2572                 ch = ar->tgt_oper_chan;
2573         spin_unlock_bh(&ar->data_lock);
2574
2575         if (ch) {
2576                 rx_status->band = ch->band;
2577                 rx_status->freq = ch->center_freq;
2578         }
2579         if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2580                 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2581         else
2582                 rx_status->flag |= RX_FLAG_AMSDU_MORE;
2583
2584         /* Not entirely sure about this, but all frames from the chipset have
2585          * the protected flag set even though they have already been decrypted.
2586          * Clearing this flag is necessary in order for mac80211 not to drop
2587          * the frame.
2588          * TODO: Verify this is always the case or find out a way to check
2589          * if there has been hw decryption.
2590          */
2591         if (ieee80211_has_protected(hdr->frame_control)) {
2592                 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2593                 rx_status->flag |= RX_FLAG_DECRYPTED |
2594                                    RX_FLAG_IV_STRIPPED |
2595                                    RX_FLAG_MMIC_STRIPPED;
2596
2597                 if (tid < IEEE80211_NUM_TIDS &&
2598                     first_msdu &&
2599                     check_pn_type == HTT_RX_PN_CHECK &&
2600                    (sec_type == HTT_SECURITY_AES_CCMP ||
2601                     sec_type == HTT_SECURITY_TKIP ||
2602                     sec_type == HTT_SECURITY_TKIP_NOMIC)) {
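                        /* Firmware has stripped the original IV, but mac80211
                         * still expects to see one in order to run its own PN
                         * replay check, so rebuild a minimal CCMP/TKIP IV
                         * carrying the PN and the pairwise key index.
                         */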
2603                         u8 offset, *ivp, i;
2604                         s8 keyidx = 0;
2605                         __le64 pn48 = cpu_to_le64(new_pn.pn48);
2606
2607                         hdr = (struct ieee80211_hdr *)skb->data;
2608                         offset = ieee80211_hdrlen(hdr->frame_control);
2609                         hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2610                         rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2611
2612                         memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2613                                 skb->data, offset);
2614                         skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2615                         ivp = skb->data + offset;
2616                         memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2617                         /* Ext IV */
2618                         ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2619
2620                         for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2621                                 if (peer->keys[i] &&
2622                                     peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2623                                         keyidx = peer->keys[i]->keyidx;
2624                         }
2625
2626                         /* Key ID */
2627                         ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2628
2629                         if (sec_type == HTT_SECURITY_AES_CCMP) {
2630                                 rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2631                                 /* pn 0, pn 1 */
2632                                 memcpy(skb->data + offset, &pn48, 2);
2633                                 /* pn 2, pn 3, pn 4, pn 5 */
2634                                 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2635                         } else {
2636                                 rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2637                                 /* TSC 0 */
2638                                 memcpy(skb->data + offset + 2, &pn48, 1);
2639                                 /* TSC 1 */
2640                                 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
2641                                 /* TSC 2, TSC 3, TSC 4, TSC 5 */
2642                                 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2643                         }
2644                 }
2645         }
2646
2647         if (tkip_mic_type == HTT_RX_TKIP_MIC)
2648                 rx_status->flag &= ~(RX_FLAG_IV_STRIPPED |
2649                                      RX_FLAG_MMIC_STRIPPED);
2650
2651         if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2652                 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2653
2654         if (!qos && tid < IEEE80211_NUM_TIDS) {
2655                 u8 offset;
2656                 __le16 qos_ctrl = 0;
2657
2658                 hdr = (struct ieee80211_hdr *)skb->data;
2659                 offset = ieee80211_hdrlen(hdr->frame_control);
2660
2661                 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2662                 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2663                 skb_push(skb, IEEE80211_QOS_CTL_LEN);
2664                 qos_ctrl = cpu_to_le16(tid);
2665                 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2666         }
2667
2668         if (ar->napi.dev)
2669                 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2670         else
2671                 ieee80211_rx_ni(ar->hw, skb);
2672
2673         /* We have delivered the skb to the upper layers (mac80211) so we
2674          * must not free it.
2675          */
2676         return false;
2677 err:
2678         /* Tell the caller that it must free the skb since we have not
2679          * consumed it
2680          */
2681         return true;
2682 }
2683
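/* The fragment decap helpers below strip the crypto IV from the front of a
 * fragment (sliding the preceding headers forward over it) and trim the
 * MIC/ICV from its tail, leaving a plain 802.11 frame for mac80211.
 */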
2684 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2685                                                u16 head_len,
2686                                                u16 hdr_len)
2687 {
2688         u8 *ivp, *orig_hdr;
2689
2690         orig_hdr = skb->data;
2691         ivp = orig_hdr + hdr_len + head_len;
2692
2693         /* the ExtIV bit is always set to 1 for TKIP */
2694         if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2695                 return -EINVAL;
2696
2697         memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2698         skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2699         skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2700         return 0;
2701 }
2702
2703 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2704                                                  u16 head_len,
2705                                                  u16 hdr_len)
2706 {
2707         u8 *ivp, *orig_hdr;
2708
2709         orig_hdr = skb->data;
2710         ivp = orig_hdr + hdr_len + head_len;
2711
2712         /* the ExtIV bit is always set to 1 for TKIP */
2713         if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2714                 return -EINVAL;
2715
2716         memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2717         skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2718         skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2719         return 0;
2720 }
2721
2722 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2723                                          u16 head_len,
2724                                          u16 hdr_len)
2725 {
2726         u8 *ivp, *orig_hdr;
2727
2728         orig_hdr = skb->data;
2729         ivp = orig_hdr + hdr_len + head_len;
2730
2731         /* the ExtIV bit is always set to 1 for CCMP */
2732         if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2733                 return -EINVAL;
2734
2735         skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2736         memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2737         skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2738         return 0;
2739 }
2740
2741 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2742                                         u16 head_len,
2743                                         u16 hdr_len)
2744 {
2745         u8 *orig_hdr;
2746
2747         orig_hdr = skb->data;
2748
2749         memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2750                 orig_hdr, head_len + hdr_len);
2751         skb_pull(skb, IEEE80211_WEP_IV_LEN);
2752         skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
2753         return 0;
2754 }
2755
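     /* Handle an rx fragment indication for high latency (HL) targets.
      * Unencrypted fragments are passed straight to the normal HL rx path;
      * encrypted ones are decapped according to the peer's cipher and, for
      * CCMP, later fragments are checked for sequence number and PN
      * continuity against the previous fragment. Returns true if the
      * caller still owns the skb and must free it.
      */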
2756 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2757                                               struct htt_rx_fragment_indication *rx,
2758                                               struct sk_buff *skb)
2759 {
2760         struct ath10k *ar = htt->ar;
2761         enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2762         enum htt_txrx_sec_cast_type sec_index;
2763         struct htt_rx_indication_hl *rx_hl;
2764         enum htt_security_types sec_type;
2765         u32 tid, frag, seq, rx_desc_info;
2766         union htt_rx_pn_t new_pn = {0};
2767         struct htt_hl_rx_desc *rx_desc;
2768         u16 peer_id, sc, hdr_space;
2769         union htt_rx_pn_t *last_pn;
2770         struct ieee80211_hdr *hdr;
2771         int ret, num_mpdu_ranges;
2772         struct ath10k_peer *peer;
2773         struct htt_resp *resp;
2774         size_t tot_hdr_len;
2775
2776         resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2777         skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2778         skb_trim(skb, skb->len - FCS_LEN);
2779
2780         peer_id = __le16_to_cpu(rx->peer_id);
2781         rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2782
2783         spin_lock_bh(&ar->data_lock);
2784         peer = ath10k_peer_find_by_id(ar, peer_id);
2785         if (!peer) {
2786                 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2787                 goto err;
2788         }
2789
2790         num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2791                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2792
2793         tot_hdr_len = sizeof(struct htt_resp_hdr) +
2794                       sizeof(rx_hl->hdr) +
2795                       sizeof(rx_hl->ppdu) +
2796                       sizeof(rx_hl->prefix) +
2797                       sizeof(rx_hl->fw_desc) +
2798                       sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2799
2800         tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2801         rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2802         rx_desc_info = __le32_to_cpu(rx_desc->info);
2803
2804         hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2805
2806         if (is_multicast_ether_addr(hdr->addr1)) {
2807                 /* Discard the fragment with multicast DA */
2808                 goto err;
2809         }
2810
2811         if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2812                 spin_unlock_bh(&ar->data_lock);
2813                 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2814                                                     HTT_RX_NON_PN_CHECK,
2815                                                     HTT_RX_NON_TKIP_MIC);
2816         }
2817
2818         if (ieee80211_has_retry(hdr->frame_control))
2819                 goto err;
2820
2821         hdr_space = ieee80211_hdrlen(hdr->frame_control);
2822         sc = __le16_to_cpu(hdr->seq_ctrl);
2823         seq = IEEE80211_SEQ_TO_SN(sc);
2824         frag = sc & IEEE80211_SCTL_FRAG;
2825
2826         sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2827                     HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2828         sec_type = peer->rx_pn[sec_index].sec_type;
2829         ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2830
2831         switch (sec_type) {
2832         case HTT_SECURITY_TKIP:
2833                 tkip_mic = HTT_RX_TKIP_MIC;
2834                 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
2835                                                             tot_hdr_len +
2836                                                             rx_hl->fw_desc.len,
2837                                                             hdr_space);
2838                 if (ret)
2839                         goto err;
2840                 break;
2841         case HTT_SECURITY_TKIP_NOMIC:
2842                 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
2843                                                           tot_hdr_len +
2844                                                           rx_hl->fw_desc.len,
2845                                                           hdr_space);
2846                 if (ret)
2847                         goto err;
2848                 break;
2849         case HTT_SECURITY_AES_CCMP:
2850                 ret = ath10k_htt_rx_frag_ccmp_decap(skb,
2851                                                     tot_hdr_len + rx_hl->fw_desc.len,
2852                                                     hdr_space);
2853                 if (ret)
2854                         goto err;
2855                 break;
2856         case HTT_SECURITY_WEP128:
2857         case HTT_SECURITY_WEP104:
2858         case HTT_SECURITY_WEP40:
2859                 ret = ath10k_htt_rx_frag_wep_decap(skb,
2860                                                    tot_hdr_len + rx_hl->fw_desc.len,
2861                                                    hdr_space);
2862                 if (ret)
2863                         goto err;
2864                 break;
2865         default:
2866                 break;
2867         }
2868
2869         resp = (struct htt_resp *)(skb->data);
2870
2871         if (sec_type != HTT_SECURITY_AES_CCMP &&
2872             sec_type != HTT_SECURITY_TKIP &&
2873             sec_type != HTT_SECURITY_TKIP_NOMIC) {
2874                 spin_unlock_bh(&ar->data_lock);
2875                 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2876                                                     HTT_RX_NON_PN_CHECK,
2877                                                     HTT_RX_NON_TKIP_MIC);
2878         }
2879
2880         last_pn = &peer->frag_tids_last_pn[tid];
2881
2882         if (frag == 0) {
2883                 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
2884                         goto err;
2885
2886                 last_pn->pn48 = new_pn.pn48;
2887                 peer->frag_tids_seq[tid] = seq;
2888         } else if (sec_type == HTT_SECURITY_AES_CCMP) {
2889                 if (seq != peer->frag_tids_seq[tid])
2890                         goto err;
2891
2892                 if (new_pn.pn48 != last_pn->pn48 + 1)
2893                         goto err;
2894
2895                 last_pn->pn48 = new_pn.pn48;
2896                 last_pn = &peer->tids_last_pn[tid];
2897                 last_pn->pn48 = new_pn.pn48;
2898         }
2899
2900         spin_unlock_bh(&ar->data_lock);
2901
2902         return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2903                                             HTT_RX_NON_PN_CHECK, tkip_mic);
2904
2905 err:
2906         spin_unlock_bh(&ar->data_lock);
2907
2908         /* Tell the caller that it must free the skb since we have not
2909          * consumed it
2910          */
2911         return true;
2912 }
2913
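     /* Low latency (LL) rx indications only announce how many MPDUs are
      * ready; the frames themselves are popped from the rx ring later.
      * Account the newly ready MPDUs and update the per-peer A-MPDU rx
      * statistics.
      */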
2914 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2915                                          struct htt_rx_indication *rx)
2916 {
2917         struct ath10k *ar = htt->ar;
2918         struct htt_rx_indication_mpdu_range *mpdu_ranges;
2919         int num_mpdu_ranges;
2920         int i, mpdu_count = 0;
2921         u16 peer_id;
2922         u8 tid;
2923
2924         num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2925                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2926         peer_id = __le16_to_cpu(rx->hdr.peer_id);
2927         tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2928
2929         mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
2930
2931         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
2932                         rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
2933
2934         for (i = 0; i < num_mpdu_ranges; i++)
2935                 mpdu_count += mpdu_ranges[i].mpdu_count;
2936
2937         atomic_add(mpdu_count, &htt->num_mpdus_ready);
2938
2939         ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2940                                              num_mpdu_ranges);
2941 }
2942
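     /* Handle a data tx completion indication: translate the firmware
      * status for every reported MSDU id and either complete it directly
      * (HL targets) or queue it on the txdone fifo. Optionally parse the
      * appended ack RSSI values and the per-peer PPDU duration records
      * used for airtime accounting.
      */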
2943 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2944                                        struct sk_buff *skb)
2945 {
2946         struct ath10k_htt *htt = &ar->htt;
2947         struct htt_resp *resp = (struct htt_resp *)skb->data;
2948         struct htt_tx_done tx_done = {};
2949         int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2950         __le16 msdu_id, *msdus;
2951         bool rssi_enabled = false;
2952         u8 msdu_count = 0, num_airtime_records, tid;
2953         int i, htt_pad = 0;
2954         struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2955         struct ath10k_peer *peer;
2956         u16 ppdu_info_offset = 0, peer_id;
2957         u32 tx_duration;
2958
2959         switch (status) {
2960         case HTT_DATA_TX_STATUS_NO_ACK:
2961                 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2962                 break;
2963         case HTT_DATA_TX_STATUS_OK:
2964                 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2965                 break;
2966         case HTT_DATA_TX_STATUS_DISCARD:
2967         case HTT_DATA_TX_STATUS_POSTPONE:
2968                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2969                 break;
2970         default:
2971                 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2972                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2973                 break;
2974         }
2975
2976         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2977                    resp->data_tx_completion.num_msdus);
2978
2979         msdu_count = resp->data_tx_completion.num_msdus;
2980         msdus = resp->data_tx_completion.msdus;
2981         rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2982
2983         if (rssi_enabled)
2984                 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
2985                                                             resp);
2986
2987         for (i = 0; i < msdu_count; i++) {
2988                 msdu_id = msdus[i];
2989                 tx_done.msdu_id = __le16_to_cpu(msdu_id);
2990
2991                 if (rssi_enabled) {
2992                         /* The total number of MSDUs should be even;
2993                          * if an odd number is sent, firmware fills the
2994                          * last msdu id with 0xffff.
2995                          */
2996                         if (msdu_count & 0x01) {
2997                                 msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2998                                 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
2999                         } else {
3000                                 msdu_id = msdus[msdu_count + i + htt_pad];
3001                                 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
3002                         }
3003                 }
3004
3005                 /* kfifo_put: In practice firmware shouldn't fire off per-CE
3006                  * interrupt and main interrupt (MSI/-X range case) for the same
3007                  * HTC service so it should be safe to use kfifo_put w/o lock.
3008                  *
3009                  * From kfifo_put() documentation:
3010                  *  Note that with only one concurrent reader and one concurrent
3011                  *  writer, you don't need extra locking to use these macro.
3012                  */
3013                 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
3014                         ath10k_txrx_tx_unref(htt, &tx_done);
3015                 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
3016                         ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
3017                                     tx_done.msdu_id, tx_done.status);
3018                         ath10k_txrx_tx_unref(htt, &tx_done);
3019                 }
3020         }
3021
3022         if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
3023                 return;
3024
3025         ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
3026
3027         if (rssi_enabled)
3028                 ppdu_info_offset += ppdu_info_offset;
3029
3030         if (resp->data_tx_completion.flags2 &
3031             (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
3032                 ppdu_info_offset += 2;
3033
3034         ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
3035         num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
3036                                         __le32_to_cpu(ppdu_info->info0));
3037
3038         for (i = 0; i < num_airtime_records; i++) {
3039                 struct htt_data_tx_ppdu_dur *ppdu_dur;
3040                 u32 info0;
3041
3042                 ppdu_dur = &ppdu_info->ppdu_dur[i];
3043                 info0 = __le32_to_cpu(ppdu_dur->info0);
3044
3045                 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
3046                                     info0);
3047                 rcu_read_lock();
3048                 spin_lock_bh(&ar->data_lock);
3049
3050                 peer = ath10k_peer_find_by_id(ar, peer_id);
3051                 if (!peer || !peer->sta) {
3052                         spin_unlock_bh(&ar->data_lock);
3053                         rcu_read_unlock();
3054                         continue;
3055                 }
3056
3057                 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
3058                                                 IEEE80211_QOS_CTL_TID_MASK;
3059                 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
3060
3061                 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
3062
3063                 spin_unlock_bh(&ar->data_lock);
3064                 rcu_read_unlock();
3065         }
3066 }
3067
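     /* Firmware-offloaded rx BA session establishment: resolve the peer
      * and vif for the event and notify mac80211 that a block ack session
      * was started by the target.
      */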
3068 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
3069 {
3070         struct htt_rx_addba *ev = &resp->rx_addba;
3071         struct ath10k_peer *peer;
3072         struct ath10k_vif *arvif;
3073         u16 info0, tid, peer_id;
3074
3075         info0 = __le16_to_cpu(ev->info0);
3076         tid = MS(info0, HTT_RX_BA_INFO0_TID);
3077         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3078
3079         ath10k_dbg(ar, ATH10K_DBG_HTT,
3080                    "htt rx addba tid %u peer_id %u size %u\n",
3081                    tid, peer_id, ev->window_size);
3082
3083         spin_lock_bh(&ar->data_lock);
3084         peer = ath10k_peer_find_by_id(ar, peer_id);
3085         if (!peer) {
3086                 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3087                             peer_id);
3088                 spin_unlock_bh(&ar->data_lock);
3089                 return;
3090         }
3091
3092         arvif = ath10k_get_arvif(ar, peer->vdev_id);
3093         if (!arvif) {
3094                 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3095                             peer->vdev_id);
3096                 spin_unlock_bh(&ar->data_lock);
3097                 return;
3098         }
3099
3100         ath10k_dbg(ar, ATH10K_DBG_HTT,
3101                    "htt rx start rx ba session sta %pM tid %u size %u\n",
3102                    peer->addr, tid, ev->window_size);
3103
3104         ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3105         spin_unlock_bh(&ar->data_lock);
3106 }
3107
3108 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
3109 {
3110         struct htt_rx_delba *ev = &resp->rx_delba;
3111         struct ath10k_peer *peer;
3112         struct ath10k_vif *arvif;
3113         u16 info0, tid, peer_id;
3114
3115         info0 = __le16_to_cpu(ev->info0);
3116         tid = MS(info0, HTT_RX_BA_INFO0_TID);
3117         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3118
3119         ath10k_dbg(ar, ATH10K_DBG_HTT,
3120                    "htt rx delba tid %u peer_id %u\n",
3121                    tid, peer_id);
3122
3123         spin_lock_bh(&ar->data_lock);
3124         peer = ath10k_peer_find_by_id(ar, peer_id);
3125         if (!peer) {
3126                 ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
3127                             peer_id);
3128                 spin_unlock_bh(&ar->data_lock);
3129                 return;
3130         }
3131
3132         arvif = ath10k_get_arvif(ar, peer->vdev_id);
3133         if (!arvif) {
3134                 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
3135                             peer->vdev_id);
3136                 spin_unlock_bh(&ar->data_lock);
3137                 return;
3138         }
3139
3140         ath10k_dbg(ar, ATH10K_DBG_HTT,
3141                    "htt rx stop rx ba session sta %pM tid %u\n",
3142                    peer->addr, tid);
3143
3144         ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3145         spin_unlock_bh(&ar->data_lock);
3146 }
3147
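     /* Split exactly one A-MSDU off @list into @amsdu: MSDUs are moved
      * until one whose rx descriptor carries the LAST_MSDU flag is seen.
      * If the chain ends without that flag, the MSDUs are spliced back
      * into @list and -EAGAIN is returned.
      */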
3148 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
3149                                        struct sk_buff_head *list,
3150                                        struct sk_buff_head *amsdu)
3151 {
3152         struct sk_buff *msdu;
3153         struct htt_rx_desc *rxd;
3154         struct rx_msdu_end_common *rxd_msdu_end_common;
3155
3156         if (skb_queue_empty(list))
3157                 return -ENOBUFS;
3158
3159         if (WARN_ON(!skb_queue_empty(amsdu)))
3160                 return -EINVAL;
3161
3162         while ((msdu = __skb_dequeue(list))) {
3163                 __skb_queue_tail(amsdu, msdu);
3164
3165                 rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3166                                             (void *)msdu->data -
3167                                             hw->rx_desc_ops->rx_desc_size);
3168
3169                 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3170                 if (rxd_msdu_end_common->info0 &
3171                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
3172                         break;
3173         }
3174
3175         msdu = skb_peek_tail(amsdu);
3176         rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3177                                     (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
3178
3179         rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3180         if (!(rxd_msdu_end_common->info0 &
3181               __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
3182                 skb_queue_splice_init(amsdu, list);
3183                 return -EAGAIN;
3184         }
3185
3186         return 0;
3187 }
3188
3189 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
3190                                             struct sk_buff *skb)
3191 {
3192         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3193
3194         if (!ieee80211_has_protected(hdr->frame_control))
3195                 return;
3196
3197         /* Offloaded frames are already decrypted but firmware insists they are
3198          * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
3199          * will drop the frame.
3200          */
3201
3202         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3203         status->flag |= RX_FLAG_DECRYPTED |
3204                         RX_FLAG_IV_STRIPPED |
3205                         RX_FLAG_MMIC_STRIPPED;
3206 }
3207
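     /* Deliver offloaded MSDUs. These carry a short metadata header
      * instead of a full rx descriptor, so build a minimal rx status
      * (no signal value, channel derived from the vdev id), re-align the
      * payload and queue each frame towards mac80211.
      */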
3208 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
3209                                        struct sk_buff_head *list)
3210 {
3211         struct ath10k_htt *htt = &ar->htt;
3212         struct ieee80211_rx_status *status = &htt->rx_status;
3213         struct htt_rx_offload_msdu *rx;
3214         struct sk_buff *msdu;
3215         size_t offset;
3216
3217         while ((msdu = __skb_dequeue(list))) {
3218                 /* Offloaded frames don't have Rx descriptor. Instead they have
3219                  * a short meta information header.
3220                  */
3221
3222                 rx = (void *)msdu->data;
3223
3224                 skb_put(msdu, sizeof(*rx));
3225                 skb_pull(msdu, sizeof(*rx));
3226
3227                 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
3228                         ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
3229                         dev_kfree_skb_any(msdu);
3230                         continue;
3231                 }
3232
3233                 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
3234
3235                 /* The offloaded rx header length isn't a multiple of 2 or 4,
3236                  * so the actual payload is unaligned. Align the frame, otherwise
3237                  * mac80211 complains. This shouldn't reduce performance much
3238                  * because these offloaded frames are rare.
3239                  */
3240                 offset = 4 - ((unsigned long)msdu->data & 3);
3241                 skb_put(msdu, offset);
3242                 memmove(msdu->data + offset, msdu->data, msdu->len);
3243                 skb_pull(msdu, offset);
3244
3245                 /* FIXME: The frame is NWifi. Re-construct QoS Control
3246                  * if possible later.
3247                  */
3248
3249                 memset(status, 0, sizeof(*status));
3250                 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
3251
3252                 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
3253                 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
3254                 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
3255         }
3256 }
3257
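     /* Handle an in-order rx indication: the message lists the DMA
      * addresses of received buffers in rx order, so pop the matching
      * skbs from the rx ring, route offloaded frames through the
      * dedicated path above and process the rest one A-MSDU at a time.
      */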
3258 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
3259 {
3260         struct ath10k_htt *htt = &ar->htt;
3261         struct htt_resp *resp = (void *)skb->data;
3262         struct ieee80211_rx_status *status = &htt->rx_status;
3263         struct sk_buff_head list;
3264         struct sk_buff_head amsdu;
3265         u16 peer_id;
3266         u16 msdu_count;
3267         u8 vdev_id;
3268         u8 tid;
3269         bool offload;
3270         bool frag;
3271         int ret;
3272
3273         lockdep_assert_held(&htt->rx_ring.lock);
3274
3275         if (htt->rx_confused)
3276                 return -EIO;
3277
3278         skb_pull(skb, sizeof(resp->hdr));
3279         skb_pull(skb, sizeof(resp->rx_in_ord_ind));
3280
3281         peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
3282         msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
3283         vdev_id = resp->rx_in_ord_ind.vdev_id;
3284         tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
3285         offload = !!(resp->rx_in_ord_ind.info &
3286                         HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
3287         frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
3288
3289         ath10k_dbg(ar, ATH10K_DBG_HTT,
3290                    "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3291                    vdev_id, peer_id, tid, offload, frag, msdu_count);
3292
3293         if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
3294                 ath10k_warn(ar, "dropping invalid in order rx indication\n");
3295                 return -EINVAL;
3296         }
3297
3298         /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3299          * extracted and processed.
3300          */
3301         __skb_queue_head_init(&list);
3302         if (ar->hw_params.target_64bit)
3303                 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
3304                                                      &list);
3305         else
3306                 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
3307                                                      &list);
3308
3309         if (ret < 0) {
3310                 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
3311                 htt->rx_confused = true;
3312                 return -EIO;
3313         }
3314
3315         /* Offloaded frames are very different and need to be handled
3316          * separately.
3317          */
3318         if (offload)
3319                 ath10k_htt_rx_h_rx_offload(ar, &list);
3320
3321         while (!skb_queue_empty(&list)) {
3322                 __skb_queue_head_init(&amsdu);
3323                 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
3324                 switch (ret) {
3325                 case 0:
3326                         /* Note: The in-order indication may report interleaved
3327                          * frames from different PPDUs meaning reported rx rate
3328                          * to mac80211 isn't accurate/reliable. It's still
3329                          * better to report something than nothing though. This
3330                          * should still give an idea about rx rate to the user.
3331                          */
3332                         ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3333                         ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3334                         ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3335                                              NULL, peer_id, frag);
3336                         ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3337                         break;
3338                 case -EAGAIN:
3339                         fallthrough;
3340                 default:
3341                         /* Should not happen. */
3342                         ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3343                         htt->rx_confused = true;
3344                         __skb_queue_purge(&list);
3345                         return -EIO;
3346                 }
3347         }
3348         return ret;
3349 }
3350
3351 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3352                                                    const __le32 *resp_ids,
3353                                                    int num_resp_ids)
3354 {
3355         int i;
3356         u32 resp_id;
3357
3358         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3359                    num_resp_ids);
3360
3361         for (i = 0; i < num_resp_ids; i++) {
3362                 resp_id = le32_to_cpu(resp_ids[i]);
3363
3364                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3365                            resp_id);
3366
3367                 /* TODO: free resp_id */
3368         }
3369 }
3370
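     /* Pull-mode tx scheduling: firmware requests up to num_msdus and
      * num_bytes worth of frames for each (peer id, tid) record. Push as
      * much as mac80211 permits, write the amounts actually pushed back
      * into the records and return them in a tx fetch response.
      */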
3371 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3372 {
3373         struct ieee80211_hw *hw = ar->hw;
3374         struct ieee80211_txq *txq;
3375         struct htt_resp *resp = (struct htt_resp *)skb->data;
3376         struct htt_tx_fetch_record *record;
3377         size_t len;
3378         size_t max_num_bytes;
3379         size_t max_num_msdus;
3380         size_t num_bytes;
3381         size_t num_msdus;
3382         const __le32 *resp_ids;
3383         u16 num_records;
3384         u16 num_resp_ids;
3385         u16 peer_id;
3386         u8 tid;
3387         int ret;
3388         int i;
3389         bool may_tx;
3390
3391         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3392
3393         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3394         if (unlikely(skb->len < len)) {
3395                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3396                 return;
3397         }
3398
3399         num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3400         num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3401
3402         len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3403         len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3404
3405         if (unlikely(skb->len < len)) {
3406                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3407                 return;
3408         }
3409
3410         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
3411                    num_records, num_resp_ids,
3412                    le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3413
3414         if (!ar->htt.tx_q_state.enabled) {
3415                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3416                 return;
3417         }
3418
3419         if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3420                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3421                 return;
3422         }
3423
3424         rcu_read_lock();
3425
3426         for (i = 0; i < num_records; i++) {
3427                 record = &resp->tx_fetch_ind.records[i];
3428                 peer_id = MS(le16_to_cpu(record->info),
3429                              HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3430                 tid = MS(le16_to_cpu(record->info),
3431                          HTT_TX_FETCH_RECORD_INFO_TID);
3432                 max_num_msdus = le16_to_cpu(record->num_msdus);
3433                 max_num_bytes = le32_to_cpu(record->num_bytes);
3434
3435                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
3436                            i, peer_id, tid, max_num_msdus, max_num_bytes);
3437
3438                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3439                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3440                         ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3441                                     peer_id, tid);
3442                         continue;
3443                 }
3444
3445                 spin_lock_bh(&ar->data_lock);
3446                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3447                 spin_unlock_bh(&ar->data_lock);
3448
3449                 /* It is okay to release the lock and use txq because RCU read
3450                  * lock is held.
3451                  */
3452
3453                 if (unlikely(!txq)) {
3454                         ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3455                                     peer_id, tid);
3456                         continue;
3457                 }
3458
3459                 num_msdus = 0;
3460                 num_bytes = 0;
3461
3462                 ieee80211_txq_schedule_start(hw, txq->ac);
3463                 may_tx = ieee80211_txq_may_transmit(hw, txq);
3464                 while (num_msdus < max_num_msdus &&
3465                        num_bytes < max_num_bytes) {
3466                         if (!may_tx)
3467                                 break;
3468
3469                         ret = ath10k_mac_tx_push_txq(hw, txq);
3470                         if (ret < 0)
3471                                 break;
3472
3473                         num_msdus++;
3474                         num_bytes += ret;
3475                 }
3476                 ieee80211_return_txq(hw, txq, false);
3477                 ieee80211_txq_schedule_end(hw, txq->ac);
3478
3479                 record->num_msdus = cpu_to_le16(num_msdus);
3480                 record->num_bytes = cpu_to_le32(num_bytes);
3481
3482                 ath10k_htt_tx_txq_recalc(hw, txq);
3483         }
3484
3485         rcu_read_unlock();
3486
3487         resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3488         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3489
3490         ret = ath10k_htt_tx_fetch_resp(ar,
3491                                        resp->tx_fetch_ind.token,
3492                                        resp->tx_fetch_ind.fetch_seq_num,
3493                                        resp->tx_fetch_ind.records,
3494                                        num_records);
3495         if (unlikely(ret)) {
3496                 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3497                             le32_to_cpu(resp->tx_fetch_ind.token), ret);
3498                 /* FIXME: request fw restart */
3499         }
3500
3501         ath10k_htt_tx_txq_sync(ar);
3502 }
3503
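     /* Validate a tx fetch confirm event and hand its response ids to
      * the common confirm handler above.
      */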
3504 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3505                                            struct sk_buff *skb)
3506 {
3507         const struct htt_resp *resp = (void *)skb->data;
3508         size_t len;
3509         int num_resp_ids;
3510
3511         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3512
3513         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3514         if (unlikely(skb->len < len)) {
3515                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3516                 return;
3517         }
3518
3519         num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3520         len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3521
3522         if (unlikely(skb->len < len)) {
3523                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3524                 return;
3525         }
3526
3527         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3528                                                resp->tx_fetch_confirm.resp_ids,
3529                                                num_resp_ids);
3530 }
3531
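     /* Switch between push and push/pull tx modes at the firmware's
      * request and update the per-txq push allowance for every
      * (peer id, tid) record carried in the message.
      */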
3532 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3533                                              struct sk_buff *skb)
3534 {
3535         const struct htt_resp *resp = (void *)skb->data;
3536         const struct htt_tx_mode_switch_record *record;
3537         struct ieee80211_txq *txq;
3538         struct ath10k_txq *artxq;
3539         size_t len;
3540         size_t num_records;
3541         enum htt_tx_mode_switch_mode mode;
3542         bool enable;
3543         u16 info0;
3544         u16 info1;
3545         u16 threshold;
3546         u16 peer_id;
3547         u8 tid;
3548         int i;
3549
3550         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3551
3552         len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3553         if (unlikely(skb->len < len)) {
3554                 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3555                 return;
3556         }
3557
3558         info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3559         info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3560
3561         enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
3562         num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3563         mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3564         threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3565
3566         ath10k_dbg(ar, ATH10K_DBG_HTT,
3567                    "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3568                    info0, info1, enable, num_records, mode, threshold);
3569
3570         len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3571
3572         if (unlikely(skb->len < len)) {
3573                 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3574                 return;
3575         }
3576
3577         switch (mode) {
3578         case HTT_TX_MODE_SWITCH_PUSH:
3579         case HTT_TX_MODE_SWITCH_PUSH_PULL:
3580                 break;
3581         default:
3582                 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3583                             mode);
3584                 return;
3585         }
3586
3587         if (!enable)
3588                 return;
3589
3590         ar->htt.tx_q_state.enabled = enable;
3591         ar->htt.tx_q_state.mode = mode;
3592         ar->htt.tx_q_state.num_push_allowed = threshold;
3593
3594         rcu_read_lock();
3595
3596         for (i = 0; i < num_records; i++) {
3597                 record = &resp->tx_mode_switch_ind.records[i];
3598                 info0 = le16_to_cpu(record->info0);
3599                 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3600                 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3601
3602                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3603                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3604                         ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3605                                     peer_id, tid);
3606                         continue;
3607                 }
3608
3609                 spin_lock_bh(&ar->data_lock);
3610                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3611                 spin_unlock_bh(&ar->data_lock);
3612
3613                 /* It is okay to release the lock and use txq because RCU read
3614                  * lock is held.
3615                  */
3616
3617                 if (unlikely(!txq)) {
3618                         ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3619                                     peer_id, tid);
3620                         continue;
3621                 }
3622
3623                 spin_lock_bh(&ar->htt.tx_lock);
3624                 artxq = (void *)txq->drv_priv;
3625                 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3626                 spin_unlock_bh(&ar->htt.tx_lock);
3627         }
3628
3629         rcu_read_unlock();
3630
3631         ath10k_mac_tx_push_pending(ar);
3632 }
3633
3634 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3635 {
3636         bool release;
3637
3638         release = ath10k_htt_t2h_msg_handler(ar, skb);
3639
3640         /* Free the indication buffer */
3641         if (release)
3642                 dev_kfree_skb_any(skb);
3643 }
3644
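     /* Map a legacy CCK/OFDM rate in Mbps (5 standing in for 5.5) to its
      * index in the table below; used for legacy per-peer tx stats.
      */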
3645 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3646 {
3647         static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3648                                           18, 24, 36, 48, 54};
3649         int i;
3650
3651         for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3652                 if (rate == legacy_rates[i])
3653                         return i;
3654         }
3655
3656         ath10k_warn(ar, "Invalid legacy rate %d in peer stats\n", rate);
3657         return -EINVAL;
3658 }
3659
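     /* Fold one firmware tx stats report into the extended per-station
      * counters, bucketed by rate family (legacy/HT/VHT), bandwidth, NSS,
      * guard interval and a combined rate table index.
      */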
3660 static void
3661 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3662                                     struct ath10k_sta *arsta,
3663                                     struct ath10k_per_peer_tx_stats *pstats,
3664                                     s8 legacy_rate_idx)
3665 {
3666         struct rate_info *txrate = &arsta->txrate;
3667         struct ath10k_htt_tx_stats *tx_stats;
3668         int idx, ht_idx, gi, mcs, bw, nss;
3669         unsigned long flags;
3670
3671         if (!arsta->tx_stats)
3672                 return;
3673
3674         tx_stats = arsta->tx_stats;
3675         flags = txrate->flags;
3676         gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3677         mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3678         bw = txrate->bw;
3679         nss = txrate->nss;
3680         ht_idx = mcs + (nss - 1) * 8;
3681         idx = mcs * 8 + 8 * 10 * (nss - 1);
3682         idx += bw * 2 + gi;
3683
3684 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3685
3686         if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3687                 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3688                 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3689                 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3690                 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3691                 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3692                 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3693         } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3694                 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3695                 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3696                 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3697                 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3698                 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3699                 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3700         } else {
3701                 mcs = legacy_rate_idx;
3702
3703                 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3704                 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3705                 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3706                 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3707                 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3708                 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3709         }
3710
3711         if (ATH10K_HW_AMPDU(pstats->flags)) {
3712                 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3713
3714                 if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3715                         STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3716                                 pstats->succ_bytes + pstats->retry_bytes;
3717                         STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3718                                 pstats->succ_pkts + pstats->retry_pkts;
3719                 } else {
3720                         STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3721                                 pstats->succ_bytes + pstats->retry_bytes;
3722                         STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3723                                 pstats->succ_pkts + pstats->retry_pkts;
3724                 }
3725                 STATS_OP_FMT(AMPDU).bw[0][bw] +=
3726                         pstats->succ_bytes + pstats->retry_bytes;
3727                 STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3728                         pstats->succ_bytes + pstats->retry_bytes;
3729                 STATS_OP_FMT(AMPDU).gi[0][gi] +=
3730                         pstats->succ_bytes + pstats->retry_bytes;
3731                 STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3732                         pstats->succ_bytes + pstats->retry_bytes;
3733                 STATS_OP_FMT(AMPDU).bw[1][bw] +=
3734                         pstats->succ_pkts + pstats->retry_pkts;
3735                 STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3736                         pstats->succ_pkts + pstats->retry_pkts;
3737                 STATS_OP_FMT(AMPDU).gi[1][gi] +=
3738                         pstats->succ_pkts + pstats->retry_pkts;
3739                 STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3740                         pstats->succ_pkts + pstats->retry_pkts;
3741         } else {
3742                 tx_stats->ack_fails +=
3743                                 ATH10K_HW_BA_FAIL(pstats->flags);
3744         }
3745
3746         STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3747         STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3748         STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3749
3750         STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3751         STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3752         STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3753
3754         STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3755         STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3756         STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3757
3758         STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3759         STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3760         STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3761
3762         STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3763         STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3764         STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3765
3766         STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3767         STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3768         STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3769
3770         if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3771                 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3772                 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3773                 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3774                 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3775                 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3776                 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3777         }
3778
3779         tx_stats->tx_duration += pstats->duration;
3780 }
3781
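     /* Translate a firmware per-peer tx stats report into mac80211 rate
      * info: reconstruct the tx rate (legacy/HT/VHT, bandwidth, NSS, SGI),
      * feed it to ieee80211_tx_rate_update() and, if extended tx stats
      * are enabled, accumulate the debugfs counters as well.
      */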
3782 static void
3783 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3784                                 struct ieee80211_sta *sta,
3785                                 struct ath10k_per_peer_tx_stats *peer_stats)
3786 {
3787         struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3788         struct ieee80211_chanctx_conf *conf = NULL;
3789         u8 rate = 0, sgi;
3790         s8 rate_idx = 0;
3791         bool skip_auto_rate;
3792         struct rate_info txrate;
3793
3794         lockdep_assert_held(&ar->data_lock);
3795
3796         txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3797         txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3798         txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3799         txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3800         sgi = ATH10K_HW_GI(peer_stats->flags);
3801         skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
3802
3803         /* Firmware's rate control skips broadcast/management frames, frames
3804          * with host-configured fixed rates and some other special cases.
3805          */
3806         if (skip_auto_rate)
3807                 return;
3808
3809         if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3810                 ath10k_warn(ar, "Invalid VHT mcs %d in peer stats\n", txrate.mcs);
3811                 return;
3812         }
3813
3814         if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3815             (txrate.mcs > 7 || txrate.nss < 1)) {
3816                 ath10k_warn(ar, "Invalid HT mcs %d nss %d in peer stats\n",
3817                             txrate.mcs, txrate.nss);
3818                 return;
3819         }
3820
3821         memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3822         memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3823         if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3824             txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3825                 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3826                 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3827                 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3828                         rate = 5;
3829                 rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3830                 if (rate_idx < 0)
3831                         return;
3832                 arsta->txrate.legacy = rate;
3833         } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3834                 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3835                 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3836         } else {
3837                 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3838                 arsta->txrate.mcs = txrate.mcs;
3839         }
3840
3841         switch (txrate.flags) {
3842         case WMI_RATE_PREAMBLE_OFDM:
3843                 if (arsta->arvif && arsta->arvif->vif)
3844                         conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
3845                 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3846                         arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3847                 break;
3848         case WMI_RATE_PREAMBLE_CCK:
3849                 arsta->tx_info.status.rates[0].idx = rate_idx;
3850                 if (sgi)
3851                         arsta->tx_info.status.rates[0].flags |=
3852                                 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3853                                  IEEE80211_TX_RC_SHORT_GI);
3854                 break;
3855         case WMI_RATE_PREAMBLE_HT:
3856                 arsta->tx_info.status.rates[0].idx =
3857                                 txrate.mcs + ((txrate.nss - 1) * 8);
3858                 if (sgi)
3859                         arsta->tx_info.status.rates[0].flags |=
3860                                         IEEE80211_TX_RC_SHORT_GI;
3861                 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3862                 break;
3863         case WMI_RATE_PREAMBLE_VHT:
3864                 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3865                                        txrate.mcs, txrate.nss);
3866                 if (sgi)
3867                         arsta->tx_info.status.rates[0].flags |=
3868                                                 IEEE80211_TX_RC_SHORT_GI;
3869                 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3870                 break;
3871         }
3872
3873         arsta->txrate.nss = txrate.nss;
3874         arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3875         arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3876         if (sgi)
3877                 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3878
3879         switch (arsta->txrate.bw) {
3880         case RATE_INFO_BW_40:
3881                 arsta->tx_info.status.rates[0].flags |=
3882                                 IEEE80211_TX_RC_40_MHZ_WIDTH;
3883                 break;
3884         case RATE_INFO_BW_80:
3885                 arsta->tx_info.status.rates[0].flags |=
3886                                 IEEE80211_TX_RC_80_MHZ_WIDTH;
3887                 break;
3888         case RATE_INFO_BW_160:
3889                 arsta->tx_info.status.rates[0].flags |=
3890                                 IEEE80211_TX_RC_160_MHZ_WIDTH;
3891                 break;
3892         }
3893
3894         if (peer_stats->succ_pkts) {
3895                 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3896                 arsta->tx_info.status.rates[0].count = 1;
3897                 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3898         }
3899
3900         if (ar->htt.disable_tx_comp) {
3901                 arsta->tx_failed += peer_stats->failed_pkts;
3902                 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
3903                            arsta->tx_failed);
3904         }
3905
3906         arsta->tx_retries += peer_stats->retry_pkts;
3907         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d\n", arsta->tx_retries);
3908
3909         if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3910                 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3911                                                     rate_idx);
3912 }
3913
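     /* Per-peer tx stats delivered via HTT: the payload carries num_ppdu
      * fixed-size PPDU records for one peer; validate the buffer length
      * and feed every record to the common update path.
      */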
3914 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3915                                         struct sk_buff *skb)
3916 {
3917         struct htt_resp *resp = (struct htt_resp *)skb->data;
3918         struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3919         struct htt_per_peer_tx_stats_ind *tx_stats;
3920         struct ieee80211_sta *sta;
3921         struct ath10k_peer *peer;
3922         int peer_id, i;
3923         u8 ppdu_len, num_ppdu;
3924
3925         num_ppdu = resp->peer_tx_stats.num_ppdu;
3926         ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3927
3928         if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3929                 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3930                 return;
3931         }
3932
3933         tx_stats = (struct htt_per_peer_tx_stats_ind *)
3934                         (resp->peer_tx_stats.payload);
3935         peer_id = __le16_to_cpu(tx_stats->peer_id);
3936
3937         rcu_read_lock();
3938         spin_lock_bh(&ar->data_lock);
3939         peer = ath10k_peer_find_by_id(ar, peer_id);
3940         if (!peer || !peer->sta) {
3941                 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
3942                             peer_id);
3943                 goto out;
3944         }
3945
3946         sta = peer->sta;
3947         for (i = 0; i < num_ppdu; i++) {
3948                 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3949                            (resp->peer_tx_stats.payload + i * ppdu_len);
3950
3951                 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3952                 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3953                 p_tx_stats->failed_bytes =
3954                                 __le32_to_cpu(tx_stats->failed_bytes);
3955                 p_tx_stats->ratecode = tx_stats->ratecode;
3956                 p_tx_stats->flags = tx_stats->flags;
3957                 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3958                 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3959                 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3960                 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3961
3962                 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3963         }
3964
3965 out:
3966         spin_unlock_bh(&ar->data_lock);
3967         rcu_read_unlock();
3968 }
3969
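     /* 10.2 firmware reports tx stats through pktlog instead: pick the
      * peer tx stats records out of the pktlog payload and feed them to
      * the common update path.
      */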
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

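/* Return the PN (packet number) length in bits for the given cipher;
 * TKIP and AES-CCMP both use a 48-bit PN.
 */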
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

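/* Record the cipher type and PN length negotiated for the peer's unicast
 * or multicast context and reset the cached last-PN state used for rx
 * replay detection.
 */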
static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication\n",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}

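/* Main HTT target-to-host message dispatcher. Returns true if the caller
 * still owns @skb and may free it, false if the message has been queued
 * for deferred processing and the skb must not be freed here.
 */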
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects the host may be able to fix
		 * them. ath10k sends all tx frames as already inspected so
		 * this shouldn't happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

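		/* The credit delta is sign + magnitude encoded: fetch the
		 * absolute value and negate it if the sign bit is set.
		 */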
		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

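/* Trace a completed pktlog buffer and free it. */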
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

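/* Deliver queued rx MSDUs to mac80211 until the quota reaches @budget
 * or the queue drains; returns the updated quota.
 */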
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

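/* Process up to @budget queued rx indications on high-latency targets;
 * returns the number of indications handled.
 */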
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);

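/* NAPI poll worker for HTT: delivers pending MSDUs, drains in-order and
 * fragment indications, reaps tx completions and tx fetch indications,
 * then returns the amount of work done (@budget when a reschedule is
 * needed).
 */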
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only from this NAPI poll context, so reads are
	 * neatly serialized. From the kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

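/* Rx ring helpers differ between targets with 32-bit and 64-bit DMA
 * address formats; high-latency (HL) targets have no host rx ring and
 * only need the fragment indication handler.
 */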
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}