GNU Linux-libre 5.19-rc6-gnu
drivers/net/wireless/ath/ath11k/dp_rx.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

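/* The rx descriptor layout differs between hardware generations, so the
 * inline helpers below fetch individual descriptor fields through the
 * per-hw callbacks in hw_params.hw_ops, keeping the rest of the rx path
 * generic.
 */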
static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
                                                        struct hal_rx_desc *desc)
{
        if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
                return HAL_ENCRYPT_TYPE_OPEN;

        return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
                                                      struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
                                            struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
                                              struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
                                              struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
                                                      struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
                                                        struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
        return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
                                                    struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
        return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
                                                   struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
                                               struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
        return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
                           __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
        return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
                           __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
        return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
                           __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
        return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
                          __le32_to_cpu(attn->info2)) ==
                RX_DESC_DECRYPT_STATUS_CODE_OK);
}

static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
        u32 info = __le32_to_cpu(attn->info1);
        u32 errmap = 0;

        if (info & RX_ATTENTION_INFO1_FCS_ERR)
                errmap |= DP_RX_MPDU_ERR_FCS;

        if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
                errmap |= DP_RX_MPDU_ERR_DECRYPT;

        if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
                errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

        if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
                errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

        if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
                errmap |= DP_RX_MPDU_ERR_OVERFLOW;

        if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
                errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

        if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
                errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

        return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
                                             struct hal_rx_desc *desc)
{
        struct rx_attention *rx_attention;
        u32 errmap;

        rx_attention = ath11k_dp_rx_get_attention(ab, desc);
        errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

        return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
                                                     struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
                                               struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
                                                    struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
                                                 struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
                                                 struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
                                                    struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
                                               struct hal_rx_desc *desc)
{
        return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
                                               struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
                                                    struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
                                               struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
                                                      struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
                                              struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
                                           struct hal_rx_desc *fdesc,
                                           struct hal_rx_desc *ldesc)
{
        ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
        return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
                         __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
                                                struct hal_rx_desc *rx_desc)
{
        u8 *rx_pkt_hdr;

        rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

        return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
                                               struct hal_rx_desc *rx_desc)
{
        u32 tlv_tag;

        tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

        return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
                                              struct hal_rx_desc *rx_desc)
{
        return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
                                                 struct hal_rx_desc *desc,
                                                 u16 len)
{
        ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
                                        struct hal_rx_desc *desc)
{
        struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

        return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
                (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
                 __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
                                             struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
                                             struct hal_rx_desc *desc)
{
        return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

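/* Timer callback set up for chips without rxdma1 (e.g. QCA6390): reap the
 * monitor rings of every pdev and re-arm the timer for the next interval.
 */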
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
        struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
        int i;

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
                ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

        mod_timer(&ab->mon_reap_timer, jiffies +
                  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

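/* Drain the monitor rings: keep reaping until one pass collects less than
 * a full service budget (the rings are empty) or the purge timeout hits.
 */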
static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
        int i, reaped = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

        do {
                for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
                        reaped += ath11k_dp_rx_process_mon_rings(ab, i,
                                                                 NULL,
                                                                 DP_MON_SERVICE_BUDGET);

                /* nothing more to reap */
                if (reaped < DP_MON_SERVICE_BUDGET)
                        return 0;

        } while (time_before(jiffies, timeout));
        ath11k_warn(ab, "dp mon ring purge timeout\n");

        return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
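/* Each buffer is allocated, aligned, DMA-mapped and tracked in the ring's
 * IDR before its address/cookie is written to the ring; on failure the
 * partially set up buffer is unwound and the count so far is returned.
 */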
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
                               struct dp_rxdma_ring *rx_ring,
                               int req_entries,
                               enum hal_rx_buf_return_buf_manager mgr)
{
        struct hal_srng *srng;
        u32 *desc;
        struct sk_buff *skb;
        int num_free;
        int num_remain;
        int buf_id;
        u32 cookie;
        dma_addr_t paddr;

        req_entries = min(req_entries, rx_ring->bufs_max);

        srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
        if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
                req_entries = num_free;

        req_entries = min(num_free, req_entries);
        num_remain = req_entries;

        while (num_remain > 0) {
                skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
                                    DP_RX_BUFFER_ALIGN_SIZE);
                if (!skb)
                        break;

                if (!IS_ALIGNED((unsigned long)skb->data,
                                DP_RX_BUFFER_ALIGN_SIZE)) {
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
                                 skb->data);
                }

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (dma_mapping_error(ab->dev, paddr))
                        goto fail_free_skb;

                spin_lock_bh(&rx_ring->idr_lock);
                buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
                                   rx_ring->bufs_max * 3, GFP_ATOMIC);
                spin_unlock_bh(&rx_ring->idr_lock);
                if (buf_id < 0)
                        goto fail_dma_unmap;

                desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
                if (!desc)
                        goto fail_idr_remove;

                ATH11K_SKB_RXCB(skb)->paddr = paddr;

                cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
                         FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

                num_remain--;

                ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;

fail_idr_remove:
        spin_lock_bh(&rx_ring->idr_lock);
        idr_remove(&rx_ring->bufs_idr, buf_id);
        spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
        dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
                         DMA_FROM_DEVICE);
fail_free_skb:
        dev_kfree_skb_any(skb);

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;
}

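/* Release every buffer still tracked in the ring's IDR: unmap its DMA
 * address, free the skb and finally destroy the IDR itself.
 */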
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
                                         struct dp_rxdma_ring *rx_ring)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct sk_buff *skb;
        int buf_id;

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* TODO: Understand where internal driver does this dma_unmap
                 * of rxdma_buffer.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        /* if rxdma1_enable is false, mon_status_refill_ring
         * isn't set up, so don't clean it.
         */
        if (!ar->ab->hw_params.rxdma1_enable)
                return 0;

        rx_ring = &dp->rx_mon_status_refill_ring[0];

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* XXX: Understand where internal driver does this dma_unmap
                 * of rxdma_buffer.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct ath11k_base *ab = ar->ab;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
        int i;

        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

        rx_ring = &dp->rxdma_mon_buf_ring;
        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                rx_ring = &dp->rx_mon_status_refill_ring[i];
                ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
        }

        return 0;
}

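/* Size the ring in buffer entries (ring byte size divided by the HAL entry
 * size for this ring type) and fill it completely with rx buffers.
 */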
static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
                                          struct dp_rxdma_ring *rx_ring,
                                          u32 ringtype)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        int num_entries;

        num_entries = rx_ring->refill_buf_ring.size /
                ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

        rx_ring->bufs_max = num_entries;
        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
                                   ar->ab->hw_params.hal_params->rx_buf_rbm);
        return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct ath11k_base *ab = ar->ab;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
        int i;

        ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

        if (ar->ab->hw_params.rxdma1_enable) {
                rx_ring = &dp->rxdma_mon_buf_ring;
                ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
        }

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                rx_ring = &dp->rx_mon_status_refill_ring[i];
                ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
        }

        return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct ath11k_base *ab = ar->ab;
        int i;

        ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                if (ab->hw_params.rx_mac_buf_ring)
                        ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

                ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
                ath11k_dp_srng_cleanup(ab,
                                       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
        }

        ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++)
                ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int ret;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
                ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
                                           HAL_REO_DST, i, 0,
                                           DP_REO_DST_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to setup reo_dst_ring\n");
                        goto err_reo_cleanup;
                }
        }

        return 0;

err_reo_cleanup:
        ath11k_dp_pdev_reo_cleanup(ab);

        return ret;
}

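/* Allocate the per-pdev rx SRNGs: the refill buffer ring, optional per-MAC
 * buffer rings, error destination rings, monitor status rings and, when
 * rxdma1 is available, the dedicated monitor buf/dst/desc rings.
 */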
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct ath11k_base *ab = ar->ab;
        struct dp_srng *srng = NULL;
        int i;
        int ret;

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rx_refill_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_BUF, 0,
                                   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
                return ret;
        }

        if (ar->ab->hw_params.rx_mac_buf_ring) {
                for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                        ret = ath11k_dp_srng_setup(ar->ab,
                                                   &dp->rx_mac_buf_ring[i],
                                                   HAL_RXDMA_BUF, 1,
                                                   dp->mac_id + i, 1024);
                        if (ret) {
                                ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
                                            i);
                                return ret;
                        }
                }
        }

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
                                           HAL_RXDMA_DST, 0, dp->mac_id + i,
                                           DP_RXDMA_ERR_DST_RING_SIZE);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
                        return ret;
                }
        }

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
                ret = ath11k_dp_srng_setup(ar->ab,
                                           srng,
                                           HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
                                           DP_RXDMA_MON_STATUS_RING_SIZE);
                if (ret) {
                        ath11k_warn(ar->ab,
                                    "failed to setup rx_mon_status_refill_ring %d\n", i);
                        return ret;
                }
        }

        /* If rxdma1_enable is false, there is no need to set up
         * rxdma_mon_buf_ring, rxdma_mon_dst_ring or rxdma_mon_desc_ring;
         * just init the reap timer (for QCA6390).
         */
        if (!ar->ab->hw_params.rxdma1_enable) {
                /* init mon status buffer reap timer */
                timer_setup(&ar->ab->mon_reap_timer,
                            ath11k_dp_service_mon_ring, 0);
                return 0;
        }

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rxdma_mon_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_BUF\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
                                   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DST_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DST\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
                                   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DESC_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DESC\n");
                return ret;
        }

        return 0;
}

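/* Drop every pending REO command and cache flush element, unmapping and
 * freeing the rx tid queue descriptor each of them still references.
 */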
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct dp_reo_cmd *cmd, *tmp;
        struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

        spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
                list_del(&cmd->list);
                dma_unmap_single(ab->dev, cmd->data.paddr,
                                 cmd->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd->data.vaddr);
                kfree(cmd);
        }

        list_for_each_entry_safe(cmd_cache, tmp_cache,
                                 &dp->reo_cmd_cache_flush_list, list) {
                list_del(&cmd_cache->list);
                dp->reo_cmd_cache_flush_count--;
                dma_unmap_single(ab->dev, cmd_cache->data.paddr,
                                 cmd_cache->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd_cache->data.vaddr);
                kfree(cmd_cache);
        }
        spin_unlock_bh(&dp->reo_cmd_lock);
}

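/* Status callback for the final cache flush of an rx tid: whether or not
 * the command succeeded, the tid's hardware queue descriptor is no longer
 * needed and can be unmapped and freed.
 */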
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
                                   enum hal_reo_cmd_status status)
{
        struct dp_rx_tid *rx_tid = ctx;

        if (status != HAL_REO_CMD_SUCCESS)
                ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
                            rx_tid->tid, status);

        dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}

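/* Flush the tid's queue descriptor out of the REO hardware cache one
 * descriptor-sized chunk at a time, back to front; the final flush of the
 * base address requests status so ath11k_dp_reo_cmd_free() can release
 * the descriptor.
 */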
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
                                      struct dp_rx_tid *rx_tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        unsigned long tot_desc_sz, desc_sz;
        int ret;

        tot_desc_sz = rx_tid->size;
        desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

        while (tot_desc_sz > desc_sz) {
                tot_desc_sz -= desc_sz;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                                HAL_REO_CMD_FLUSH_CACHE, &cmd,
                                                NULL);
                if (ret)
                        ath11k_warn(ab,
                                    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
                                    rx_tid->tid, ret);
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
        ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                        HAL_REO_CMD_FLUSH_CACHE,
                                        &cmd, ath11k_dp_reo_cmd_free);
        if (ret) {
                ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
                           rx_tid->tid, ret);
                dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }
}

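/* Status callback for HAL_REO_CMD_UPDATE_RX_QUEUE when a tid is deleted:
 * park the descriptor on the cache flush list, then flush out entries that
 * have aged past the timeout or pushed the list above the free threshold.
 * The list lock is dropped around the flush since it issues REO commands.
 */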
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
                                      enum hal_reo_cmd_status status)
{
        struct ath11k_base *ab = dp->ab;
        struct dp_rx_tid *rx_tid = ctx;
        struct dp_reo_cache_flush_elem *elem, *tmp;

        if (status == HAL_REO_CMD_DRAIN) {
                goto free_desc;
        } else if (status != HAL_REO_CMD_SUCCESS) {
                /* Shouldn't happen! Cleanup in case of other failure? */
                ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
                            rx_tid->tid, status);
                return;
        }

        elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
        if (!elem)
                goto free_desc;

        elem->ts = jiffies;
        memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
        dp->reo_cmd_cache_flush_count++;

        /* Flush and invalidate aged REO desc from HW cache */
        list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
                                 list) {
                if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
                    time_after(jiffies, elem->ts +
                               msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
                        list_del(&elem->list);
                        dp->reo_cmd_cache_flush_count--;
                        spin_unlock_bh(&dp->reo_cmd_lock);

                        ath11k_dp_reo_cache_flush(ab, &elem->data);
                        kfree(elem);
                        spin_lock_bh(&dp->reo_cmd_lock);
                }
        }
        spin_unlock_bh(&dp->reo_cmd_lock);

        return;
free_desc:
        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}

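/* Invalidate a peer's rx tid hardware queue: clear the VLD bit through an
 * UPDATE_RX_QUEUE command and let the status callback free the descriptor.
 */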
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
                               struct ath11k_peer *peer, u8 tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
        int ret;

        if (!rx_tid->active)
                return;

        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                        ath11k_dp_rx_tid_del_func);
        if (ret) {
                ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
                           tid, ret);
                dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }

        rx_tid->active = false;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
                                         u32 *link_desc,
                                         enum hal_wbm_rel_bm_act action)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        u32 *desc;
        int ret = 0;

        srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOBUFS;
                goto exit;
        }

        ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
                                         action);

exit:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
        struct ath11k_base *ab = rx_tid->ab;

        lockdep_assert_held(&ab->base_lock);

        if (rx_tid->dst_ring_desc) {
                if (rel_link_desc)
                        ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
                kfree(rx_tid->dst_ring_desc);
                rx_tid->dst_ring_desc = NULL;
        }

        rx_tid->cur_sn = 0;
        rx_tid->last_frag_no = 0;
        rx_tid->rx_frag_bitmap = 0;
        __skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
        struct dp_rx_tid *rx_tid;
        int i;

        lockdep_assert_held(&ar->ab->base_lock);

        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
                rx_tid = &peer->rx_tid[i];

                spin_unlock_bh(&ar->ab->base_lock);
                del_timer_sync(&rx_tid->frag_timer);
                spin_lock_bh(&ar->ab->base_lock);

                ath11k_dp_rx_frags_cleanup(rx_tid, true);
        }
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
        struct dp_rx_tid *rx_tid;
        int i;

        lockdep_assert_held(&ar->ab->base_lock);

        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
                rx_tid = &peer->rx_tid[i];

                ath11k_peer_rx_tid_delete(ar, peer, i);
                ath11k_dp_rx_frags_cleanup(rx_tid, true);

                spin_unlock_bh(&ar->ab->base_lock);
                del_timer_sync(&rx_tid->frag_timer);
                spin_lock_bh(&ar->ab->base_lock);
        }
}

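/* Reprogram an existing rx tid queue with a new block-ack window size and,
 * optionally, a new starting sequence number.
 */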
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
                                         struct ath11k_peer *peer,
                                         struct dp_rx_tid *rx_tid,
                                         u32 ba_win_sz, u16 ssn,
                                         bool update_ssn)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        int ret;

        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
        cmd.ba_window_size = ba_win_sz;

        if (update_ssn) {
                cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
                cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
        }

        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                        NULL);
        if (ret) {
                ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
                            rx_tid->tid, ret);
                return ret;
        }

        rx_tid->ba_win_sz = ba_win_sz;

        return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
                                      const u8 *peer_mac, int vdev_id, u8 tid)
{
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
                goto unlock_exit;
        }

        rx_tid = &peer->rx_tid[tid];
        if (!rx_tid->active)
                goto unlock_exit;

        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);

        rx_tid->active = false;

unlock_exit:
        spin_unlock_bh(&ab->base_lock);
}

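/* Set up (or update) the rx reorder queue for a peer/tid: allocate and
 * DMA-map the hardware queue descriptor, then hand its address to the
 * firmware via WMI. An already active tid is updated in place instead.
 */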
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
                             u8 tid, u32 ba_win_sz, u16 ssn,
                             enum hal_pn_type pn_type)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;
        u32 hw_desc_sz;
        u32 *addr_aligned;
        void *vaddr;
        dma_addr_t paddr;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        rx_tid = &peer->rx_tid[tid];
        /* Update the tid queue if it is already set up */
        if (rx_tid->active) {
                paddr = rx_tid->paddr;
                ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
                                                    ba_win_sz, ssn, true);
                spin_unlock_bh(&ab->base_lock);
                if (ret) {
                        ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
                        return ret;
                }

                ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
                                                             peer_mac, paddr,
                                                             tid, 1, ba_win_sz);
                if (ret)
                        ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid %d (%d)\n",
                                    tid, ret);
                return ret;
        }

        rx_tid->tid = tid;

        rx_tid->ba_win_sz = ba_win_sz;

        /* TODO: Optimize the memory allocation for qos tid based on
         * the actual BA window size in REO tid update path.
         */
        if (tid == HAL_DESC_REO_NON_QOS_TID)
                hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
        else
                hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

        vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
        if (!vaddr) {
                spin_unlock_bh(&ab->base_lock);
                return -ENOMEM;
        }

        addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

        ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
                                   ssn, pn_type);

        paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
                               DMA_BIDIRECTIONAL);

        ret = dma_mapping_error(ab->dev, paddr);
        if (ret) {
                spin_unlock_bh(&ab->base_lock);
                goto err_mem_free;
        }

        rx_tid->vaddr = vaddr;
        rx_tid->paddr = paddr;
        rx_tid->size = hw_desc_sz;
        rx_tid->active = true;

        spin_unlock_bh(&ab->base_lock);

        ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
                                                     paddr, tid, 1, ba_win_sz);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid %d (%d)\n",
                            tid, ret);
                ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
        }

        return ret;

err_mem_free:
        kfree(vaddr);

        return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
                             struct ieee80211_ampdu_params *params)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        int ret;

        ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
                                       params->tid, params->buf_size,
                                       params->ssn, arsta->pn_type);
        if (ret)
                ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);

        return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
                            struct ieee80211_ampdu_params *params)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        dma_addr_t paddr;
        bool active;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        paddr = peer->rx_tid[params->tid].paddr;
        active = peer->rx_tid[params->tid].active;

        if (!active) {
                spin_unlock_bh(&ab->base_lock);
                return 0;
        }

        ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
        spin_unlock_bh(&ab->base_lock);
        if (ret) {
                ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
                            params->tid, ret);
                return ret;
        }

        ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
                                                     params->sta->addr, paddr,
                                                     params->tid, 1, 1);
        if (ret)
                ath11k_warn(ab, "failed to send wmi to delete rx tid: %d\n",
                            ret);

        return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
                                       const u8 *peer_addr,
                                       enum set_key_cmd key_cmd,
                                       struct ieee80211_key_conf *key)
{
        struct ath11k *ar = arvif->ar;
        struct ath11k_base *ab = ar->ab;
        struct ath11k_hal_reo_cmd cmd = {0};
        struct ath11k_peer *peer;
        struct dp_rx_tid *rx_tid;
        u8 tid;
        int ret = 0;

        /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
         * We use mac80211 PN/TSC replay check functionality for bcast/mcast
         * for now.
         */
        if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return 0;

        cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
                    HAL_REO_CMD_UPD0_PN_SIZE |
                    HAL_REO_CMD_UPD0_PN_VALID |
                    HAL_REO_CMD_UPD0_PN_CHECK |
                    HAL_REO_CMD_UPD0_SVLD;

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                if (key_cmd == SET_KEY) {
                        cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
                        cmd.pn_size = 48;
                }
                break;
        default:
                break;
        }

        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                rx_tid = &peer->rx_tid[tid];
                if (!rx_tid->active)
                        continue;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                                HAL_REO_CMD_UPDATE_RX_QUEUE,
                                                &cmd, NULL);
                if (ret) {
                        ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
                                    tid, ret);
                        break;
                }
        }

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

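/* Return the user-stats slot already holding this peer id, or the first
 * unused slot if the peer is not present yet; -EINVAL if all slots are
 * taken by other peers.
 */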
static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
                                             u16 peer_id)
{
        int i;

        for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
                if (ppdu_stats->user_stats[i].is_valid_peer_id) {
                        if (peer_id == ppdu_stats->user_stats[i].peer_id)
                                return i;
                } else {
                        return i;
                }
        }

        return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
                                           u16 tag, u16 len, const void *ptr,
                                           void *data)
{
        struct htt_ppdu_stats_info *ppdu_info;
        struct htt_ppdu_user_stats *user_stats;
        int cur_user;
        u16 peer_id;

        ppdu_info = (struct htt_ppdu_stats_info *)data;

        switch (tag) {
        case HTT_PPDU_STATS_TAG_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_common)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }
                memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
                       sizeof(struct htt_ppdu_stats_common));
                break;
        case HTT_PPDU_STATS_TAG_USR_RATE:
                if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->rate, ptr,
                       sizeof(struct htt_ppdu_stats_user_rate));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->cmpltn_cmn, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
                if (len <
                    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
                        ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                peer_id =
                ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
                cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy((void *)&user_stats->ack_ba, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
                user_stats->tlv_flags |= BIT(tag);
                break;
        }
        return 0;
}

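/* Walk a buffer of HTT TLVs, validating each header and length against the
 * bytes remaining, and pass every TLV payload to the given iterator. Only
 * -ENOMEM from the iterator aborts the walk; other errors skip to the next
 * TLV.
 */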
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
                           int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
                                       const void *ptr, void *data),
                           void *data)
{
        const struct htt_tlv *tlv;
        const void *begin = ptr;
        u16 tlv_tag, tlv_len;
        int ret = -EINVAL;

        while (len > 0) {
                if (len < sizeof(*tlv)) {
                        ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
                                   ptr - begin, len, sizeof(*tlv));
                        return -EINVAL;
                }
                tlv = (struct htt_tlv *)ptr;
                tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
                tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
                ptr += sizeof(*tlv);
                len -= sizeof(*tlv);

                if (tlv_len > len) {
                        ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
                                   tlv_tag, ptr - begin, len, tlv_len);
                        return -EINVAL;
                }
                ret = iter(ab, tlv_tag, tlv_len, ptr, data);
                if (ret == -ENOMEM)
                        return ret;

                ptr += tlv_len;
                len -= tlv_len;
        }
        return 0;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
                                struct htt_ppdu_stats *ppdu_stats, u8 user)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        struct ieee80211_sta *sta;
        struct ath11k_sta *arsta;
        struct htt_ppdu_stats_user_rate *user_rate;
        struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
        struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
        struct htt_ppdu_stats_common *common = &ppdu_stats->common;
        int ret;
        u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
        u32 succ_bytes = 0;
        u16 rate = 0, succ_pkts = 0;
        u32 tx_duration = 0;
        u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
        bool is_ampdu = false;

        if (!usr_stats)
                return;

        if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
                return;

        if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
                is_ampdu =
                        HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

        if (usr_stats->tlv_flags &
            BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
                succ_bytes = usr_stats->ack_ba.success_bytes;
                succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
                                      usr_stats->ack_ba.info);
                tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
                                usr_stats->ack_ba.info);
        }

        if (common->fes_duration_us)
                tx_duration = common->fes_duration_us;

        user_rate = &usr_stats->rate;
        flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
        bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
        nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
        mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
        sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
        dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

        /* Note: if the host has configured fixed rates, and in some other
         * special cases, broadcast/management frames are sent at different
         * rates. Should firmware rate control be skipped for those?
         */
1424
1425         if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1426                 ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
1427                 return;
1428         }
1429
1430         if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1431                 ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
1432                 return;
1433         }
1434
1435         if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1436                 ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1437                             mcs, nss);
1438                 return;
1439         }
1440
1441         if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1442                 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1443                                                             flags,
1444                                                             &rate_idx,
1445                                                             &rate);
1446                 if (ret < 0)
1447                         return;
1448         }
1449
1450         rcu_read_lock();
1451         spin_lock_bh(&ab->base_lock);
1452         peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1453
1454         if (!peer || !peer->sta) {
1455                 spin_unlock_bh(&ab->base_lock);
1456                 rcu_read_unlock();
1457                 return;
1458         }
1459
1460         sta = peer->sta;
1461         arsta = (struct ath11k_sta *)sta->drv_priv;
1462
1463         memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1464
1465         switch (flags) {
1466         case WMI_RATE_PREAMBLE_OFDM:
1467                 arsta->txrate.legacy = rate;
1468                 break;
1469         case WMI_RATE_PREAMBLE_CCK:
1470                 arsta->txrate.legacy = rate;
1471                 break;
1472         case WMI_RATE_PREAMBLE_HT:
1473                 arsta->txrate.mcs = mcs + 8 * (nss - 1);
1474                 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1475                 if (sgi)
1476                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1477                 break;
1478         case WMI_RATE_PREAMBLE_VHT:
1479                 arsta->txrate.mcs = mcs;
1480                 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1481                 if (sgi)
1482                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1483                 break;
1484         case WMI_RATE_PREAMBLE_HE:
1485                 arsta->txrate.mcs = mcs;
1486                 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1487                 arsta->txrate.he_dcm = dcm;
1488                 arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1489                 arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1490                                                 ((user_rate->ru_end -
1491                                                  user_rate->ru_start) + 1);
1492                 break;
1493         }
1494
1495         arsta->txrate.nss = nss;
1496
1497         arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1498         arsta->tx_duration += tx_duration;
1499         memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1500
1501         /* PPDU stats reported for mgmt packets don't have valid tx bytes,
1502          * so skip the peer stats update for them.
1503          */
1504         if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1505                 memset(peer_stats, 0, sizeof(*peer_stats));
1506                 peer_stats->succ_pkts = succ_pkts;
1507                 peer_stats->succ_bytes = succ_bytes;
1508                 peer_stats->is_ampdu = is_ampdu;
1509                 peer_stats->duration = tx_duration;
1510                 peer_stats->ba_fails =
1511                         HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1512                         HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1513
1514                 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1515                         ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1516         }
1517
1518         spin_unlock_bh(&ab->base_lock);
1519         rcu_read_unlock();
1520 }
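
/* A minimal sketch (hypothetical helper, not used by the driver) of the HT
 * rate index mapping applied above: each additional spatial stream adds a
 * block of eight MCS indices, e.g. mcs 7 at nss 2 maps to index 15.
 */
static inline u8 ath11k_example_ht_rate_idx(u8 mcs, u8 nss)
{
	return mcs + 8 * (nss - 1);
}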
1521
1522 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1523                                          struct htt_ppdu_stats *ppdu_stats)
1524 {
1525         u8 user;
1526
1527         for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1528                 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1529 }
1530
1531 static
1532 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1533                                                         u32 ppdu_id)
1534 {
1535         struct htt_ppdu_stats_info *ppdu_info;
1536
1537         spin_lock_bh(&ar->data_lock);
1538         if (!list_empty(&ar->ppdu_stats_info)) {
1539                 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1540                         if (ppdu_info->ppdu_id == ppdu_id) {
1541                                 spin_unlock_bh(&ar->data_lock);
1542                                 return ppdu_info;
1543                         }
1544                 }
1545
1546                 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1547                         ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1548                                                      typeof(*ppdu_info), list);
1549                         list_del(&ppdu_info->list);
1550                         ar->ppdu_stat_list_depth--;
1551                         ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1552                         kfree(ppdu_info);
1553                 }
1554         }
1555         spin_unlock_bh(&ar->data_lock);
1556
1557         ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1558         if (!ppdu_info)
1559                 return NULL;
1560
1561         spin_lock_bh(&ar->data_lock);
1562         list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1563         ar->ppdu_stat_list_depth++;
1564         spin_unlock_bh(&ar->data_lock);
1565
1566         return ppdu_info;
1567 }
1568
1569 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1570                                       struct sk_buff *skb)
1571 {
1572         struct ath11k_htt_ppdu_stats_msg *msg;
1573         struct htt_ppdu_stats_info *ppdu_info;
1574         struct ath11k *ar;
1575         int ret;
1576         u8 pdev_id;
1577         u32 ppdu_id, len;
1578
1579         msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1580         len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1581         pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1582         ppdu_id = msg->ppdu_id;
1583
1584         rcu_read_lock();
1585         ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1586         if (!ar) {
1587                 ret = -EINVAL;
1588                 goto exit;
1589         }
1590
1591         if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1592                 trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1593
1594         ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1595         if (!ppdu_info) {
1596                 ret = -EINVAL;
1597                 goto exit;
1598         }
1599
1600         ppdu_info->ppdu_id = ppdu_id;
1601         ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1602                                      ath11k_htt_tlv_ppdu_stats_parse,
1603                                      (void *)ppdu_info);
1604         if (ret) {
1605                 ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1606                 goto exit;
1607         }
1608
1609 exit:
1610         rcu_read_unlock();
1611
1612         return ret;
1613 }
1614
1615 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1616 {
1617         struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1618         struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1619         struct ath11k *ar;
1620         u8 pdev_id;
1621
1622         pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1623         ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1624         if (!ar) {
1625                 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1626                 return;
1627         }
1628
1629         trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1630                                 ar->ab->pktlog_defs_checksum);
1631 }
1632
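/* Layout of the backpressure event parsed below, as implied by the
 * FIELD_GET() masks (three consecutive 32-bit words):
 *
 *   word0: pdev id, ring type, ring id
 *   word1: head pointer (hp), tail pointer (tp)
 *   word2: backpressure time
 */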
1633 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1634                                                   struct sk_buff *skb)
1635 {
1636         u32 *data = (u32 *)skb->data;
1637         u8 pdev_id, ring_type, ring_id, pdev_idx;
1638         u16 hp, tp;
1639         u32 backpressure_time;
1640         struct ath11k_bp_stats *bp_stats;
1641
1642         pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1643         ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1644         ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1645         ++data;
1646
1647         hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1648         tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1649         ++data;
1650
1651         backpressure_time = *data;
1652
1653         ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
1654                    pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1655
1656         if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1657                 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1658                         return;
1659
1660                 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1661         } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1662                 pdev_idx = DP_HW2SW_MACID(pdev_id);
1663
1664                 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1665                         return;
1666
1667                 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1668         } else {
1669                 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1670                             ring_type);
1671                 return;
1672         }
1673
1674         spin_lock_bh(&ab->base_lock);
1675         bp_stats->hp = hp;
1676         bp_stats->tp = tp;
1677         bp_stats->count++;
1678         bp_stats->jiffies = jiffies;
1679         spin_unlock_bh(&ab->base_lock);
1680 }
1681
1682 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1683                                        struct sk_buff *skb)
1684 {
1685         struct ath11k_dp *dp = &ab->dp;
1686         struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1687         enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1688         u16 peer_id;
1689         u8 vdev_id;
1690         u8 mac_addr[ETH_ALEN];
1691         u16 peer_mac_h16;
1692         u16 ast_hash;
1693         u16 hw_peer_id;
1694
1695         ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1696
1697         switch (type) {
1698         case HTT_T2H_MSG_TYPE_VERSION_CONF:
1699                 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1700                                                   resp->version_msg.version);
1701                 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1702                                                   resp->version_msg.version);
1703                 complete(&dp->htt_tgt_version_received);
1704                 break;
1705         case HTT_T2H_MSG_TYPE_PEER_MAP:
1706                 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1707                                     resp->peer_map_ev.info);
1708                 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1709                                     resp->peer_map_ev.info);
1710                 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1711                                          resp->peer_map_ev.info1);
1712                 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1713                                        peer_mac_h16, mac_addr);
1714                 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1715                 break;
1716         case HTT_T2H_MSG_TYPE_PEER_MAP2:
1717                 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1718                                     resp->peer_map_ev.info);
1719                 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1720                                     resp->peer_map_ev.info);
1721                 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1722                                          resp->peer_map_ev.info1);
1723                 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1724                                        peer_mac_h16, mac_addr);
1725                 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1726                                      resp->peer_map_ev.info2);
1727                 hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1728                                        resp->peer_map_ev.info1);
1729                 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1730                                       hw_peer_id);
1731                 break;
1732         case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1733         case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1734                 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1735                                     resp->peer_unmap_ev.info);
1736                 ath11k_peer_unmap_event(ab, peer_id);
1737                 break;
1738         case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1739                 ath11k_htt_pull_ppdu_stats(ab, skb);
1740                 break;
1741         case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1742                 ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1743                 break;
1744         case HTT_T2H_MSG_TYPE_PKTLOG:
1745                 ath11k_htt_pktlog(ab, skb);
1746                 break;
1747         case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1748                 ath11k_htt_backpressure_event_handler(ab, skb);
1749                 break;
1750         default:
1751                 ath11k_warn(ab, "htt event %d not handled\n", type);
1752                 break;
1753         }
1754
1755         dev_kfree_skb_any(skb);
1756 }
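
/* Sketch (hypothetical helper) of what ath11k_dp_get_mac_addr() is used for
 * in the peer map handling above: the 48-bit MAC address arrives split into
 * a 32-bit low word and a 16-bit high word. This sketch assumes a
 * little-endian host; a production helper would also need byte swapping on
 * big-endian systems.
 */
static inline void ath11k_example_mac_from_words(u32 addr_l32, u16 addr_h16,
						 u8 *addr)
{
	memcpy(addr, &addr_l32, 4);	/* bytes 0..3 of the MAC */
	memcpy(addr + 4, &addr_h16, 2);	/* bytes 4..5 of the MAC */
}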
1757
1758 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1759                                       struct sk_buff_head *msdu_list,
1760                                       struct sk_buff *first, struct sk_buff *last,
1761                                       u8 l3pad_bytes, int msdu_len)
1762 {
1763         struct ath11k_base *ab = ar->ab;
1764         struct sk_buff *skb;
1765         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1766         int buf_first_hdr_len, buf_first_len;
1767         struct hal_rx_desc *ldesc;
1768         int space_extra, rem_len, buf_len;
1769         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1770
1771         /* As the msdu is spread across multiple rx buffers,
1772          * find the offset of the msdu start in order to compute
1773          * how much of the msdu lies in the first buffer.
1774          */
1775         buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1776         buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1777
1778         if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1779                 skb_put(first, buf_first_hdr_len + msdu_len);
1780                 skb_pull(first, buf_first_hdr_len);
1781                 return 0;
1782         }
1783
1784         ldesc = (struct hal_rx_desc *)last->data;
1785         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1786         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1787
1788         /* The MSDU spans multiple buffers because its length exceeds the
1789          * room left in the first buffer (buf_first_len). So assume the
1790          * first buffer is filled up to DP_RX_BUFFER_SIZE.
1791          */
1792         skb_put(first, DP_RX_BUFFER_SIZE);
1793         skb_pull(first, buf_first_hdr_len);
1794
1795         /* When an MSDU is spread over multiple buffers, the attention,
1796          * MSDU_END and MPDU_END TLVs are valid only in the last buffer; copy them.
1797          */
1798         ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1799
1800         space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1801         if (space_extra > 0 &&
1802             (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1803                 /* Free up all buffers of the MSDU */
1804                 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1805                         rxcb = ATH11K_SKB_RXCB(skb);
1806                         if (!rxcb->is_continuation) {
1807                                 dev_kfree_skb_any(skb);
1808                                 break;
1809                         }
1810                         dev_kfree_skb_any(skb);
1811                 }
1812                 return -ENOMEM;
1813         }
1814
1815         rem_len = msdu_len - buf_first_len;
1816         while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1817                 rxcb = ATH11K_SKB_RXCB(skb);
1818                 if (rxcb->is_continuation)
1819                         buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1820                 else
1821                         buf_len = rem_len;
1822
1823                 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1824                         WARN_ON_ONCE(1);
1825                         dev_kfree_skb_any(skb);
1826                         return -EINVAL;
1827                 }
1828
1829                 skb_put(skb, buf_len + hal_rx_desc_sz);
1830                 skb_pull(skb, hal_rx_desc_sz);
1831                 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1832                                           buf_len);
1833                 dev_kfree_skb_any(skb);
1834
1835                 rem_len -= buf_len;
1836                 if (!rxcb->is_continuation)
1837                         break;
1838         }
1839
1840         return 0;
1841 }
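
/* Buffer layout assumed by the coalescing above:
 *
 *   first buffer:        | rx desc | l3 pad | msdu data ...  |
 *   continuation buffer: | rx desc | msdu data ...           |
 *   last buffer:         | rx desc | remaining data | unused |
 *
 * Every descriptor is stripped with skb_pull() and only the data portions
 * are appended to 'first'.
 */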
1842
1843 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1844                                                       struct sk_buff *first)
1845 {
1846         struct sk_buff *skb;
1847         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1848
1849         if (!rxcb->is_continuation)
1850                 return first;
1851
1852         skb_queue_walk(msdu_list, skb) {
1853                 rxcb = ATH11K_SKB_RXCB(skb);
1854                 if (!rxcb->is_continuation)
1855                         return skb;
1856         }
1857
1858         return NULL;
1859 }
1860
1861 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1862 {
1863         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1864         struct rx_attention *rx_attention;
1865         bool ip_csum_fail, l4_csum_fail;
1866
1867         rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1868         ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1869         l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1870
1871         msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1872                           CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1873 }
1874
1875 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1876                                        enum hal_encrypt_type enctype)
1877 {
1878         switch (enctype) {
1879         case HAL_ENCRYPT_TYPE_OPEN:
1880         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1881         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1882                 return 0;
1883         case HAL_ENCRYPT_TYPE_CCMP_128:
1884                 return IEEE80211_CCMP_MIC_LEN;
1885         case HAL_ENCRYPT_TYPE_CCMP_256:
1886                 return IEEE80211_CCMP_256_MIC_LEN;
1887         case HAL_ENCRYPT_TYPE_GCMP_128:
1888         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1889                 return IEEE80211_GCMP_MIC_LEN;
1890         case HAL_ENCRYPT_TYPE_WEP_40:
1891         case HAL_ENCRYPT_TYPE_WEP_104:
1892         case HAL_ENCRYPT_TYPE_WEP_128:
1893         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1894         case HAL_ENCRYPT_TYPE_WAPI:
1895                 break;
1896         }
1897
1898         ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1899         return 0;
1900 }
1901
1902 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1903                                          enum hal_encrypt_type enctype)
1904 {
1905         switch (enctype) {
1906         case HAL_ENCRYPT_TYPE_OPEN:
1907                 return 0;
1908         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1909         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1910                 return IEEE80211_TKIP_IV_LEN;
1911         case HAL_ENCRYPT_TYPE_CCMP_128:
1912                 return IEEE80211_CCMP_HDR_LEN;
1913         case HAL_ENCRYPT_TYPE_CCMP_256:
1914                 return IEEE80211_CCMP_256_HDR_LEN;
1915         case HAL_ENCRYPT_TYPE_GCMP_128:
1916         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1917                 return IEEE80211_GCMP_HDR_LEN;
1918         case HAL_ENCRYPT_TYPE_WEP_40:
1919         case HAL_ENCRYPT_TYPE_WEP_104:
1920         case HAL_ENCRYPT_TYPE_WEP_128:
1921         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1922         case HAL_ENCRYPT_TYPE_WAPI:
1923                 break;
1924         }
1925
1926         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1927         return 0;
1928 }
1929
1930 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1931                                        enum hal_encrypt_type enctype)
1932 {
1933         switch (enctype) {
1934         case HAL_ENCRYPT_TYPE_OPEN:
1935         case HAL_ENCRYPT_TYPE_CCMP_128:
1936         case HAL_ENCRYPT_TYPE_CCMP_256:
1937         case HAL_ENCRYPT_TYPE_GCMP_128:
1938         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1939                 return 0;
1940         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1941         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1942                 return IEEE80211_TKIP_ICV_LEN;
1943         case HAL_ENCRYPT_TYPE_WEP_40:
1944         case HAL_ENCRYPT_TYPE_WEP_104:
1945         case HAL_ENCRYPT_TYPE_WEP_128:
1946         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1947         case HAL_ENCRYPT_TYPE_WAPI:
1948                 break;
1949         }
1950
1951         ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1952         return 0;
1953 }
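
/* Per-cipher lengths returned by the three helpers above, in bytes
 * (derived from the IEEE80211_* constants they return):
 *
 *   enctype         param (IV/hdr)  icv  mic
 *   OPEN                        0     0    0
 *   TKIP                        8     4    0
 *   CCMP-128                    8     0    8
 *   CCMP-256                    8     0   16
 *   GCMP-128/256                8     0   16
 *
 * WEP and WAPI fall through to the warning paths and report 0.
 */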
1954
1955 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1956                                          struct sk_buff *msdu,
1957                                          u8 *first_hdr,
1958                                          enum hal_encrypt_type enctype,
1959                                          struct ieee80211_rx_status *status)
1960 {
1961         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1962         u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1963         struct ieee80211_hdr *hdr;
1964         size_t hdr_len;
1965         u8 da[ETH_ALEN];
1966         u8 sa[ETH_ALEN];
1967         u16 qos_ctl = 0;
1968         u8 *qos;
1969
1970         /* copy SA & DA and pull decapped header */
1971         hdr = (struct ieee80211_hdr *)msdu->data;
1972         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1973         ether_addr_copy(da, ieee80211_get_DA(hdr));
1974         ether_addr_copy(sa, ieee80211_get_SA(hdr));
1975         skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1976
1977         if (rxcb->is_first_msdu) {
1978                 /* the original 802.11 header is valid for the first msdu,
1979                  * hence we can reuse the same header
1980                  */
1981                 hdr = (struct ieee80211_hdr *)first_hdr;
1982                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1983
1984                 /* Each A-MSDU subframe will be reported as a separate MSDU,
1985                  * so strip the A-MSDU bit from QoS Ctl.
1986                  */
1987                 if (ieee80211_is_data_qos(hdr->frame_control)) {
1988                         qos = ieee80211_get_qos_ctl(hdr);
1989                         qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1990                 }
1991         } else {
1992                 /* Rebuild the QoS header if this is a middle/last msdu */
1993                 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1994
1995                 /* Reset the order bit as the HT_Control header is stripped */
1996                 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1997
1998                 qos_ctl = rxcb->tid;
1999
2000                 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2001                         qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2002
2003                 /* TODO Add other QoS ctl fields when required */
2004
2005                 /* copy decap header before overwriting for reuse below */
2006                 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
2007         }
2008
2009         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2010                 memcpy(skb_push(msdu,
2011                                 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2012                        (void *)hdr + hdr_len,
2013                        ath11k_dp_rx_crypto_param_len(ar, enctype));
2014         }
2015
2016         if (!rxcb->is_first_msdu) {
2017                 memcpy(skb_push(msdu,
2018                                 IEEE80211_QOS_CTL_LEN), &qos_ctl,
2019                                 IEEE80211_QOS_CTL_LEN);
2020                 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2021                 return;
2022         }
2023
2024         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2025
2026         /* original 802.11 header has a different DA and in
2027          * case of 4addr it may also have different SA
2028          */
2029         hdr = (struct ieee80211_hdr *)msdu->data;
2030         ether_addr_copy(ieee80211_get_DA(hdr), da);
2031         ether_addr_copy(ieee80211_get_SA(hdr), sa);
2032 }
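
/* Sketch of the nwifi undecap above for a first MSDU when the IV was not
 * stripped by HW:
 *
 *   in:  | nwifi hdr | payload |
 *   out: | 802.11 hdr | crypto param | payload |
 *
 * For middle/last MSDUs the saved decap header and a rebuilt QoS ctl field
 * are pushed back instead.
 */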
2033
2034 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2035                                        enum hal_encrypt_type enctype,
2036                                        struct ieee80211_rx_status *status,
2037                                        bool decrypted)
2038 {
2039         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2040         struct ieee80211_hdr *hdr;
2041         size_t hdr_len;
2042         size_t crypto_len;
2043
2044         /* raw decap expects the MPDU to consist of a single MSDU */
2045         if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2046                 WARN_ON_ONCE(1);
2047                 return;
2048         }
2049
2050         skb_trim(msdu, msdu->len - FCS_LEN);
2051
2052         if (!decrypted)
2053                 return;
2054
2055         hdr = (void *)msdu->data;
2056
2057         /* Tail */
2058         if (status->flag & RX_FLAG_IV_STRIPPED) {
2059                 skb_trim(msdu, msdu->len -
2060                          ath11k_dp_rx_crypto_mic_len(ar, enctype));
2061
2062                 skb_trim(msdu, msdu->len -
2063                          ath11k_dp_rx_crypto_icv_len(ar, enctype));
2064         } else {
2065                 /* MIC */
2066                 if (status->flag & RX_FLAG_MIC_STRIPPED)
2067                         skb_trim(msdu, msdu->len -
2068                                  ath11k_dp_rx_crypto_mic_len(ar, enctype));
2069
2070                 /* ICV */
2071                 if (status->flag & RX_FLAG_ICV_STRIPPED)
2072                         skb_trim(msdu, msdu->len -
2073                                  ath11k_dp_rx_crypto_icv_len(ar, enctype));
2074         }
2075
2076         /* MMIC: the 8-byte Michael MIC (same length as IEEE80211_CCMP_MIC_LEN) */
2077         if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2078             !ieee80211_has_morefrags(hdr->frame_control) &&
2079             enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2080                 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2081
2082         /* Head */
2083         if (status->flag & RX_FLAG_IV_STRIPPED) {
2084                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2085                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2086
2087                 memmove((void *)msdu->data + crypto_len,
2088                         (void *)msdu->data, hdr_len);
2089                 skb_pull(msdu, crypto_len);
2090         }
2091 }
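
/* Trim order in the raw undecap above: the FCS is always removed; for
 * decrypted frames the crypto tail (MIC and/or ICV) is trimmed, and if HW
 * stripped the IV the 802.11 header is slid forward over it with memmove()
 * before the head is pulled.
 */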
2092
2093 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2094                                          struct sk_buff *msdu,
2095                                          enum hal_encrypt_type enctype)
2096 {
2097         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2098         struct ieee80211_hdr *hdr;
2099         size_t hdr_len, crypto_len;
2100         void *rfc1042;
2101         bool is_amsdu;
2102
2103         is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2104         hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2105         rfc1042 = hdr;
2106
2107         if (rxcb->is_first_msdu) {
2108                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2109                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2110
2111                 rfc1042 += hdr_len + crypto_len;
2112         }
2113
2114         if (is_amsdu)
2115                 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2116
2117         return rfc1042;
2118 }
2119
2120 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2121                                        struct sk_buff *msdu,
2122                                        u8 *first_hdr,
2123                                        enum hal_encrypt_type enctype,
2124                                        struct ieee80211_rx_status *status)
2125 {
2126         struct ieee80211_hdr *hdr;
2127         struct ethhdr *eth;
2128         size_t hdr_len;
2129         u8 da[ETH_ALEN];
2130         u8 sa[ETH_ALEN];
2131         void *rfc1042;
2132
2133         rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2134         if (WARN_ON_ONCE(!rfc1042))
2135                 return;
2136
2137         /* pull decapped header and copy SA & DA */
2138         eth = (struct ethhdr *)msdu->data;
2139         ether_addr_copy(da, eth->h_dest);
2140         ether_addr_copy(sa, eth->h_source);
2141         skb_pull(msdu, sizeof(struct ethhdr));
2142
2143         /* push rfc1042/llc/snap */
2144         memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2145                sizeof(struct ath11k_dp_rfc1042_hdr));
2146
2147         /* push original 802.11 header */
2148         hdr = (struct ieee80211_hdr *)first_hdr;
2149         hdr_len = ieee80211_hdrlen(hdr->frame_control);
2150
2151         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2152                 memcpy(skb_push(msdu,
2153                                 ath11k_dp_rx_crypto_param_len(ar, enctype)),
2154                        (void *)hdr + hdr_len,
2155                        ath11k_dp_rx_crypto_param_len(ar, enctype));
2156         }
2157
2158         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2159
2160         /* original 802.11 header has a different DA and in
2161          * case of 4addr it may also have different SA
2162          */
2163         hdr = (struct ieee80211_hdr *)msdu->data;
2164         ether_addr_copy(ieee80211_get_DA(hdr), da);
2165         ether_addr_copy(ieee80211_get_SA(hdr), sa);
2166 }
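
/* Sketch of the ethernet undecap above:
 *
 *   in:  | ethhdr (dst, src, proto) | payload |
 *   out: | 802.11 hdr | crypto param | rfc1042/llc/snap | payload |
 *
 * DA/SA from the ethernet header are written back into the rebuilt
 * 802.11 header.
 */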
2167
2168 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2169                                    struct hal_rx_desc *rx_desc,
2170                                    enum hal_encrypt_type enctype,
2171                                    struct ieee80211_rx_status *status,
2172                                    bool decrypted)
2173 {
2174         u8 *first_hdr;
2175         u8 decap;
2176         struct ethhdr *ehdr;
2177
2178         first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2179         decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2180
2181         switch (decap) {
2182         case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2183                 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2184                                              enctype, status);
2185                 break;
2186         case DP_RX_DECAP_TYPE_RAW:
2187                 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2188                                            decrypted);
2189                 break;
2190         case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2191                 ehdr = (struct ethhdr *)msdu->data;
2192
2193                 /* mac80211 allows fast path only for authorized STA */
2194                 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2195                         ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2196                         ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2197                                                    enctype, status);
2198                         break;
2199                 }
2200
2201                 /* PN for mcast packets will be validated in mac80211;
2202                  * remove eth header and add 802.11 header.
2203                  */
2204                 if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2205                         ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2206                                                    enctype, status);
2207                 break;
2208         case DP_RX_DECAP_TYPE_8023:
2209                 /* TODO: Handle undecap for these formats */
2210                 break;
2211         }
2212 }
2213
2214 static struct ath11k_peer *
2215 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2216 {
2217         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2218         struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2219         struct ath11k_peer *peer = NULL;
2220
2221         lockdep_assert_held(&ab->base_lock);
2222
2223         if (rxcb->peer_id)
2224                 peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2225
2226         if (peer)
2227                 return peer;
2228
2229         if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2230                 return NULL;
2231
2232         peer = ath11k_peer_find_by_addr(ab,
2233                                         ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2234         return peer;
2235 }
2236
2237 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2238                                 struct sk_buff *msdu,
2239                                 struct hal_rx_desc *rx_desc,
2240                                 struct ieee80211_rx_status *rx_status)
2241 {
2242         bool  fill_crypto_hdr;
2243         enum hal_encrypt_type enctype;
2244         bool is_decrypted = false;
2245         struct ath11k_skb_rxcb *rxcb;
2246         struct ieee80211_hdr *hdr;
2247         struct ath11k_peer *peer;
2248         struct rx_attention *rx_attention;
2249         u32 err_bitmap;
2250
2251         /* PN for multicast packets will be checked in mac80211 */
2252         rxcb = ATH11K_SKB_RXCB(msdu);
2253         fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2254         rxcb->is_mcbc = fill_crypto_hdr;
2255
2256         if (rxcb->is_mcbc) {
2257                 rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2258                 rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2259         }
2260
2261         spin_lock_bh(&ar->ab->base_lock);
2262         peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2263         if (peer) {
2264                 if (rxcb->is_mcbc)
2265                         enctype = peer->sec_type_grp;
2266                 else
2267                         enctype = peer->sec_type;
2268         } else {
2269                 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2270         }
2271         spin_unlock_bh(&ar->ab->base_lock);
2272
2273         rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2274         err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2275         if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2276                 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2277
2278         /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2279         rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2280                              RX_FLAG_MMIC_ERROR |
2281                              RX_FLAG_DECRYPTED |
2282                              RX_FLAG_IV_STRIPPED |
2283                              RX_FLAG_MMIC_STRIPPED);
2284
2285         if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2286                 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2287         if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2288                 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2289
2290         if (is_decrypted) {
2291                 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2292
2293                 if (fill_crypto_hdr)
2294                         rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2295                                         RX_FLAG_ICV_STRIPPED;
2296                 else
2297                         rx_status->flag |= RX_FLAG_IV_STRIPPED |
2298                                            RX_FLAG_PN_VALIDATED;
2299         }
2300
2301         ath11k_dp_rx_h_csum_offload(ar, msdu);
2302         ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2303                                enctype, rx_status, is_decrypted);
2304
2305         if (!is_decrypted || fill_crypto_hdr)
2306                 return;
2307
2308         if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2309             DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2310                 hdr = (void *)msdu->data;
2311                 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2312         }
2313 }
2314
2315 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2316                                 struct ieee80211_rx_status *rx_status)
2317 {
2318         struct ieee80211_supported_band *sband;
2319         enum rx_msdu_start_pkt_type pkt_type;
2320         u8 bw;
2321         u8 rate_mcs, nss;
2322         u8 sgi;
2323         bool is_cck, is_ldpc;
2324
2325         pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2326         bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2327         rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2328         nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2329         sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2330
2331         switch (pkt_type) {
2332         case RX_MSDU_START_PKT_TYPE_11A:
2333         case RX_MSDU_START_PKT_TYPE_11B:
2334                 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2335                 sband = &ar->mac.sbands[rx_status->band];
2336                 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2337                                                                 is_cck);
2338                 break;
2339         case RX_MSDU_START_PKT_TYPE_11N:
2340                 rx_status->encoding = RX_ENC_HT;
2341                 if (rate_mcs > ATH11K_HT_MCS_MAX) {
2342                         ath11k_warn(ar->ab,
2343                                     "received invalid HT mcs %d\n",
2344                                     rate_mcs);
2345                         break;
2346                 }
2347                 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2348                 if (sgi)
2349                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2350                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2351                 break;
2352         case RX_MSDU_START_PKT_TYPE_11AC:
2353                 rx_status->encoding = RX_ENC_VHT;
2354                 rx_status->rate_idx = rate_mcs;
2355                 if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2356                         ath11k_warn(ar->ab,
2357                                     "received invalid VHT mcs %d\n",
2358                                     rate_mcs);
2359                         break;
2360                 }
2361                 rx_status->nss = nss;
2362                 if (sgi)
2363                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2364                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2365                 is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2366                 if (is_ldpc)
2367                         rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2368                 break;
2369         case RX_MSDU_START_PKT_TYPE_11AX:
2370                 rx_status->rate_idx = rate_mcs;
2371                 if (rate_mcs > ATH11K_HE_MCS_MAX) {
2372                         ath11k_warn(ar->ab,
2373                                     "received invalid HE mcs %d\n",
2374                                     rate_mcs);
2375                         break;
2376                 }
2377                 rx_status->encoding = RX_ENC_HE;
2378                 rx_status->nss = nss;
2379                 rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2380                 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2381                 break;
2382         }
2383 }
2384
2385 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2386                                 struct ieee80211_rx_status *rx_status)
2387 {
2388         u8 channel_num;
2389         u32 center_freq, meta_data;
2390         struct ieee80211_channel *channel;
2391
2392         rx_status->freq = 0;
2393         rx_status->rate_idx = 0;
2394         rx_status->nss = 0;
2395         rx_status->encoding = RX_ENC_LEGACY;
2396         rx_status->bw = RATE_INFO_BW_20;
2397
2398         rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2399
2400         meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2401         channel_num = meta_data;
2402         center_freq = meta_data >> 16;
2403
2404         if (center_freq >= ATH11K_MIN_6G_FREQ &&
2405             center_freq <= ATH11K_MAX_6G_FREQ) {
2406                 rx_status->band = NL80211_BAND_6GHZ;
2407                 rx_status->freq = center_freq;
2408         } else if (channel_num >= 1 && channel_num <= 14) {
2409                 rx_status->band = NL80211_BAND_2GHZ;
2410         } else if (channel_num >= 36 && channel_num <= 173) {
2411                 rx_status->band = NL80211_BAND_5GHZ;
2412         } else {
2413                 spin_lock_bh(&ar->data_lock);
2414                 channel = ar->rx_channel;
2415                 if (channel) {
2416                         rx_status->band = channel->band;
2417                         channel_num =
2418                                 ieee80211_frequency_to_channel(channel->center_freq);
2419                 }
2420                 spin_unlock_bh(&ar->data_lock);
2421                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2422                                 rx_desc, sizeof(struct hal_rx_desc));
2423         }
2424
2425         if (rx_status->band != NL80211_BAND_6GHZ)
2426                 rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2427                                                                  rx_status->band);
2428
2429         ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2430 }
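
/* A minimal sketch (hypothetical helper) of the frequency meta data
 * unpacking done above: the low bits carry the channel number and the
 * upper 16 bits the center frequency in MHz.
 */
static inline void ath11k_example_unpack_freq(u32 meta_data, u8 *chan_num,
					      u32 *center_freq)
{
	*chan_num = meta_data;		/* truncates to the low 8 bits */
	*center_freq = meta_data >> 16;	/* center frequency in MHz */
}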
2431
2432 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2433                                       struct sk_buff *msdu,
2434                                       struct ieee80211_rx_status *status)
2435 {
2436         static const struct ieee80211_radiotap_he known = {
2437                 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2438                                      IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2439                 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2440         };
2441         struct ieee80211_rx_status *rx_status;
2442         struct ieee80211_radiotap_he *he = NULL;
2443         struct ieee80211_sta *pubsta = NULL;
2444         struct ath11k_peer *peer;
2445         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2446         u8 decap = DP_RX_DECAP_TYPE_RAW;
2447         bool is_mcbc = rxcb->is_mcbc;
2448         bool is_eapol = rxcb->is_eapol;
2449
2450         if (status->encoding == RX_ENC_HE &&
2451             !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2452             !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2453                 he = skb_push(msdu, sizeof(known));
2454                 memcpy(he, &known, sizeof(known));
2455                 status->flag |= RX_FLAG_RADIOTAP_HE;
2456         }
2457
2458         if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2459                 decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2460
2461         spin_lock_bh(&ar->ab->base_lock);
2462         peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2463         if (peer && peer->sta)
2464                 pubsta = peer->sta;
2465         spin_unlock_bh(&ar->ab->base_lock);
2466
2467         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2468                    "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2469                    msdu,
2470                    msdu->len,
2471                    peer ? peer->addr : NULL,
2472                    rxcb->tid,
2473                    is_mcbc ? "mcast" : "ucast",
2474                    rxcb->seq_no,
2475                    (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2476                    (status->encoding == RX_ENC_HT) ? "ht" : "",
2477                    (status->encoding == RX_ENC_VHT) ? "vht" : "",
2478                    (status->encoding == RX_ENC_HE) ? "he" : "",
2479                    (status->bw == RATE_INFO_BW_40) ? "40" : "",
2480                    (status->bw == RATE_INFO_BW_80) ? "80" : "",
2481                    (status->bw == RATE_INFO_BW_160) ? "160" : "",
2482                    status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2483                    status->rate_idx,
2484                    status->nss,
2485                    status->freq,
2486                    status->band, status->flag,
2487                    !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2488                    !!(status->flag & RX_FLAG_MMIC_ERROR),
2489                    !!(status->flag & RX_FLAG_AMSDU_MORE));
2490
2491         ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2492                         msdu->data, msdu->len);
2493
2494         rx_status = IEEE80211_SKB_RXCB(msdu);
2495         *rx_status = *status;
2496
2497         /* TODO: trace rx packet */
2498
2499         /* PN for multicast packets is not validated in HW,
2500          * so skip the 802.3 rx path.
2501          * Also, fast_rx expects the STA to be authorized, hence
2502          * EAPOL packets are sent via the slow path.
2503          */
2504         if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2505             !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2506                 rx_status->flag |= RX_FLAG_8023;
2507
2508         ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2509 }
2510
2511 static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2512                                      struct sk_buff *msdu,
2513                                      struct sk_buff_head *msdu_list,
2514                                      struct ieee80211_rx_status *rx_status)
2515 {
2516         struct ath11k_base *ab = ar->ab;
2517         struct hal_rx_desc *rx_desc, *lrx_desc;
2518         struct rx_attention *rx_attention;
2519         struct ath11k_skb_rxcb *rxcb;
2520         struct sk_buff *last_buf;
2521         u8 l3_pad_bytes;
2522         u8 *hdr_status;
2523         u16 msdu_len;
2524         int ret;
2525         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2526
2527         last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2528         if (!last_buf) {
2529                 ath11k_warn(ab,
2530                             "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2531                 ret = -EIO;
2532                 goto free_out;
2533         }
2534
2535         rx_desc = (struct hal_rx_desc *)msdu->data;
2536         if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2537                 ath11k_warn(ar->ab, "msdu len not valid\n");
2538                 ret = -EIO;
2539                 goto free_out;
2540         }
2541
2542         lrx_desc = (struct hal_rx_desc *)last_buf->data;
2543         rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2544         if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2545                 ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2546                 ret = -EIO;
2547                 goto free_out;
2548         }
2549
2550         rxcb = ATH11K_SKB_RXCB(msdu);
2551         rxcb->rx_desc = rx_desc;
2552         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2553         l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2554
2555         if (rxcb->is_frag) {
2556                 skb_pull(msdu, hal_rx_desc_sz);
2557         } else if (!rxcb->is_continuation) {
2558                 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2559                         hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2560                         ret = -EINVAL;
2561                         ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2562                         ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2563                                         sizeof(struct ieee80211_hdr));
2564                         ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2565                                         sizeof(struct hal_rx_desc));
2566                         goto free_out;
2567                 }
2568                 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2569                 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2570         } else {
2571                 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2572                                                  msdu, last_buf,
2573                                                  l3_pad_bytes, msdu_len);
2574                 if (ret) {
2575                         ath11k_warn(ab,
2576                                     "failed to coalesce msdu rx buffer: %d\n", ret);
2577                         goto free_out;
2578                 }
2579         }
2580
2581         ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2582         ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2583
2584         rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2585
2586         return 0;
2587
2588 free_out:
2589         return ret;
2590 }
2591
2592 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2593                                                   struct napi_struct *napi,
2594                                                   struct sk_buff_head *msdu_list,
2595                                                   int mac_id)
2596 {
2597         struct sk_buff *msdu;
2598         struct ath11k *ar;
2599         struct ieee80211_rx_status rx_status = {0};
2600         int ret;
2601
2602         if (skb_queue_empty(msdu_list))
2603                 return;
2604
2605         if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2606                 __skb_queue_purge(msdu_list);
2607                 return;
2608         }
2609
2610         ar = ab->pdevs[mac_id].ar;
2611         if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2612                 __skb_queue_purge(msdu_list);
2613                 return;
2614         }
2615
2616         while ((msdu = __skb_dequeue(msdu_list))) {
2617                 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2618                 if (unlikely(ret)) {
2619                         ath11k_dbg(ab, ATH11K_DBG_DATA,
2620                                    "Unable to process msdu %d\n", ret);
2621                         dev_kfree_skb_any(msdu);
2622                         continue;
2623                 }
2624
2625                 ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2626         }
2627 }
2628
2629 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2630                          struct napi_struct *napi, int budget)
2631 {
2632         struct ath11k_dp *dp = &ab->dp;
2633         struct dp_rxdma_ring *rx_ring;
2634         int num_buffs_reaped[MAX_RADIOS] = {0};
2635         struct sk_buff_head msdu_list[MAX_RADIOS];
2636         struct ath11k_skb_rxcb *rxcb;
2637         int total_msdu_reaped = 0;
2638         struct hal_srng *srng;
2639         struct sk_buff *msdu;
2640         bool done = false;
2641         int buf_id, mac_id;
2642         struct ath11k *ar;
2643         struct hal_reo_dest_ring *desc;
2644         enum hal_reo_dest_ring_push_reason push_reason;
2645         u32 cookie;
2646         int i;
2647
2648         for (i = 0; i < MAX_RADIOS; i++)
2649                 __skb_queue_head_init(&msdu_list[i]);
2650
2651         srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2652
2653         spin_lock_bh(&srng->lock);
2654
2655 try_again:
2656         ath11k_hal_srng_access_begin(ab, srng);
2657
2658         while (likely(desc =
2659               (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2660                                                                              srng))) {
2661                 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2662                                    desc->buf_addr_info.info1);
2663                 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2664                                    cookie);
2665                 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2666
2667                 ar = ab->pdevs[mac_id].ar;
2668                 rx_ring = &ar->dp.rx_refill_buf_ring;
2669                 spin_lock_bh(&rx_ring->idr_lock);
2670                 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2671                 if (unlikely(!msdu)) {
2672                         ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2673                                     buf_id);
2674                         spin_unlock_bh(&rx_ring->idr_lock);
2675                         continue;
2676                 }
2677
2678                 idr_remove(&rx_ring->bufs_idr, buf_id);
2679                 spin_unlock_bh(&rx_ring->idr_lock);
2680
2681                 rxcb = ATH11K_SKB_RXCB(msdu);
2682                 dma_unmap_single(ab->dev, rxcb->paddr,
2683                                  msdu->len + skb_tailroom(msdu),
2684                                  DMA_FROM_DEVICE);
2685
2686                 num_buffs_reaped[mac_id]++;
2687
2688                 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2689                                         desc->info0);
2690                 if (unlikely(push_reason !=
2691                              HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2692                         dev_kfree_skb_any(msdu);
2693                         ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2694                         continue;
2695                 }
2696
2697                 rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2698                                          RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2699                 rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2700                                         RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2701                 rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2702                                            RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2703                 rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2704                                           desc->rx_mpdu_info.meta_data);
2705                 rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2706                                          desc->rx_mpdu_info.info0);
2707                 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2708                                       desc->info0);
2709
2710                 rxcb->mac_id = mac_id;
2711                 __skb_queue_tail(&msdu_list[mac_id], msdu);
2712
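                     /* An msdu spanning multiple ring entries is flagged as a
                      * continuation; count it as reaped only when its final
                      * (non-continuation) entry is seen, so the budget check
                      * below reflects complete msdus.
                      */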
2713                 if (rxcb->is_continuation) {
2714                         done = false;
2715                 } else {
2716                         total_msdu_reaped++;
2717                         done = true;
2718                 }
2719
2720                 if (total_msdu_reaped >= budget)
2721                         break;
2722         }
2723
2724         /* HW might have updated the head pointer after we cached it.
2725          * In this case, even though there are entries in the ring, we'll
2726          * get a NULL rx_desc. Give the read another try with the updated
2727          * cached head pointer so that we can reap the complete MPDU in
2728          * the current rx processing cycle.
2729          */
2730         if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2731                 ath11k_hal_srng_access_end(ab, srng);
2732                 goto try_again;
2733         }
2734
2735         ath11k_hal_srng_access_end(ab, srng);
2736
2737         spin_unlock_bh(&srng->lock);
2738
2739         if (unlikely(!total_msdu_reaped))
2740                 goto exit;
2741
2742         for (i = 0; i < ab->num_radios; i++) {
2743                 if (!num_buffs_reaped[i])
2744                         continue;
2745
2746                 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2747
2748                 ar = ab->pdevs[i].ar;
2749                 rx_ring = &ar->dp.rx_refill_buf_ring;
2750
2751                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2752                                            ab->hw_params.hal_params->rx_buf_rbm);
2753         }
2754 exit:
2755         return total_msdu_reaped;
2756 }
2757
2758 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2759                                            struct hal_rx_mon_ppdu_info *ppdu_info)
2760 {
2761         struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2762         u32 num_msdu;
2763         int i;
2764
2765         if (!rx_stats)
2766                 return;
2767
2768         num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2769                    ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2770
2771         rx_stats->num_msdu += num_msdu;
2772         rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2773                                     ppdu_info->tcp_ack_msdu_count;
2774         rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2775         rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2776
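             /* 11a/11b PPDUs carry no HT/VHT rate fields; normalize nss to 1
              * and point mcs/tid at their highest (legacy) buckets before
              * updating the counters below.
              */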
2777         if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2778             ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2779                 ppdu_info->nss = 1;
2780                 ppdu_info->mcs = HAL_RX_MAX_MCS;
2781                 ppdu_info->tid = IEEE80211_NUM_TIDS;
2782         }
2783
2784         if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2785                 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2786
2787         if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2788                 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2789
2790         if (ppdu_info->gi < HAL_RX_GI_MAX)
2791                 rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2792
2793         if (ppdu_info->bw < HAL_RX_BW_MAX)
2794                 rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2795
2796         if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2797                 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2798
2799         if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2800                 rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2801
2802         if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2803                 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2804
2805         if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2806                 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2807
2808         if (ppdu_info->is_stbc)
2809                 rx_stats->stbc_count += num_msdu;
2810
2811         if (ppdu_info->beamformed)
2812                 rx_stats->beamformed_count += num_msdu;
2813
2814         if (ppdu_info->num_mpdu_fcs_ok > 1)
2815                 rx_stats->ampdu_msdu_count += num_msdu;
2816         else
2817                 rx_stats->non_ampdu_msdu_count += num_msdu;
2818
2819         rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2820         rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2821         rx_stats->dcm_count += ppdu_info->dcm;
2822         rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2823
2824         arsta->rssi_comb = ppdu_info->rssi_comb;
2825
2826         BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2827                              ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2828
2829         for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2830                 arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2831
2832         rx_stats->rx_duration += ppdu_info->rx_duration;
2833         arsta->rx_duration = rx_stats->rx_duration;
2834 }
2835
2836 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2837                                                          struct dp_rxdma_ring *rx_ring,
2838                                                          int *buf_id)
2839 {
2840         struct sk_buff *skb;
2841         dma_addr_t paddr;
2842
2843         skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2844                             DP_RX_BUFFER_ALIGN_SIZE);
2845
2846         if (!skb)
2847                 goto fail_alloc_skb;
2848
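             /* Advance skb->data so the buffer handed to DMA starts on a
              * DP_RX_BUFFER_ALIGN_SIZE boundary.
              */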
2849         if (!IS_ALIGNED((unsigned long)skb->data,
2850                         DP_RX_BUFFER_ALIGN_SIZE)) {
2851                 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2852                          skb->data);
2853         }
2854
2855         paddr = dma_map_single(ab->dev, skb->data,
2856                                skb->len + skb_tailroom(skb),
2857                                DMA_FROM_DEVICE);
2858         if (unlikely(dma_mapping_error(ab->dev, paddr)))
2859                 goto fail_free_skb;
2860
2861         spin_lock_bh(&rx_ring->idr_lock);
2862         *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2863                             rx_ring->bufs_max, GFP_ATOMIC);
2864         spin_unlock_bh(&rx_ring->idr_lock);
2865         if (*buf_id < 0)
2866                 goto fail_dma_unmap;
2867
2868         ATH11K_SKB_RXCB(skb)->paddr = paddr;
2869         return skb;
2870
2871 fail_dma_unmap:
2872         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2873                          DMA_FROM_DEVICE);
2874 fail_free_skb:
2875         dev_kfree_skb_any(skb);
2876 fail_alloc_skb:
2877         return NULL;
2878 }
2879
2880 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2881                                            struct dp_rxdma_ring *rx_ring,
2882                                            int req_entries,
2883                                            enum hal_rx_buf_return_buf_manager mgr)
2884 {
2885         struct hal_srng *srng;
2886         u32 *desc;
2887         struct sk_buff *skb;
2888         int num_free;
2889         int num_remain;
2890         int buf_id;
2891         u32 cookie;
2892         dma_addr_t paddr;
2893
2894         req_entries = min(req_entries, rx_ring->bufs_max);
2895
2896         srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2897
2898         spin_lock_bh(&srng->lock);
2899
2900         ath11k_hal_srng_access_begin(ab, srng);
2901
2902         num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2903
2904         req_entries = min(num_free, req_entries);
2905         num_remain = req_entries;
2906
2907         while (num_remain > 0) {
2908                 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2909                                                         &buf_id);
2910                 if (!skb)
2911                         break;
2912                 paddr = ATH11K_SKB_RXCB(skb)->paddr;
2913
2914                 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2915                 if (!desc)
2916                         goto fail_desc_get;
2917
2918                 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2919                          FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2920
2921                 num_remain--;
2922
2923                 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2924         }
2925
2926         ath11k_hal_srng_access_end(ab, srng);
2927
2928         spin_unlock_bh(&srng->lock);
2929
2930         return req_entries - num_remain;
2931
2932 fail_desc_get:
2933         spin_lock_bh(&rx_ring->idr_lock);
2934         idr_remove(&rx_ring->bufs_idr, buf_id);
2935         spin_unlock_bh(&rx_ring->idr_lock);
2936         dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2937                          DMA_FROM_DEVICE);
2938         dev_kfree_skb_any(skb);
2939         ath11k_hal_srng_access_end(ab, srng);
2940         spin_unlock_bh(&srng->lock);
2941
2942         return req_entries - num_remain;
2943 }
2944
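     /* PPDU IDs wrap around; a difference larger than this threshold is
      * treated as a wrap, inverting the lead/lag decision when comparing
      * the status ring PPDU ID against the destination ring PPDU ID.
      */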
2945 #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2946
2947 static void
2948 ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2949                                          struct hal_tlv_hdr *tlv)
2950 {
2951         struct hal_rx_ppdu_start *ppdu_start;
2952         u16 ppdu_id_diff, ppdu_id, tlv_len;
2953         u8 *ptr;
2954
2955         /* The PPDU ID is part of the second TLV; move ptr to the second TLV */
2956         tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2957         ptr = (u8 *)tlv;
2958         ptr += sizeof(*tlv) + tlv_len;
2959         tlv = (struct hal_tlv_hdr *)ptr;
2960
2961         if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2962                 return;
2963
2964         ptr += sizeof(*tlv);
2965         ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2966         ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2967                             __le32_to_cpu(ppdu_start->info0));
2968
2969         if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
2970                 pmon->buf_state = DP_MON_STATUS_LEAD;
2971                 ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2972                 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2973                         pmon->buf_state = DP_MON_STATUS_LAG;
2974         } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2975                 pmon->buf_state = DP_MON_STATUS_LAG;
2976                 ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2977                 if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2978                         pmon->buf_state = DP_MON_STATUS_LEAD;
2979         }
2980 }
2981
2982 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2983                                              int *budget, struct sk_buff_head *skb_list)
2984 {
2985         struct ath11k *ar;
2986         const struct ath11k_hw_hal_params *hal_params;
2987         struct ath11k_pdev_dp *dp;
2988         struct dp_rxdma_ring *rx_ring;
2989         struct ath11k_mon_data *pmon;
2990         struct hal_srng *srng;
2991         void *rx_mon_status_desc;
2992         struct sk_buff *skb;
2993         struct ath11k_skb_rxcb *rxcb;
2994         struct hal_tlv_hdr *tlv;
2995         u32 cookie;
2996         int buf_id, srng_id;
2997         dma_addr_t paddr;
2998         u8 rbm;
2999         int num_buffs_reaped = 0;
3000
3001         ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3002         dp = &ar->dp;
3003         pmon = &dp->mon_data;
3004         srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3005         rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3006
3007         srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3008
3009         spin_lock_bh(&srng->lock);
3010
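             /* Reap completed status buffers into skb_list, refilling each
              * reaped ring entry with a freshly allocated replacement buffer.
              */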
3011         ath11k_hal_srng_access_begin(ab, srng);
3012         while (*budget) {
3013                 *budget -= 1;
3014                 rx_mon_status_desc =
3015                         ath11k_hal_srng_src_peek(ab, srng);
3016                 if (!rx_mon_status_desc) {
3017                         pmon->buf_state = DP_MON_STATUS_REPLINISH;
3018                         break;
3019                 }
3020
3021                 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3022                                                 &cookie, &rbm);
3023                 if (paddr) {
3024                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3025
3026                         spin_lock_bh(&rx_ring->idr_lock);
3027                         skb = idr_find(&rx_ring->bufs_idr, buf_id);
3028                         if (!skb) {
3029                                 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3030                                             buf_id);
3031                                 spin_unlock_bh(&rx_ring->idr_lock);
3032                                 pmon->buf_state = DP_MON_STATUS_REPLINISH;
3033                                 goto move_next;
3034                         }
3035
3036                         idr_remove(&rx_ring->bufs_idr, buf_id);
3037                         spin_unlock_bh(&rx_ring->idr_lock);
3038
3039                         rxcb = ATH11K_SKB_RXCB(skb);
3040
3041                         dma_unmap_single(ab->dev, rxcb->paddr,
3042                                          skb->len + skb_tailroom(skb),
3043                                          DMA_FROM_DEVICE);
3044
3045                         tlv = (struct hal_tlv_hdr *)skb->data;
3046                         if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3047                                         HAL_RX_STATUS_BUFFER_DONE) {
3048                                 ath11k_warn(ab, "mon status DONE not set %lx\n",
3049                                             FIELD_GET(HAL_TLV_HDR_TAG,
3050                                                       tlv->tl));
3051                                 dev_kfree_skb_any(skb);
3052                                 pmon->buf_state = DP_MON_STATUS_NO_DMA;
3053                                 goto move_next;
3054                         }
3055
3056                         if (ab->hw_params.full_monitor_mode) {
3057                                 ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3058                                 if (paddr == pmon->mon_status_paddr)
3059                                         pmon->buf_state = DP_MON_STATUS_MATCH;
3060                         }
3061                         __skb_queue_tail(skb_list, skb);
3062                 } else {
3063                         pmon->buf_state = DP_MON_STATUS_REPLINISH;
3064                 }
3065 move_next:
3066                 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3067                                                         &buf_id);
3068
3069                 if (!skb) {
3070                         hal_params = ab->hw_params.hal_params;
3071                         ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3072                                                         hal_params->rx_buf_rbm);
3073                         num_buffs_reaped++;
3074                         break;
3075                 }
3076                 rxcb = ATH11K_SKB_RXCB(skb);
3077
3078                 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3079                          FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3080
3081                 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3082                                                 cookie,
3083                                                 ab->hw_params.hal_params->rx_buf_rbm);
3084                 ath11k_hal_srng_src_get_next_entry(ab, srng);
3085                 num_buffs_reaped++;
3086         }
3087         ath11k_hal_srng_access_end(ab, srng);
3088         spin_unlock_bh(&srng->lock);
3089
3090         return num_buffs_reaped;
3091 }
3092
3093 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3094 {
3095         struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3096
3097         spin_lock_bh(&rx_tid->ab->base_lock);
3098         if (rx_tid->last_frag_no &&
3099             rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3100                 spin_unlock_bh(&rx_tid->ab->base_lock);
3101                 return;
3102         }
3103         ath11k_dp_rx_frags_cleanup(rx_tid, true);
3104         spin_unlock_bh(&rx_tid->ab->base_lock);
3105 }
3106
3107 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3108 {
3109         struct ath11k_base *ab = ar->ab;
3110         struct crypto_shash *tfm;
3111         struct ath11k_peer *peer;
3112         struct dp_rx_tid *rx_tid;
3113         int i;
3114
3115         tfm = crypto_alloc_shash("michael_mic", 0, 0);
3116         if (IS_ERR(tfm))
3117                 return PTR_ERR(tfm);
3118
3119         spin_lock_bh(&ab->base_lock);
3120
3121         peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3122         if (!peer) {
3123                 ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3124                 spin_unlock_bh(&ab->base_lock);
3125                 return -ENOENT;
3126         }
3127
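             /* Note the inclusive bound: peer->rx_tid[] carries one entry
              * beyond the QoS TIDs, used for non-QoS traffic.
              */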
3128         for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3129                 rx_tid = &peer->rx_tid[i];
3130                 rx_tid->ab = ab;
3131                 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3132                 skb_queue_head_init(&rx_tid->rx_frags);
3133         }
3134
3135         peer->tfm_mmic = tfm;
3136         spin_unlock_bh(&ab->base_lock);
3137
3138         return 0;
3139 }
3140
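     /* Compute the TKIP Michael MIC over the MIC pseudo-header (DA, SA,
      * priority and three zero pad bytes) followed by the payload, using
      * the 8-byte Michael key.
      */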
3141 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3142                                       struct ieee80211_hdr *hdr, u8 *data,
3143                                       size_t data_len, u8 *mic)
3144 {
3145         SHASH_DESC_ON_STACK(desc, tfm);
3146         u8 mic_hdr[16] = {0};
3147         u8 tid = 0;
3148         int ret;
3149
3150         if (!tfm)
3151                 return -EINVAL;
3152
3153         desc->tfm = tfm;
3154
3155         ret = crypto_shash_setkey(tfm, key, 8);
3156         if (ret)
3157                 goto out;
3158
3159         ret = crypto_shash_init(desc);
3160         if (ret)
3161                 goto out;
3162
3163         /* TKIP MIC header */
3164         memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3165         memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3166         if (ieee80211_is_data_qos(hdr->frame_control))
3167                 tid = ieee80211_get_tid(hdr);
3168         mic_hdr[12] = tid;
3169
3170         ret = crypto_shash_update(desc, mic_hdr, 16);
3171         if (ret)
3172                 goto out;
3173         ret = crypto_shash_update(desc, data, data_len);
3174         if (ret)
3175                 goto out;
3176         ret = crypto_shash_final(desc, mic);
3177 out:
3178         shash_desc_zero(desc);
3179         return ret;
3180 }
3181
3182 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3183                                           struct sk_buff *msdu)
3184 {
3185         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3186         struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3187         struct ieee80211_key_conf *key_conf;
3188         struct ieee80211_hdr *hdr;
3189         u8 mic[IEEE80211_CCMP_MIC_LEN];
3190         int head_len, tail_len, ret;
3191         size_t data_len;
3192         u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3193         u8 *key, *data;
3194         u8 key_idx;
3195
3196         if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3197             HAL_ENCRYPT_TYPE_TKIP_MIC)
3198                 return 0;
3199
3200         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3201         hdr_len = ieee80211_hdrlen(hdr->frame_control);
3202         head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3203         tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3204
3205         if (!is_multicast_ether_addr(hdr->addr1))
3206                 key_idx = peer->ucast_keyidx;
3207         else
3208                 key_idx = peer->mcast_keyidx;
3209
3210         key_conf = peer->keys[key_idx];
3211
3212         data = msdu->data + head_len;
3213         data_len = msdu->len - head_len - tail_len;
3214         key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3215
3216         ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3217         if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3218                 goto mic_fail;
3219
3220         return 0;
3221
3222 mic_fail:
3223         (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3224         (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3225
3226         rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3227                     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3228         skb_pull(msdu, hal_rx_desc_sz);
3229
3230         ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3231         ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3232                                HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3233         ieee80211_rx(ar->hw, msdu);
3234         return -EINVAL;
3235 }
3236
3237 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3238                                         enum hal_encrypt_type enctype, u32 flags)
3239 {
3240         struct ieee80211_hdr *hdr;
3241         size_t hdr_len;
3242         size_t crypto_len;
3243         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3244
3245         if (!flags)
3246                 return;
3247
3248         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3249
3250         if (flags & RX_FLAG_MIC_STRIPPED)
3251                 skb_trim(msdu, msdu->len -
3252                          ath11k_dp_rx_crypto_mic_len(ar, enctype));
3253
3254         if (flags & RX_FLAG_ICV_STRIPPED)
3255                 skb_trim(msdu, msdu->len -
3256                          ath11k_dp_rx_crypto_icv_len(ar, enctype));
3257
3258         if (flags & RX_FLAG_IV_STRIPPED) {
3259                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3260                 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3261
3262                 memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3263                         (void *)msdu->data + hal_rx_desc_sz, hdr_len);
3264                 skb_pull(msdu, crypto_len);
3265         }
3266 }
3267
3268 static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3269                                  struct ath11k_peer *peer,
3270                                  struct dp_rx_tid *rx_tid,
3271                                  struct sk_buff **defrag_skb)
3272 {
3273         struct hal_rx_desc *rx_desc;
3274         struct sk_buff *skb, *first_frag, *last_frag;
3275         struct ieee80211_hdr *hdr;
3276         struct rx_attention *rx_attention;
3277         enum hal_encrypt_type enctype;
3278         bool is_decrypted = false;
3279         int msdu_len = 0;
3280         int extra_space;
3281         u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3282
3283         first_frag = skb_peek(&rx_tid->rx_frags);
3284         last_frag = skb_peek_tail(&rx_tid->rx_frags);
3285
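             /* Walk the fragments, stripping crypto material so they can be
              * stitched together: the IV is kept only on the first fragment
              * and the ICV/MIC only on the last, the FCS is trimmed from all
              * but the last fragment, and non-first fragments drop their rx
              * descriptor and 802.11 header.
              */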
3286         skb_queue_walk(&rx_tid->rx_frags, skb) {
3287                 flags = 0;
3288                 rx_desc = (struct hal_rx_desc *)skb->data;
3289                 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3290
3291                 enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3292                 if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3293                         rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3294                         is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3295                 }
3296
3297                 if (is_decrypted) {
3298                         if (skb != first_frag)
3299                                 flags |= RX_FLAG_IV_STRIPPED;
3300                         if (skb != last_frag)
3301                                 flags |= RX_FLAG_ICV_STRIPPED |
3302                                          RX_FLAG_MIC_STRIPPED;
3303                 }
3304
3305                 /* RX fragments are always raw packets */
3306                 if (skb != last_frag)
3307                         skb_trim(skb, skb->len - FCS_LEN);
3308                 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3309
3310                 if (skb != first_frag)
3311                         skb_pull(skb, hal_rx_desc_sz +
3312                                       ieee80211_hdrlen(hdr->frame_control));
3313                 msdu_len += skb->len;
3314         }
3315
3316         extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3317         if (extra_space > 0 &&
3318             (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3319                 return -ENOMEM;
3320
3321         __skb_unlink(first_frag, &rx_tid->rx_frags);
3322         while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3323                 skb_put_data(first_frag, skb->data, skb->len);
3324                 dev_kfree_skb_any(skb);
3325         }
3326
3327         hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3328         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3329         ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3330
3331         if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3332                 first_frag = NULL;
3333
3334         *defrag_skb = first_frag;
3335         return 0;
3336 }
3337
3338 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3339                                               struct sk_buff *defrag_skb)
3340 {
3341         struct ath11k_base *ab = ar->ab;
3342         struct ath11k_pdev_dp *dp = &ar->dp;
3343         struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3344         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3345         struct hal_reo_entrance_ring *reo_ent_ring;
3346         struct hal_reo_dest_ring *reo_dest_ring;
3347         struct dp_link_desc_bank *link_desc_banks;
3348         struct hal_rx_msdu_link *msdu_link;
3349         struct hal_rx_msdu_details *msdu0;
3350         struct hal_srng *srng;
3351         dma_addr_t paddr;
3352         u32 desc_bank, msdu_info, mpdu_info;
3353         u32 dst_idx, cookie, hal_rx_desc_sz;
3354         int ret, buf_id;
3355
3356         hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3357         link_desc_banks = ab->dp.link_desc_banks;
3358         reo_dest_ring = rx_tid->dst_ring_desc;
3359
3360         ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3361         msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3362                         (paddr - link_desc_banks[desc_bank].paddr));
3363         msdu0 = &msdu_link->msdu_link[0];
3364         dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3365         memset(msdu0, 0, sizeof(*msdu0));
3366
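             /* Rebuild the first msdu descriptor so the reassembled frame is
              * presented as a single complete msdu with valid SA/DA and the
              * new (defragmented) length.
              */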
3367         msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3368                     FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3369                     FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3370                     FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3371                                defrag_skb->len - hal_rx_desc_sz) |
3372                     FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3373                     FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3374                     FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3375         msdu0->rx_msdu_info.info0 = msdu_info;
3376
3377         /* change msdu len in hal rx desc */
3378         ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3379
3380         paddr = dma_map_single(ab->dev, defrag_skb->data,
3381                                defrag_skb->len + skb_tailroom(defrag_skb),
3382                                DMA_TO_DEVICE);
3383         if (dma_mapping_error(ab->dev, paddr))
3384                 return -ENOMEM;
3385
3386         spin_lock_bh(&rx_refill_ring->idr_lock);
3387         buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3388                            rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3389         spin_unlock_bh(&rx_refill_ring->idr_lock);
3390         if (buf_id < 0) {
3391                 ret = -ENOMEM;
3392                 goto err_unmap_dma;
3393         }
3394
3395         ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3396         cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3397                  FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3398
3399         ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3400                                         ab->hw_params.hal_params->rx_buf_rbm);
3401
3402         /* Fill mpdu details into the reo entrance ring */
3403         srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3404
3405         spin_lock_bh(&srng->lock);
3406         ath11k_hal_srng_access_begin(ab, srng);
3407
3408         reo_ent_ring = (struct hal_reo_entrance_ring *)
3409                         ath11k_hal_srng_src_get_next_entry(ab, srng);
3410         if (!reo_ent_ring) {
3411                 ath11k_hal_srng_access_end(ab, srng);
3412                 spin_unlock_bh(&srng->lock);
3413                 ret = -ENOSPC;
3414                 goto err_free_idr;
3415         }
3416         memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3417
3418         ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3419         ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3420                                         HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3421
3422         mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3423                     FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3424                     FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3425                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3426                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3427                     FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3428                     FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3429
3430         reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3431         reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3432         reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3433         reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3434                                          FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3435                                                    reo_dest_ring->info0)) |
3436                               FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3437         ath11k_hal_srng_access_end(ab, srng);
3438         spin_unlock_bh(&srng->lock);
3439
3440         return 0;
3441
3442 err_free_idr:
3443         spin_lock_bh(&rx_refill_ring->idr_lock);
3444         idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3445         spin_unlock_bh(&rx_refill_ring->idr_lock);
3446 err_unmap_dma:
3447         dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3448                          DMA_TO_DEVICE);
3449         return ret;
3450 }
3451
3452 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3453                                     struct sk_buff *a, struct sk_buff *b)
3454 {
3455         int frag1, frag2;
3456
3457         frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3458         frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3459
3460         return frag1 - frag2;
3461 }
3462
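     /* Insert cur_frag into frag_list in fragment-number order so the list
      * stays sorted even when fragments arrive out of order.
      */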
3463 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3464                                       struct sk_buff_head *frag_list,
3465                                       struct sk_buff *cur_frag)
3466 {
3467         struct sk_buff *skb;
3468         int cmp;
3469
3470         skb_queue_walk(frag_list, skb) {
3471                 cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3472                 if (cmp < 0)
3473                         continue;
3474                 __skb_queue_before(frag_list, skb, cur_frag);
3475                 return;
3476         }
3477         __skb_queue_tail(frag_list, cur_frag);
3478 }
3479
3480 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3481 {
3482         struct ieee80211_hdr *hdr;
3483         u64 pn = 0;
3484         u8 *ehdr;
3485         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3486
3487         hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3488         ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3489
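             /* Assemble the 48-bit packet number from the CCMP/GCMP IV:
              * PN0/PN1 sit in bytes 0-1, byte 2 is reserved, byte 3 holds
              * the key ID, and PN2-PN5 follow in bytes 4-7.
              */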
3490         pn = ehdr[0];
3491         pn |= (u64)ehdr[1] << 8;
3492         pn |= (u64)ehdr[4] << 16;
3493         pn |= (u64)ehdr[5] << 24;
3494         pn |= (u64)ehdr[6] << 32;
3495         pn |= (u64)ehdr[7] << 40;
3496
3497         return pn;
3498 }
3499
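     /* For CCMP/GCMP ciphers the PN must increase by exactly one from one
      * fragment to the next; reject the fragment set otherwise. Other
      * ciphers are not PN-checked here.
      */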
3500 static bool
3501 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3502 {
3503         enum hal_encrypt_type encrypt_type;
3504         struct sk_buff *first_frag, *skb;
3505         struct hal_rx_desc *desc;
3506         u64 last_pn;
3507         u64 cur_pn;
3508
3509         first_frag = skb_peek(&rx_tid->rx_frags);
3510         desc = (struct hal_rx_desc *)first_frag->data;
3511
3512         encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3513         if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3514             encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3515             encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3516             encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3517                 return true;
3518
3519         last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3520         skb_queue_walk(&rx_tid->rx_frags, skb) {
3521                 if (skb == first_frag)
3522                         continue;
3523
3524                 cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3525                 if (cur_pn != last_pn + 1)
3526                         return false;
3527                 last_pn = cur_pn;
3528         }
3529         return true;
3530 }
3531
3532 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3533                                     struct sk_buff *msdu,
3534                                     u32 *ring_desc)
3535 {
3536         struct ath11k_base *ab = ar->ab;
3537         struct hal_rx_desc *rx_desc;
3538         struct ath11k_peer *peer;
3539         struct dp_rx_tid *rx_tid;
3540         struct sk_buff *defrag_skb = NULL;
3541         u32 peer_id;
3542         u16 seqno, frag_no;
3543         u8 tid;
3544         int ret = 0;
3545         bool more_frags;
3546         bool is_mcbc;
3547
3548         rx_desc = (struct hal_rx_desc *)msdu->data;
3549         peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3550         tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3551         seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3552         frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3553         more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3554         is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3555
3556         /* Multicast/Broadcast fragments are not expected */
3557         if (is_mcbc)
3558                 return -EINVAL;
3559
3560         if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3561             !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3562             tid > IEEE80211_NUM_TIDS)
3563                 return -EINVAL;
3564
3565         /* Received an unfragmented packet in the reo
3566          * exception ring; this shouldn't happen, as
3567          * these packets typically come from the
3568          * reo2sw srngs.
3569          */
3570         if (WARN_ON_ONCE(!frag_no && !more_frags))
3571                 return -EINVAL;
3572
3573         spin_lock_bh(&ab->base_lock);
3574         peer = ath11k_peer_find_by_id(ab, peer_id);
3575         if (!peer) {
3576                 ath11k_warn(ab, "failed to find the peer to defragment the received fragment, peer_id %d\n",
3577                             peer_id);
3578                 ret = -ENOENT;
3579                 goto out_unlock;
3580         }
3581         rx_tid = &peer->rx_tid[tid];
3582
3583         if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3584             skb_queue_empty(&rx_tid->rx_frags)) {
3585                 /* Flush stored fragments and start a new sequence */
3586                 ath11k_dp_rx_frags_cleanup(rx_tid, true);
3587                 rx_tid->cur_sn = seqno;
3588         }
3589
3590         if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3591                 /* Fragment already present */
3592                 ret = -EINVAL;
3593                 goto out_unlock;
3594         }
3595
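             /* Append if this fragment number is above any seen so far;
              * otherwise insert it in fragment-number order.
              */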
3596         if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3597                 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3598         else
3599                 ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3600
3601         rx_tid->rx_frag_bitmap |= BIT(frag_no);
3602         if (!more_frags)
3603                 rx_tid->last_frag_no = frag_no;
3604
3605         if (frag_no == 0) {
3606                 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3607                                                 sizeof(*rx_tid->dst_ring_desc),
3608                                                 GFP_ATOMIC);
3609                 if (!rx_tid->dst_ring_desc) {
3610                         ret = -ENOMEM;
3611                         goto out_unlock;
3612                 }
3613         } else {
3614                 ath11k_dp_rx_link_desc_return(ab, ring_desc,
3615                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3616         }
3617
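
             /* If the fragment set is still incomplete, re-arm the
              * reassembly timer and wait for the remaining fragments.
              */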
3618         if (!rx_tid->last_frag_no ||
3619             rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3620                 mod_timer(&rx_tid->frag_timer, jiffies +
3621                                                ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3622                 goto out_unlock;
3623         }
3624
3625         spin_unlock_bh(&ab->base_lock);
3626         del_timer_sync(&rx_tid->frag_timer);
3627         spin_lock_bh(&ab->base_lock);
3628
3629         peer = ath11k_peer_find_by_id(ab, peer_id);
3630         if (!peer)
3631                 goto err_frags_cleanup;
3632
3633         if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3634                 goto err_frags_cleanup;
3635
3636         if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3637                 goto err_frags_cleanup;
3638
3639         if (!defrag_skb)
3640                 goto err_frags_cleanup;
3641
3642         if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3643                 goto err_frags_cleanup;
3644
3645         ath11k_dp_rx_frags_cleanup(rx_tid, false);
3646         goto out_unlock;
3647
3648 err_frags_cleanup:
3649         dev_kfree_skb_any(defrag_skb);
3650         ath11k_dp_rx_frags_cleanup(rx_tid, true);
3651 out_unlock:
3652         spin_unlock_bh(&ab->base_lock);
3653         return ret;
3654 }
3655
3656 static int
3657 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3658 {
3659         struct ath11k_pdev_dp *dp = &ar->dp;
3660         struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3661         struct sk_buff *msdu;
3662         struct ath11k_skb_rxcb *rxcb;
3663         struct hal_rx_desc *rx_desc;
3664         u8 *hdr_status;
3665         u16 msdu_len;
3666         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3667
3668         spin_lock_bh(&rx_ring->idr_lock);
3669         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3670         if (!msdu) {
3671                 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3672                             buf_id);
3673                 spin_unlock_bh(&rx_ring->idr_lock);
3674                 return -EINVAL;
3675         }
3676
3677         idr_remove(&rx_ring->bufs_idr, buf_id);
3678         spin_unlock_bh(&rx_ring->idr_lock);
3679
3680         rxcb = ATH11K_SKB_RXCB(msdu);
3681         dma_unmap_single(ar->ab->dev, rxcb->paddr,
3682                          msdu->len + skb_tailroom(msdu),
3683                          DMA_FROM_DEVICE);
3684
3685         if (drop) {
3686                 dev_kfree_skb_any(msdu);
3687                 return 0;
3688         }
3689
3690         rcu_read_lock();
3691         if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3692                 dev_kfree_skb_any(msdu);
3693                 goto exit;
3694         }
3695
3696         if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3697                 dev_kfree_skb_any(msdu);
3698                 goto exit;
3699         }
3700
3701         rx_desc = (struct hal_rx_desc *)msdu->data;
3702         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3703         if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3704                 hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3705                 ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3706                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3707                                 sizeof(struct ieee80211_hdr));
3708                 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3709                                 sizeof(struct hal_rx_desc));
3710                 dev_kfree_skb_any(msdu);
3711                 goto exit;
3712         }
3713
3714         skb_put(msdu, hal_rx_desc_sz + msdu_len);
3715
3716         if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3717                 dev_kfree_skb_any(msdu);
3718                 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3719                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3720         }
3721 exit:
3722         rcu_read_unlock();
3723         return 0;
3724 }
3725
3726 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3727                              int budget)
3728 {
3729         u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3730         struct dp_link_desc_bank *link_desc_banks;
3731         enum hal_rx_buf_return_buf_manager rbm;
3732         int tot_n_bufs_reaped, quota, ret, i;
3733         int n_bufs_reaped[MAX_RADIOS] = {0};
3734         struct dp_rxdma_ring *rx_ring;
3735         struct dp_srng *reo_except;
3736         u32 desc_bank, num_msdus;
3737         struct hal_srng *srng;
3738         struct ath11k_dp *dp;
3739         void *link_desc_va;
3740         int buf_id, mac_id;
3741         struct ath11k *ar;
3742         dma_addr_t paddr;
3743         u32 *desc;
3744         bool is_frag;
3745         u8 drop = 0;
3746
3747         tot_n_bufs_reaped = 0;
3748         quota = budget;
3749
3750         dp = &ab->dp;
3751         reo_except = &dp->reo_except_ring;
3752         link_desc_banks = dp->link_desc_banks;
3753
3754         srng = &ab->hal.srng_list[reo_except->ring_id];
3755
3756         spin_lock_bh(&srng->lock);
3757
3758         ath11k_hal_srng_access_begin(ab, srng);
3759
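             /* Drain the reo exception ring: each entry references a link
              * descriptor whose msdus are either reinjected for reassembly
              * (single-msdu rx fragments) or dropped.
              */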
3760         while (budget &&
3761                (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3762                 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3763
3764                 ab->soc_stats.err_ring_pkts++;
3765                 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3766                                                     &desc_bank);
3767                 if (ret) {
3768                         ath11k_warn(ab, "failed to parse error reo desc %d\n",
3769                                     ret);
3770                         continue;
3771                 }
3772                 link_desc_va = link_desc_banks[desc_bank].vaddr +
3773                                (paddr - link_desc_banks[desc_bank].paddr);
3774                 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3775                                                  &rbm);
3776                 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3777                     rbm != HAL_RX_BUF_RBM_SW3_BM) {
3778                         ab->soc_stats.invalid_rbm++;
3779                         ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3780                         ath11k_dp_rx_link_desc_return(ab, desc,
3781                                                       HAL_WBM_REL_BM_ACT_REL_MSDU);
3782                         continue;
3783                 }
3784
3785                 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3786
3787                 /* Process only rx fragments with one msdu per link desc below, and drop
3788                  * msdus indicated due to error reasons.
3789                  */
3790                 if (!is_frag || num_msdus > 1) {
3791                         drop = 1;
3792                         /* Return the link desc back to wbm idle list */
3793                         ath11k_dp_rx_link_desc_return(ab, desc,
3794                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3795                 }
3796
3797                 for (i = 0; i < num_msdus; i++) {
3798                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3799                                            msdu_cookies[i]);
3800
3801                         mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3802                                            msdu_cookies[i]);
3803
3804                         ar = ab->pdevs[mac_id].ar;
3805
3806                         if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3807                                 n_bufs_reaped[mac_id]++;
3808                                 tot_n_bufs_reaped++;
3809                         }
3810                 }
3811
3812                 if (tot_n_bufs_reaped >= quota) {
3813                         tot_n_bufs_reaped = quota;
3814                         goto exit;
3815                 }
3816
3817                 budget = quota - tot_n_bufs_reaped;
3818         }
3819
3820 exit:
3821         ath11k_hal_srng_access_end(ab, srng);
3822
3823         spin_unlock_bh(&srng->lock);
3824
3825         for (i = 0; i < ab->num_radios; i++) {
3826                 if (!n_bufs_reaped[i])
3827                         continue;
3828
3829                 ar = ab->pdevs[i].ar;
3830                 rx_ring = &ar->dp.rx_refill_buf_ring;
3831
3832                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3833                                            ab->hw_params.hal_params->rx_buf_rbm);
3834         }
3835
3836         return tot_n_bufs_reaped;
3837 }
3838
3839 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3840                                              int msdu_len,
3841                                              struct sk_buff_head *msdu_list)
3842 {
3843         struct sk_buff *skb, *tmp;
3844         struct ath11k_skb_rxcb *rxcb;
3845         int n_buffs;
3846
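             /* An oversized msdu is scattered across multiple rx buffers;
              * work out how many buffers it spans and drop that many
              * matching descriptor-address-zero entries from msdu_list.
              */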
3847         n_buffs = DIV_ROUND_UP(msdu_len,
3848                                (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3849
3850         skb_queue_walk_safe(msdu_list, skb, tmp) {
3851                 rxcb = ATH11K_SKB_RXCB(skb);
3852                 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3853                     rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3854                         if (!n_buffs)
3855                                 break;
3856                         __skb_unlink(skb, msdu_list);
3857                         dev_kfree_skb_any(skb);
3858                         n_buffs--;
3859                 }
3860         }
3861 }
3862
3863 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3864                                       struct ieee80211_rx_status *status,
3865                                       struct sk_buff_head *msdu_list)
3866 {
3867         u16 msdu_len;
3868         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3869         struct rx_attention *rx_attention;
3870         u8 l3pad_bytes;
3871         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3872         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3873
3874         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3875
3876         if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3877                 /* First buffer will be freed by the caller, so deduct its length */
3878                 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3879                 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3880                 return -EINVAL;
3881         }
3882
3883         rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3884         if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3885                 ath11k_warn(ar->ab,
3886                             "msdu_done bit not set in null_q_des processing\n");
3887                 __skb_queue_purge(msdu_list);
3888                 return -EIO;
3889         }
3890
3891         /* Handle NULL queue descriptor violations arising out of a missing
3892          * REO queue for a given peer or TID. This typically happens if a
3893          * packet is received on a QoS-enabled TID before the ADDBA
3894          * negotiation for that TID has set up the TID queue. It may also
3895          * happen for MC/BC frames if they are not routed to the non-QoS TID
3896          * queue, in the absence of any other default TID queue. This error
3897          * can show up in both the REO destination and WBM release rings.
3898          */
3899
3900         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3901         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3902
3903         if (rxcb->is_frag) {
3904                 skb_pull(msdu, hal_rx_desc_sz);
3905         } else {
3906                 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3907
3908                 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3909                         return -EINVAL;
3910
3911                 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3912                 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3913         }
3914         ath11k_dp_rx_h_ppdu(ar, desc, status);
3915
3916         ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3917
3918         rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
3919
3920         /* Note that the caller still has access to the msdu and will complete
3921          * rx with mac80211, so there is no need to clean up the amsdu_list.
3922          */
3923
3924         return 0;
3925 }
3926
3927 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3928                                    struct ieee80211_rx_status *status,
3929                                    struct sk_buff_head *msdu_list)
3930 {
3931         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3932         bool drop = false;
3933
3934         ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3935
3936         switch (rxcb->err_code) {
3937         case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3938                 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3939                         drop = true;
3940                 break;
3941         case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3942                 /* TODO: Do not drop PN failed packets in the driver;
3943                  * instead, it is good to drop such packets in mac80211
3944                  * after incrementing the replay counters.
3945                  */
3946                 fallthrough;
3947         default:
3948                 /* TODO: Review other errors and process them to mac80211
3949                  * as appropriate.
3950                  */
3951                 drop = true;
3952                 break;
3953         }
3954
3955         return drop;
3956 }
3957
3958 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3959                                         struct ieee80211_rx_status *status)
3960 {
3961         u16 msdu_len;
3962         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3963         u8 l3pad_bytes;
3964         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3965         u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3966
3967         rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3968         rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3969
3970         l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3971         msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3972         skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3973         skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3974
3975         ath11k_dp_rx_h_ppdu(ar, desc, status);
3976
3977         status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3978                          RX_FLAG_DECRYPTED);
3979
3980         ath11k_dp_rx_h_undecap(ar, msdu, desc,
3981                                HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3982 }
3983
3984 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
3985                                      struct ieee80211_rx_status *status)
3986 {
3987         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3988         bool drop = false;
3989
3990         ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3991
3992         switch (rxcb->err_code) {
3993         case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3994                 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3995                 break;
3996         default:
3997                 /* TODO: Review other rxdma error code to check if anything is
3998                  * worth reporting to mac80211
3999                  */
4000                 drop = true;
4001                 break;
4002         }
4003
4004         return drop;
4005 }
4006
4007 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4008                                  struct napi_struct *napi,
4009                                  struct sk_buff *msdu,
4010                                  struct sk_buff_head *msdu_list)
4011 {
4012         struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4013         struct ieee80211_rx_status rxs = {0};
4014         bool drop = true;
4015
4016         switch (rxcb->err_rel_src) {
4017         case HAL_WBM_REL_SRC_MODULE_REO:
4018                 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4019                 break;
4020         case HAL_WBM_REL_SRC_MODULE_RXDMA:
4021                 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4022                 break;
4023         default:
4024                 /* msdu will get freed */
4025                 break;
4026         }
4027
4028         if (drop) {
4029                 dev_kfree_skb_any(msdu);
4030                 return;
4031         }
4032
4033         ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4034 }
4035
4036 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4037                                  struct napi_struct *napi, int budget)
4038 {
4039         struct ath11k *ar;
4040         struct ath11k_dp *dp = &ab->dp;
4041         struct dp_rxdma_ring *rx_ring;
4042         struct hal_rx_wbm_rel_info err_info;
4043         struct hal_srng *srng;
4044         struct sk_buff *msdu;
4045         struct sk_buff_head msdu_list[MAX_RADIOS];
4046         struct ath11k_skb_rxcb *rxcb;
4047         u32 *rx_desc;
4048         int buf_id, mac_id;
4049         int num_buffs_reaped[MAX_RADIOS] = {0};
4050         int total_num_buffs_reaped = 0;
4051         int ret, i;
4052
4053         for (i = 0; i < ab->num_radios; i++)
4054                 __skb_queue_head_init(&msdu_list[i]);
4055
4056         srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4057
4058         spin_lock_bh(&srng->lock);
4059
4060         ath11k_hal_srng_access_begin(ab, srng);
4061
4062         while (budget) {
4063                 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4064                 if (!rx_desc)
4065                         break;
4066
4067                 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4068                 if (ret) {
4069                         ath11k_warn(ab,
4070                                     "failed to parse rx error in wbm_rel ring desc %d\n",
4071                                     ret);
4072                         continue;
4073                 }
4074
4075                 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4076                 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4077
4078                 ar = ab->pdevs[mac_id].ar;
4079                 rx_ring = &ar->dp.rx_refill_buf_ring;
4080
4081                 spin_lock_bh(&rx_ring->idr_lock);
4082                 msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4083                 if (!msdu) {
4084                         ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4085                                     buf_id, mac_id);
4086                         spin_unlock_bh(&rx_ring->idr_lock);
4087                         continue;
4088                 }
4089
4090                 idr_remove(&rx_ring->bufs_idr, buf_id);
4091                 spin_unlock_bh(&rx_ring->idr_lock);
4092
4093                 rxcb = ATH11K_SKB_RXCB(msdu);
4094                 dma_unmap_single(ab->dev, rxcb->paddr,
4095                                  msdu->len + skb_tailroom(msdu),
4096                                  DMA_FROM_DEVICE);
4097
4098                 num_buffs_reaped[mac_id]++;
4099                 total_num_buffs_reaped++;
4100                 budget--;
4101
4102                 if (err_info.push_reason !=
4103                     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4104                         dev_kfree_skb_any(msdu);
4105                         continue;
4106                 }
4107
4108                 rxcb->err_rel_src = err_info.err_rel_src;
4109                 rxcb->err_code = err_info.err_code;
4110                 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4111                 __skb_queue_tail(&msdu_list[mac_id], msdu);
4112         }
4113
4114         ath11k_hal_srng_access_end(ab, srng);
4115
4116         spin_unlock_bh(&srng->lock);
4117
4118         if (!total_num_buffs_reaped)
4119                 goto done;
4120
4121         for (i = 0; i < ab->num_radios; i++) {
4122                 if (!num_buffs_reaped[i])
4123                         continue;
4124
4125                 ar = ab->pdevs[i].ar;
4126                 rx_ring = &ar->dp.rx_refill_buf_ring;
4127
4128                 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4129                                            ab->hw_params.hal_params->rx_buf_rbm);
4130         }
4131
4132         rcu_read_lock();
4133         for (i = 0; i < ab->num_radios; i++) {
4134                 if (!rcu_dereference(ab->pdevs_active[i])) {
4135                         __skb_queue_purge(&msdu_list[i]);
4136                         continue;
4137                 }
4138
4139                 ar = ab->pdevs[i].ar;
4140
4141                 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4142                         __skb_queue_purge(&msdu_list[i]);
4143                         continue;
4144                 }
4145
4146                 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4147                         ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4148         }
4149         rcu_read_unlock();
4150 done:
4151         return total_num_buffs_reaped;
4152 }
4153
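/* Reap the RXDMA error destination ring for @mac_id: free every buffer
 * referenced by the MSDU link descriptors, return the link descriptors
 * to the idle list and replenish the RX refill ring. Returns the
 * number of ring entries processed.
 */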
4154 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4155 {
4156         struct ath11k *ar;
4157         struct dp_srng *err_ring;
4158         struct dp_rxdma_ring *rx_ring;
4159         struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4160         struct hal_srng *srng;
4161         u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4162         enum hal_rx_buf_return_buf_manager rbm;
4163         enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4164         struct ath11k_skb_rxcb *rxcb;
4165         struct sk_buff *skb;
4166         struct hal_reo_entrance_ring *entr_ring;
4167         void *desc;
4168         int num_buf_freed = 0;
4169         int quota = budget;
4170         dma_addr_t paddr;
4171         u32 desc_bank;
4172         void *link_desc_va;
4173         int num_msdus;
4174         int i;
4175         int buf_id;
4176
4177         ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4178         err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4179                                                                           mac_id)];
4180         rx_ring = &ar->dp.rx_refill_buf_ring;
4181
4182         srng = &ab->hal.srng_list[err_ring->ring_id];
4183
4184         spin_lock_bh(&srng->lock);
4185
4186         ath11k_hal_srng_access_begin(ab, srng);
4187
4188         while (quota &&
4189                (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
                /* decrement only for entries actually processed so the
                 * reaped count returned below stays exact
                 */
                quota--;
4190                 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4191
4192                 entr_ring = (struct hal_reo_entrance_ring *)desc;
4193                 rxdma_err_code =
4194                         FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4195                                   entr_ring->info1);
4196                 ab->soc_stats.rxdma_error[rxdma_err_code]++;
4197
4198                 link_desc_va = link_desc_banks[desc_bank].vaddr +
4199                                (paddr - link_desc_banks[desc_bank].paddr);
4200                 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4201                                                  msdu_cookies, &rbm);
4202
4203                 for (i = 0; i < num_msdus; i++) {
4204                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4205                                            msdu_cookies[i]);
4206
4207                         spin_lock_bh(&rx_ring->idr_lock);
4208                         skb = idr_find(&rx_ring->bufs_idr, buf_id);
4209                         if (!skb) {
4210                                 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4211                                             buf_id);
4212                                 spin_unlock_bh(&rx_ring->idr_lock);
4213                                 continue;
4214                         }
4215
4216                         idr_remove(&rx_ring->bufs_idr, buf_id);
4217                         spin_unlock_bh(&rx_ring->idr_lock);
4218
4219                         rxcb = ATH11K_SKB_RXCB(skb);
4220                         dma_unmap_single(ab->dev, rxcb->paddr,
4221                                          skb->len + skb_tailroom(skb),
4222                                          DMA_FROM_DEVICE);
4223                         dev_kfree_skb_any(skb);
4224
4225                         num_buf_freed++;
4226                 }
4227
4228                 ath11k_dp_rx_link_desc_return(ab, desc,
4229                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4230         }
4231
4232         ath11k_hal_srng_access_end(ab, srng);
4233
4234         spin_unlock_bh(&srng->lock);
4235
4236         if (num_buf_freed)
4237                 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4238                                            ab->hw_params.hal_params->rx_buf_rbm);
4239
4240         return budget - quota;
4241 }
4242
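/* Drain the REO status ring and complete the matching commands queued
 * on the REO command list by invoking their completion handlers.
 */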
4243 void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4244 {
4245         struct ath11k_dp *dp = &ab->dp;
4246         struct hal_srng *srng;
4247         struct dp_reo_cmd *cmd, *tmp;
4248         bool found = false;
4249         u32 *reo_desc;
4250         u16 tag;
4251         struct hal_reo_status reo_status;
4252
4253         srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4254
4255         memset(&reo_status, 0, sizeof(reo_status));
4256
4257         spin_lock_bh(&srng->lock);
4258
4259         ath11k_hal_srng_access_begin(ab, srng);
4260
4261         while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4262                 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4263
4264                 switch (tag) {
4265                 case HAL_REO_GET_QUEUE_STATS_STATUS:
4266                         ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4267                                                           &reo_status);
4268                         break;
4269                 case HAL_REO_FLUSH_QUEUE_STATUS:
4270                         ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4271                                                           &reo_status);
4272                         break;
4273                 case HAL_REO_FLUSH_CACHE_STATUS:
4274                         ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4275                                                           &reo_status);
4276                         break;
4277                 case HAL_REO_UNBLOCK_CACHE_STATUS:
4278                         ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4279                                                           &reo_status);
4280                         break;
4281                 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4282                         ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4283                                                                  &reo_status);
4284                         break;
4285                 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4286                         ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4287                                                                   &reo_status);
4288                         break;
4289                 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4290                         ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4291                                                                   &reo_status);
4292                         break;
4293                 default:
4294                         ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4295                         continue;
4296                 }
4297
4298                 spin_lock_bh(&dp->reo_cmd_lock);
4299                 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4300                         if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4301                                 found = true;
4302                                 list_del(&cmd->list);
4303                                 break;
4304                         }
4305                 }
4306                 spin_unlock_bh(&dp->reo_cmd_lock);
4307
4308                 if (found) {
4309                         cmd->handler(dp, (void *)&cmd->data,
4310                                      reo_status.uniform_hdr.cmd_status);
4311                         kfree(cmd);
4312                 }
4313
4314                 found = false;
4315         }
4316
4317         ath11k_hal_srng_access_end(ab, srng);
4318
4319         spin_unlock_bh(&srng->lock);
4320 }
4321
4322 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4323 {
4324         struct ath11k *ar = ab->pdevs[mac_id].ar;
4325
4326         ath11k_dp_rx_pdev_srng_free(ar);
4327         ath11k_dp_rxdma_pdev_buf_free(ar);
4328 }
4329
4330 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4331 {
4332         struct ath11k *ar = ab->pdevs[mac_id].ar;
4333         struct ath11k_pdev_dp *dp = &ar->dp;
4334         u32 ring_id;
4335         int i;
4336         int ret;
4337
4338         ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4339         if (ret) {
4340                 ath11k_warn(ab, "failed to setup rx srngs\n");
4341                 return ret;
4342         }
4343
4344         ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4345         if (ret) {
4346                 ath11k_warn(ab, "failed to setup rxdma ring\n");
4347                 return ret;
4348         }
4349
4350         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4351         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4352         if (ret) {
4353                 ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4354                             ret);
4355                 return ret;
4356         }
4357
4358         if (ab->hw_params.rx_mac_buf_ring) {
4359                 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4360                         ring_id = dp->rx_mac_buf_ring[i].ring_id;
4361                         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4362                                                           mac_id + i, HAL_RXDMA_BUF);
4363                         if (ret) {
4364                                 ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4365                                             i, ret);
4366                                 return ret;
4367                         }
4368                 }
4369         }
4370
4371         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4372                 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4373                 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4374                                                   mac_id + i, HAL_RXDMA_DST);
4375                 if (ret) {
4376                         ath11k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
4377                                     i, ret);
4378                         return ret;
4379                 }
4380         }
4381
4382         if (!ab->hw_params.rxdma1_enable)
4383                 goto config_refill_ring;
4384
4385         ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4386         ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4387                                           mac_id, HAL_RXDMA_MONITOR_BUF);
4388         if (ret) {
4389                 ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4390                             ret);
4391                 return ret;
4392         }
4393         ret = ath11k_dp_tx_htt_srng_setup(ab,
4394                                           dp->rxdma_mon_dst_ring.ring_id,
4395                                           mac_id, HAL_RXDMA_MONITOR_DST);
4396         if (ret) {
4397                 ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4398                             ret);
4399                 return ret;
4400         }
4401         ret = ath11k_dp_tx_htt_srng_setup(ab,
4402                                           dp->rxdma_mon_desc_ring.ring_id,
4403                                           mac_id, HAL_RXDMA_MONITOR_DESC);
4404         if (ret) {
4405                 ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4406                             ret);
4407                 return ret;
4408         }
4409
4410 config_refill_ring:
4411         for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4412                 ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4413                 ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4414                                                   HAL_RXDMA_MONITOR_STATUS);
4415                 if (ret) {
4416                         ath11k_warn(ab,
4417                                     "failed to configure mon_status_refill_ring%d %d\n",
4418                                     i, ret);
4419                         return ret;
4420                 }
4421         }
4422
4423         return 0;
4424 }
4425
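/* Split a continued MSDU across RX buffers: cap the fragment at what
 * fits in one buffer after the HAL RX descriptor and deduct it from
 * the remaining total length.
 */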
4426 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4427 {
4428         if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4429                 *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4430                 *total_len -= *frag_len;
4431         } else {
4432                 *frag_len = *total_len;
4433                 *total_len = 0;
4434         }
4435 }
4436
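/* Hand a used monitor link descriptor back to the HW by copying its
 * buffer address info into the next free entry of the monitor desc
 * ring (rxdma1), or of the WBM release ring otherwise.
 */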
4437 static
4438 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4439                                           void *p_last_buf_addr_info,
4440                                           u8 mac_id)
4441 {
4442         struct ath11k_pdev_dp *dp = &ar->dp;
4443         struct dp_srng *dp_srng;
4444         void *hal_srng;
4445         void *src_srng_desc;
4446         int ret = 0;
4447
4448         if (ar->ab->hw_params.rxdma1_enable) {
4449                 dp_srng = &dp->rxdma_mon_desc_ring;
4450                 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4451         } else {
4452                 dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4453                 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4454         }
4455
4456         ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4457
4458         src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4459
4460         if (src_srng_desc) {
4461                 struct ath11k_buffer_addr *src_desc =
4462                                 (struct ath11k_buffer_addr *)src_srng_desc;
4463
4464                 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4465         } else {
4466                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4467                            "Monitor Link Desc Ring %d Full\n", mac_id);
4468                 ret = -ENOMEM;
4469         }
4470
4471         ath11k_hal_srng_access_end(ar->ab, hal_srng);
4472         return ret;
4473 }
4474
4475 static
4476 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4477                                          dma_addr_t *paddr, u32 *sw_cookie,
4478                                          u8 *rbm,
4479                                          void **pp_buf_addr_info)
4480 {
4481         struct hal_rx_msdu_link *msdu_link =
4482                         (struct hal_rx_msdu_link *)rx_msdu_link_desc;
4483         struct ath11k_buffer_addr *buf_addr_info;
4484
4485         buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4486
4487         ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4488
4489         *pp_buf_addr_info = (void *)buf_addr_info;
4490 }
4491
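/* Resize the skb to exactly @len bytes, expanding the tail room when
 * needed. On expansion failure the skb is freed and -ENOMEM is
 * returned, so callers must not reuse the skb in that case.
 */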
4492 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4493 {
4494         if (skb->len > len) {
4495                 skb_trim(skb, len);
4496         } else {
4497                 if (skb_tailroom(skb) < len - skb->len) {
4498                         if ((pskb_expand_head(skb, 0,
4499                                               len - skb->len - skb_tailroom(skb),
4500                                               GFP_ATOMIC))) {
4501                                 dev_kfree_skb_any(skb);
4502                                 return -ENOMEM;
4503                         }
4504                 }
4505                 skb_put(skb, (len - skb->len));
4506         }
4507         return 0;
4508 }
4509
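/* Expand an MSDU link descriptor into @msdu_list with per-MSDU flags,
 * length, SW cookie and return buffer manager. A zero buffer address
 * terminates the walk and marks the previous MSDU as the last one
 * (the first entry is assumed to be valid).
 */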
4510 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4511                                         void *msdu_link_desc,
4512                                         struct hal_rx_msdu_list *msdu_list,
4513                                         u16 *num_msdus)
4514 {
4515         struct hal_rx_msdu_details *msdu_details = NULL;
4516         struct rx_msdu_desc *msdu_desc_info = NULL;
4517         struct hal_rx_msdu_link *msdu_link = NULL;
4518         int i;
4519         u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4520         u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4521         u8 tmp = 0;
4522
4523         msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4524         msdu_details = &msdu_link->msdu_link[0];
4525
4526         for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4527                 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4528                               msdu_details[i].buf_addr_info.info0) == 0) {
4529                         msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4530                         msdu_desc_info->info0 |= last;
4532                         break;
4533                 }
4534                 msdu_desc_info = &msdu_details[i].rx_msdu_info;
4535
4536                 if (!i)
4537                         msdu_desc_info->info0 |= first;
4538                 else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4539                         msdu_desc_info->info0 |= last;
4540                 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4541                 msdu_list->msdu_info[i].msdu_len =
4542                          HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4543                 msdu_list->sw_cookie[i] =
4544                         FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4545                                   msdu_details[i].buf_addr_info.info1);
4546                 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4547                                 msdu_details[i].buf_addr_info.info1);
4548                 msdu_list->rbm[i] = tmp;
4549         }
4550         *num_msdus = i;
4551 }
4552
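/* Reconcile the PPDU id popped from the monitor destination ring with
 * the status ring PPDU id, accounting for wrap-around. Returns 0 when
 * they match, otherwise updates *ppdu_id to the destination value and
 * returns it.
 */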
4553 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4554                                         u32 *rx_bufs_used)
4555 {
4556         u32 ret = 0;
4557
4558         if ((*ppdu_id < msdu_ppdu_id) &&
4559             ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4560                 *ppdu_id = msdu_ppdu_id;
4561                 ret = msdu_ppdu_id;
4562         } else if ((*ppdu_id > msdu_ppdu_id) &&
4563                 ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4564                 /* mon_dst is behind mon_status;
4565                  * skip this dst_ring entry and free it
4566                  */
4567                 *rx_bufs_used += 1;
4568                 *ppdu_id = msdu_ppdu_id;
4569                 ret = msdu_ppdu_id;
4570         }
4571         return ret;
4572 }
4573
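/* Work out how many bytes of the current MSDU live in this buffer:
 * continuation buffers are sized via the fragment splitter above,
 * while the final (or only) buffer consumes the rest and closes out
 * one MSDU from *msdu_cnt.
 */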
4574 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4575                                       bool *is_frag, u32 *total_len,
4576                                       u32 *frag_len, u32 *msdu_cnt)
4577 {
4578         if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4579                 if (!*is_frag) {
4580                         *total_len = info->msdu_len;
4581                         *is_frag = true;
4582                 }
4583                 ath11k_dp_mon_set_frag_len(total_len,
4584                                            frag_len);
4585         } else {
4586                 if (*is_frag) {
4587                         ath11k_dp_mon_set_frag_len(total_len,
4588                                                    frag_len);
4589                 } else {
4590                         *frag_len = info->msdu_len;
4591                 }
4592                 *is_frag = false;
4593                 *msdu_cnt -= 1;
4594         }
4595 }
4596
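/* Pop one MPDU from the monitor destination ring: walk its MSDU link
 * descriptor chain, unmap each buffer and chain the MSDUs into a
 * head/tail list. Returns the number of RX buffers consumed.
 */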
4597 static u32
4598 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4599                           void *ring_entry, struct sk_buff **head_msdu,
4600                           struct sk_buff **tail_msdu, u32 *npackets,
4601                           u32 *ppdu_id)
4602 {
4603         struct ath11k_pdev_dp *dp = &ar->dp;
4604         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4605         struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4606         struct sk_buff *msdu = NULL, *last = NULL;
4607         struct hal_rx_msdu_list msdu_list;
4608         void *p_buf_addr_info, *p_last_buf_addr_info;
4609         struct hal_rx_desc *rx_desc;
4610         void *rx_msdu_link_desc;
4611         dma_addr_t paddr;
4612         u16 num_msdus = 0;
4613         u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4614         u32 rx_bufs_used = 0, i = 0;
4615         u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4616         u32 total_len = 0, frag_len = 0;
4617         bool is_frag, is_first_msdu;
4618         bool drop_mpdu = false;
4619         struct ath11k_skb_rxcb *rxcb;
4620         struct hal_reo_entrance_ring *ent_desc =
4621                         (struct hal_reo_entrance_ring *)ring_entry;
4622         int buf_id;
4623         u32 rx_link_buf_info[2];
4624         u8 rbm;
4625
4626         if (!ar->ab->hw_params.rxdma1_enable)
4627                 rx_ring = &dp->rx_refill_buf_ring;
4628
4629         ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4630                                             &sw_cookie,
4631                                             &p_last_buf_addr_info, &rbm,
4632                                             &msdu_cnt);
4633
4634         if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4635                       ent_desc->info1) ==
4636                       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4637                 u8 rxdma_err =
4638                         FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4639                                   ent_desc->info1);
4640                 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4641                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4642                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4643                         drop_mpdu = true;
4644                         pmon->rx_mon_stats.dest_mpdu_drop++;
4645                 }
4646         }
4647
4648         is_frag = false;
4649         is_first_msdu = true;
4650
4651         do {
4652                 if (pmon->mon_last_linkdesc_paddr == paddr) {
4653                         pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4654                         return rx_bufs_used;
4655                 }
4656
4657                 if (ar->ab->hw_params.rxdma1_enable)
4658                         rx_msdu_link_desc =
4659                                 (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4660                                 (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4661                 else
4662                         rx_msdu_link_desc =
4663                                 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4664                                 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4665
4666                 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4667                                             &num_msdus);
4668
4669                 for (i = 0; i < num_msdus; i++) {
4670                         u32 l2_hdr_offset;
4671
4672                         if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4673                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4674                                            "i %d last_cookie %d is the same\n",
4675                                            i, pmon->mon_last_buf_cookie);
4676                                 drop_mpdu = true;
4677                                 pmon->rx_mon_stats.dup_mon_buf_cnt++;
4678                                 continue;
4679                         }
4680                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4681                                            msdu_list.sw_cookie[i]);
4682
4683                         spin_lock_bh(&rx_ring->idr_lock);
4684                         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4685                         spin_unlock_bh(&rx_ring->idr_lock);
4686                         if (!msdu) {
4687                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4688                                            "msdu_pop: invalid buf_id %d\n", buf_id);
4689                                 break;
4690                         }
4691                         rxcb = ATH11K_SKB_RXCB(msdu);
4692                         if (!rxcb->unmapped) {
4693                                 dma_unmap_single(ar->ab->dev, rxcb->paddr,
4694                                                  msdu->len +
4695                                                  skb_tailroom(msdu),
4696                                                  DMA_FROM_DEVICE);
4697                                 rxcb->unmapped = 1;
4698                         }
4699                         if (drop_mpdu) {
4700                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4701                                            "i %d drop msdu %p *ppdu_id %x\n",
4702                                            i, msdu, *ppdu_id);
4703                                 dev_kfree_skb_any(msdu);
4704                                 msdu = NULL;
4705                                 goto next_msdu;
4706                         }
4707
4708                         rx_desc = (struct hal_rx_desc *)msdu->data;
4709
4710                         rx_pkt_offset = sizeof(struct hal_rx_desc);
4711                         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4712
4713                         if (is_first_msdu) {
4714                                 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4715                                         drop_mpdu = true;
4716                                         dev_kfree_skb_any(msdu);
4717                                         msdu = NULL;
4718                                         pmon->mon_last_linkdesc_paddr = paddr;
4719                                         goto next_msdu;
4720                                 }
4721
4722                                 msdu_ppdu_id =
4723                                         ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4724
4725                                 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4726                                                                  ppdu_id,
4727                                                                  &rx_bufs_used)) {
4728                                         if (rx_bufs_used) {
4729                                                 drop_mpdu = true;
4730                                                 dev_kfree_skb_any(msdu);
4731                                                 msdu = NULL;
4732                                                 goto next_msdu;
4733                                         }
4734                                         return rx_bufs_used;
4735                                 }
4736                                 pmon->mon_last_linkdesc_paddr = paddr;
4737                                 is_first_msdu = false;
4738                         }
4739                         ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4740                                                   &is_frag, &total_len,
4741                                                   &frag_len, &msdu_cnt);
4742                         rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4743
4744                         if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
                                        /* the skb is already freed on failure */
                                        msdu = NULL;
                                        goto next_msdu;
                                }
4745
4746                         if (!(*head_msdu))
4747                                 *head_msdu = msdu;
4748                         else if (last)
4749                                 last->next = msdu;
4750
4751                         last = msdu;
4752 next_msdu:
4753                         pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4754                         rx_bufs_used++;
4755                         spin_lock_bh(&rx_ring->idr_lock);
4756                         idr_remove(&rx_ring->bufs_idr, buf_id);
4757                         spin_unlock_bh(&rx_ring->idr_lock);
4758                 }
4759
4760                 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4761
4762                 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4763                                                     &sw_cookie, &rbm,
4764                                                     &p_buf_addr_info);
4765
4766                 if (ar->ab->hw_params.rxdma1_enable) {
4767                         if (ath11k_dp_rx_monitor_link_desc_return(ar,
4768                                                                   p_last_buf_addr_info,
4769                                                                   dp->mac_id))
4770                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4771                                            "dp_rx_monitor_link_desc_return failed\n");
4772                 } else {
4773                         ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4774                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4775                 }
4776
4777                 p_last_buf_addr_info = p_buf_addr_info;
4778
4779         } while (paddr && msdu_cnt);
4780
4781         if (last)
4782                 last->next = NULL;
4783
4784         *tail_msdu = msdu;
4785
4786         if (msdu_cnt == 0)
4787                 *npackets = 1;
4788
4789         return rx_bufs_used;
4790 }
4791
4792 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4793 {
4794         u32 rx_pkt_offset, l2_hdr_offset;
4795
4796         rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4797         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4798                                                       (struct hal_rx_desc *)msdu->data);
4799         skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4800 }
4801
4802 static struct sk_buff *
4803 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4804                             u32 mac_id, struct sk_buff *head_msdu,
4805                             struct sk_buff *last_msdu,
4806                             struct ieee80211_rx_status *rxs, bool *fcs_err)
4807 {
4808         struct ath11k_base *ab = ar->ab;
4809         struct sk_buff *msdu, *prev_buf;
4810         struct hal_rx_desc *rx_desc;
4811         char *hdr_desc;
4812         u8 *dest, decap_format;
4813         struct ieee80211_hdr_3addr *wh;
4814         struct rx_attention *rx_attention;
4815         u32 err_bitmap;
4816
4817         if (!head_msdu)
4818                 goto err_merge_fail;
4819
4820         rx_desc = (struct hal_rx_desc *)head_msdu->data;
4821         rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4822         err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4823
4824         if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4825                 *fcs_err = true;
4826
4827         if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4828                 return NULL;
4829
4830         decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4831
4832         ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4833
4834         if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4835                 ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4836
4837                 prev_buf = head_msdu;
4838                 msdu = head_msdu->next;
4839
4840                 while (msdu) {
4841                         ath11k_dp_rx_msdus_set_payload(ar, msdu);
4842
4843                         prev_buf = msdu;
4844                         msdu = msdu->next;
4845                 }
4846
4847                 prev_buf->next = NULL;
4848
4849                 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4850         } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4851                 u8 qos_pkt = 0;
4852
4853                 rx_desc = (struct hal_rx_desc *)head_msdu->data;
4854                 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4855
4856                 /* Base size */
4857                 wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4858
4859                 if (ieee80211_is_data_qos(wh->frame_control))
4860                         qos_pkt = 1;
4861
4862                 msdu = head_msdu;
4863
4864                 while (msdu) {
4865                         ath11k_dp_rx_msdus_set_payload(ar, msdu);
4866                         if (qos_pkt) {
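                                /* Native wifi decap strips the 2-byte QoS ctl
                                 * field, so the payload is assumed to still
                                 * begin with the remaining 802.11 header:
                                 * push two bytes and copy the full QoS header
                                 * back from the rx descriptor to rebuild it.
                                 */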
4867                                 dest = skb_push(msdu, sizeof(__le16));
4868                                 if (!dest)
4869                                         goto err_merge_fail;
4870                                 memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4871                         }
4872                         prev_buf = msdu;
4873                         msdu = msdu->next;
4874                 }
4875                 dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4876                 if (!dest)
4877                         goto err_merge_fail;
4878
4879                 ath11k_dbg(ab, ATH11K_DBG_DATA,
4880                            "mpdu_buf %pK mpdu_buf->len %u\n",
4881                            prev_buf, prev_buf->len);
4882         } else {
4883                 ath11k_dbg(ab, ATH11K_DBG_DATA,
4884                            "decap format %d is not supported!\n",
4885                            decap_format);
4886                 goto err_merge_fail;
4887         }
4888
4889         return head_msdu;
4890
4891 err_merge_fail:
4892         return NULL;
4893 }
4894
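/* Lay out the parsed HE TLV words in the format of struct
 * ieee80211_radiotap_he so mac80211 can emit them verbatim when
 * RX_FLAG_RADIOTAP_HE is set.
 */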
4895 static void
4896 ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4897                                 u8 *rtap_buf)
4898 {
4899         u32 rtap_len = 0;
4900
4901         put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4902         rtap_len += 2;
4903
4904         put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4905         rtap_len += 2;
4906
4907         put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4908         rtap_len += 2;
4909
4910         put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4911         rtap_len += 2;
4912
4913         put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4914         rtap_len += 2;
4915
4916         put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4917 }
4918
4919 static void
4920 ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
4921                                    u8 *rtap_buf)
4922 {
4923         u32 rtap_len = 0;
4924
4925         put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4926         rtap_len += 2;
4927
4928         put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4929         rtap_len += 2;
4930
4931         rtap_buf[rtap_len] = rx_status->he_RU[0];
4932         rtap_len += 1;
4933
4934         rtap_buf[rtap_len] = rx_status->he_RU[1];
4935         rtap_len += 1;
4936
4937         rtap_buf[rtap_len] = rx_status->he_RU[2];
4938         rtap_len += 1;
4939
4940         rtap_buf[rtap_len] = rx_status->he_RU[3];
4941 }
4942
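/* Translate the parsed PPDU info into the mac80211 rx status and, for
 * HE PPDUs, push the radiotap HE/HE-MU payload onto the skb head.
 */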
4943 static void ath11k_update_radiotap(struct ath11k *ar,
4944                                    struct hal_rx_mon_ppdu_info *ppduinfo,
4945                                    struct sk_buff *mon_skb,
4946                                    struct ieee80211_rx_status *rxs)
4947 {
4948         struct ieee80211_supported_band *sband;
4949         u8 *ptr = NULL;
4950
4951         rxs->flag |= RX_FLAG_MACTIME_START;
4952         rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
4953
4954         if (ppduinfo->nss)
4955                 rxs->nss = ppduinfo->nss;
4956
4957         if (ppduinfo->he_mu_flags) {
4958                 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
4959                 rxs->encoding = RX_ENC_HE;
4960                 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
4961                 ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
4962         } else if (ppduinfo->he_flags) {
4963                 rxs->flag |= RX_FLAG_RADIOTAP_HE;
4964                 rxs->encoding = RX_ENC_HE;
4965                 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
4966                 ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
4967                 rxs->rate_idx = ppduinfo->rate;
4968         } else if (ppduinfo->vht_flags) {
4969                 rxs->encoding = RX_ENC_VHT;
4970                 rxs->rate_idx = ppduinfo->rate;
4971         } else if (ppduinfo->ht_flags) {
4972                 rxs->encoding = RX_ENC_HT;
4973                 rxs->rate_idx = ppduinfo->rate;
4974         } else {
4975                 rxs->encoding = RX_ENC_LEGACY;
4976                 sband = &ar->mac.sbands[rxs->band];
4977                 rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
4978                                                           ppduinfo->cck_flag);
4979         }
4980
4981         rxs->mactime = ppduinfo->tsft;
4982 }
4983
4984 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4985                                     struct sk_buff *head_msdu,
4986                                     struct hal_rx_mon_ppdu_info *ppduinfo,
4987                                     struct sk_buff *tail_msdu,
4988                                     struct napi_struct *napi)
4989 {
4990         struct ath11k_pdev_dp *dp = &ar->dp;
4991         struct sk_buff *mon_skb, *skb_next, *header;
4992         struct ieee80211_rx_status *rxs = &dp->rx_status;
4993         bool fcs_err = false;
4994
4995         mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4996                                               tail_msdu, rxs, &fcs_err);
4997
4998         if (!mon_skb)
4999                 goto mon_deliver_fail;
5000
5001         header = mon_skb;
5002
5003         rxs->flag = 0;
5004
5005         if (fcs_err)
5006                 rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5007
5008         do {
5009                 skb_next = mon_skb->next;
5010                 if (!skb_next)
5011                         rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5012                 else
5013                         rxs->flag |= RX_FLAG_AMSDU_MORE;
5014
5015                 if (mon_skb == header) {
5016                         header = NULL;
5017                         rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5018                 } else {
5019                         rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5020                 }
5021                 ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5022
5023                 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5024                 mon_skb = skb_next;
5025         } while (mon_skb);
5026         rxs->flag = 0;
5027
5028         return 0;
5029
5030 mon_deliver_fail:
5031         mon_skb = head_msdu;
5032         while (mon_skb) {
5033                 skb_next = mon_skb->next;
5034                 dev_kfree_skb_any(mon_skb);
5035                 mon_skb = skb_next;
5036         }
5037         return -EINVAL;
5038 }
5039
5040 /* The destination ring processing is considered stuck if the destination
5041  * ring does not move while the status ring moves 16 PPDUs. As a
5042  * workaround, the destination ring processing skips that PPDU.
5043  */
5044 #define MON_DEST_RING_STUCK_MAX_CNT 16
5045
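/* Reap the monitor destination ring for the PPDU currently tracked by
 * the status ring, deliver complete MPDUs to mac80211 and detect a
 * stuck destination ring via MON_DEST_RING_STUCK_MAX_CNT.
 */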
5046 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5047                                           u32 quota, struct napi_struct *napi)
5048 {
5049         struct ath11k_pdev_dp *dp = &ar->dp;
5050         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5051         const struct ath11k_hw_hal_params *hal_params;
5052         void *ring_entry;
5053         void *mon_dst_srng;
5054         u32 ppdu_id;
5055         u32 rx_bufs_used;
5056         u32 ring_id;
5057         struct ath11k_pdev_mon_stats *rx_mon_stats;
5058         u32 npackets = 0;
5059         u32 mpdu_rx_bufs_used;
5060
5061         if (ar->ab->hw_params.rxdma1_enable)
5062                 ring_id = dp->rxdma_mon_dst_ring.ring_id;
5063         else
5064                 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5065
5066         mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5067
5068         if (!mon_dst_srng) {
5069                 ath11k_warn(ar->ab,
5070                             "HAL Monitor Destination Ring Init Failed -- %pK\n",
5071                             mon_dst_srng);
5072                 return;
5073         }
5074
5075         spin_lock_bh(&pmon->mon_lock);
5076
5077         ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5078
5079         ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5080         rx_bufs_used = 0;
5081         rx_mon_stats = &pmon->rx_mon_stats;
5082
5083         while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5084                 struct sk_buff *head_msdu, *tail_msdu;
5085
5086                 head_msdu = NULL;
5087                 tail_msdu = NULL;
5088
5089                 mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5090                                                               &head_msdu,
5091                                                               &tail_msdu,
5092                                                               &npackets, &ppdu_id);
5093
5094                 rx_bufs_used += mpdu_rx_bufs_used;
5095
5096                 if (mpdu_rx_bufs_used) {
5097                         dp->mon_dest_ring_stuck_cnt = 0;
5098                 } else {
5099                         dp->mon_dest_ring_stuck_cnt++;
5100                         rx_mon_stats->dest_mon_not_reaped++;
5101                 }
5102
5103                 if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5104                         rx_mon_stats->dest_mon_stuck++;
5105                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5106                                    "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5107                                    pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5108                                    dp->mon_dest_ring_stuck_cnt,
5109                                    rx_mon_stats->dest_mon_not_reaped,
5110                                    rx_mon_stats->dest_mon_stuck);
5111                         pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5112                         continue;
5113                 }
5114
5115                 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5116                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5117                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5118                                    "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5119                                    ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5120                                    rx_mon_stats->dest_mon_not_reaped,
5121                                    rx_mon_stats->dest_mon_stuck);
5122                         break;
5123                 }
5124                 if (head_msdu && tail_msdu) {
5125                         ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5126                                                  &pmon->mon_ppdu_info,
5127                                                  tail_msdu, napi);
5128                         rx_mon_stats->dest_mpdu_done++;
5129                 }
5130
5131                 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5132                                                                 mon_dst_srng);
5133         }
5134         ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5135
5136         spin_unlock_bh(&pmon->mon_lock);
5137
5138         if (rx_bufs_used) {
5139                 rx_mon_stats->dest_ppdu_done++;
5140                 hal_params = ar->ab->hw_params.hal_params;
5141
5142                 if (ar->ab->hw_params.rxdma1_enable)
5143                         ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5144                                                    &dp->rxdma_mon_buf_ring,
5145                                                    rx_bufs_used,
5146                                                    hal_params->rx_buf_rbm);
5147                 else
5148                         ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5149                                                    &dp->rx_refill_buf_ring,
5150                                                    rx_bufs_used,
5151                                                    hal_params->rx_buf_rbm);
5152         }
5153 }
5154
5155 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5156                                     struct napi_struct *napi, int budget)
5157 {
5158         struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5159         enum hal_rx_mon_status hal_status;
5160         struct sk_buff *skb;
5161         struct sk_buff_head skb_list;
5162         struct ath11k_peer *peer;
5163         struct ath11k_sta *arsta;
5164         int num_buffs_reaped = 0;
5165         u32 rx_buf_sz;
5166         u16 log_type;
5167         struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5168         struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5169         struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5170
5171         __skb_queue_head_init(&skb_list);
5172
5173         num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5174                                                              &skb_list);
5175         if (!num_buffs_reaped)
5176                 goto exit;
5177
5178         memset(ppdu_info, 0, sizeof(*ppdu_info));
5179         ppdu_info->peer_id = HAL_INVALID_PEERID;
5180
5181         while ((skb = __skb_dequeue(&skb_list))) {
5182                 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5183                         log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5184                         rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5185                 } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5186                         log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5187                         rx_buf_sz = DP_RX_BUFFER_SIZE;
5188                 } else {
5189                         log_type = ATH11K_PKTLOG_TYPE_INVALID;
5190                         rx_buf_sz = 0;
5191                 }
5192
5193                 if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5194                         trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5195
5196                 memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
5197                 hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5198
5199                 if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5200                     pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5201                     hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5202                         rx_mon_stats->status_ppdu_done++;
5203                         pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5204                         ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5205                         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5206                 }
5207
5208                 if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5209                     hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5210                         dev_kfree_skb_any(skb);
5211                         continue;
5212                 }
5213
5214                 rcu_read_lock();
5215                 spin_lock_bh(&ab->base_lock);
5216                 peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5217
5218                 if (!peer || !peer->sta) {
5219                         ath11k_dbg(ab, ATH11K_DBG_DATA,
5220                                    "failed to find the peer with peer_id %d\n",
5221                                    ppdu_info->peer_id);
5222                         goto next_skb;
5223                 }
5224
5225                 arsta = (struct ath11k_sta *)peer->sta->drv_priv;
5226                 ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5227
5228                 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5229                         trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5230
5231 next_skb:
5232                 spin_unlock_bh(&ab->base_lock);
5233                 rcu_read_unlock();
5234
5235                 dev_kfree_skb_any(skb);
5236                 memset(ppdu_info, 0, sizeof(*ppdu_info));
5237                 ppdu_info->peer_id = HAL_INVALID_PEERID;
5238         }
5239 exit:
5240         return num_buffs_reaped;
5241 }
5242
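/* Full monitor mode variant of the MPDU pop: parse the SW monitor
 * ring entry, then collect and chain the MSDUs of one MPDU unless the
 * entry marks the end of a PPDU.
 */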
5243 static u32
5244 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5245                                void *ring_entry, struct sk_buff **head_msdu,
5246                                struct sk_buff **tail_msdu,
5247                                struct hal_sw_mon_ring_entries *sw_mon_entries)
5248 {
5249         struct ath11k_pdev_dp *dp = &ar->dp;
5250         struct ath11k_mon_data *pmon = &dp->mon_data;
5251         struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5252         struct sk_buff *msdu = NULL, *last = NULL;
5253         struct hal_sw_monitor_ring *sw_desc = ring_entry;
5254         struct hal_rx_msdu_list msdu_list;
5255         struct hal_rx_desc *rx_desc;
5256         struct ath11k_skb_rxcb *rxcb;
5257         void *rx_msdu_link_desc;
5258         void *p_buf_addr_info, *p_last_buf_addr_info;
5259         int buf_id, i = 0;
5260         u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5261         u32 rx_bufs_used = 0, msdu_cnt = 0;
5262         u32 total_len = 0, frag_len = 0, sw_cookie;
5263         u16 num_msdus = 0;
5264         u8 rxdma_err, rbm;
5265         bool is_frag, is_first_msdu;
5266         bool drop_mpdu = false;
5267
5268         ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5269
5270         sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5271         sw_mon_entries->end_of_ppdu = false;
5272         sw_mon_entries->drop_ppdu = false;
5273         p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5274         msdu_cnt = sw_mon_entries->msdu_cnt;
5275
5276         sw_mon_entries->end_of_ppdu =
5277                 FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5278         if (sw_mon_entries->end_of_ppdu)
5279                 return rx_bufs_used;
5280
5281         if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5282                       sw_desc->info0) ==
5283                       HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5284                 rxdma_err =
5285                         FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5286                                   sw_desc->info0);
5287                 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5288                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5289                     rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5290                         pmon->rx_mon_stats.dest_mpdu_drop++;
5291                         drop_mpdu = true;
5292                 }
5293         }
5294
5295         is_frag = false;
5296         is_first_msdu = true;
5297
5298         do {
5299                 rx_msdu_link_desc =
5300                         (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5301                         (sw_mon_entries->mon_dst_paddr -
5302                          pmon->link_desc_banks[sw_cookie].paddr);
5303
5304                 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5305                                             &num_msdus);
5306
5307                 for (i = 0; i < num_msdus; i++) {
5308                         buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5309                                            msdu_list.sw_cookie[i]);
5310
5311                         spin_lock_bh(&rx_ring->idr_lock);
5312                         msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5313                         if (!msdu) {
5314                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5315                                            "full mon msdu_pop: invalid buf_id %d\n",
5316                                            buf_id);
5317                                 spin_unlock_bh(&rx_ring->idr_lock);
5318                                 break;
5319                         }
5320                         idr_remove(&rx_ring->bufs_idr, buf_id);
5321                         spin_unlock_bh(&rx_ring->idr_lock);
5322
5323                         rxcb = ATH11K_SKB_RXCB(msdu);
5324                         if (!rxcb->unmapped) {
5325                                 dma_unmap_single(ar->ab->dev, rxcb->paddr,
5326                                                  msdu->len +
5327                                                  skb_tailroom(msdu),
5328                                                  DMA_FROM_DEVICE);
5329                                 rxcb->unmapped = 1;
5330                         }
5331                         if (drop_mpdu) {
5332                                 ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5333                                            "full mon: i %d drop msdu %p ppdu_id %x\n",
5334                                            i, msdu, sw_mon_entries->ppdu_id);
5335                                 dev_kfree_skb_any(msdu);
5336                                 msdu_cnt--;
5337                                 goto next_msdu;
5338                         }
5339
5340                         rx_desc = (struct hal_rx_desc *)msdu->data;
5341
5342                         rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
5343                         l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5344
5345                         if (is_first_msdu) {
5346                                 if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5347                                         drop_mpdu = true;
5348                                         dev_kfree_skb_any(msdu);
5349                                         msdu = NULL;
5350                                         goto next_msdu;
5351                                 }
5352                                 is_first_msdu = false;
5353                         }
5354
5355                         ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5356                                                   &is_frag, &total_len,
5357                                                   &frag_len, &msdu_cnt);
5358
5359                         rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5360
5361                         ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5362
5363                         if (!(*head_msdu))
5364                                 *head_msdu = msdu;
5365                         else if (last)
5366                                 last->next = msdu;
5367
5368                         last = msdu;
5369 next_msdu:
5370                         rx_bufs_used++;
5371                 }
5372
5373                 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5374                                                     &sw_mon_entries->mon_dst_paddr,
5375                                                     &sw_mon_entries->mon_dst_sw_cookie,
5376                                                     &rbm,
5377                                                     &p_buf_addr_info);
5378
5379                 if (ath11k_dp_rx_monitor_link_desc_return(ar,
5380                                                           p_last_buf_addr_info,
5381                                                           dp->mac_id))
5382                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5383                                    "full mon: dp_rx_monitor_link_desc_return failed\n");
5384
5385                 p_last_buf_addr_info = p_buf_addr_info;
5386
5387         } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5388
5389         if (last)
5390                 last->next = NULL;
5391
5392         *tail_msdu = last;
5393
5394         return rx_bufs_used;
5395 }
5396
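/* Queue one reaped MPDU (an skb chain from head to tail) on
 * dp_full_mon_mpdu_list until the PPDU it belongs to can be matched
 * against a monitor status buffer. GFP_ATOMIC is used since this is
 * called from the NAPI poll path under pmon->mon_lock.
 */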
5397 static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5398                                               struct sk_buff *head,
5399                                               struct sk_buff *tail)
5400 {
5401         struct dp_full_mon_mpdu *mon_mpdu;
5402
5403         mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5404         if (!mon_mpdu)
5405                 return -ENOMEM;
5406
5407         mon_mpdu->head = head;
5408         mon_mpdu->tail = tail;
5409         list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5410         return 0;
5411 }
5412
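/* Drop path: free every MPDU queued on dp_full_mon_mpdu_list along
 * with each skb in its MSDU chain, e.g. when no matching status
 * buffer was found for the PPDU.
 */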
5413 static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp)
5414 {
5415         struct dp_full_mon_mpdu *mon_mpdu;
5416         struct dp_full_mon_mpdu *tmp;
5417         struct sk_buff *tmp_msdu, *skb_next;
5418
5419         if (list_empty(&dp->dp_full_mon_mpdu_list))
5420                 return;
5421
5422         list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5423                 list_del(&mon_mpdu->list);
5424
5425                 tmp_msdu = mon_mpdu->head;
5426                 while (tmp_msdu) {
5427                         skb_next = tmp_msdu->next;
5428                         dev_kfree_skb_any(tmp_msdu);
5429                         tmp_msdu = skb_next;
5430                 }
5431
5432                 kfree(mon_mpdu);
5433         }
5434 }
5435
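/* Deliver all MPDUs queued for a completed PPDU to the monitor
 * interface via ath11k_dp_rx_mon_deliver() and free the list entries.
 */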
5436 static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5437                                               int mac_id,
5438                                               struct ath11k_mon_data *pmon,
5439                                               struct napi_struct *napi)
5440 {
5441         struct ath11k_pdev_mon_stats *rx_mon_stats;
5442         struct dp_full_mon_mpdu *tmp;
5443         struct dp_full_mon_mpdu *mon_mpdu;
5444         struct sk_buff *head_msdu, *tail_msdu;
5445         struct ath11k_base *ab = ar->ab;
5446         struct ath11k_dp *dp = &ab->dp;
5447         int ret = 0;
5448
5449         rx_mon_stats = &pmon->rx_mon_stats;
5450
5451         list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5452                 list_del(&mon_mpdu->list);
5453                 head_msdu = mon_mpdu->head;
5454                 tail_msdu = mon_mpdu->tail;
5455                 if (head_msdu && tail_msdu) {
5456                         ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5457                                                        &pmon->mon_ppdu_info,
5458                                                        tail_msdu, napi);
5459                         rx_mon_stats->dest_mpdu_done++;
5460                         ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5461                 }
5462                 kfree(mon_mpdu);
5463         }
5464
5465         return ret;
5466 }
5467
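/* Reap the status ring until it catches up with the PPDU currently
 * held on the destination ring: on DP_MON_STATUS_MATCH deliver the
 * queued MPDUs (reaping any remaining status buffers first); if no
 * match is possible (no status paddr, or the status ring is already
 * ahead) flag the PPDU to be dropped instead.
 */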
5468 static int
5469 ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5470                                           struct napi_struct *napi, int budget)
5471 {
5472         struct ath11k *ar = ab->pdevs[mac_id].ar;
5473         struct ath11k_pdev_dp *dp = &ar->dp;
5474         struct ath11k_mon_data *pmon = &dp->mon_data;
5475         struct hal_sw_mon_ring_entries *sw_mon_entries;
5476         int quota = 0, work = 0, count;
5477
5478         sw_mon_entries = &pmon->sw_mon_entries;
5479
5480         while (pmon->hold_mon_dst_ring) {
5481                 quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5482                                                         napi, 1);
5483                 if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5484                         count = sw_mon_entries->status_buf_count;
5485                         if (count > 1) {
5486                                 quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5487                                                                          napi, count);
5488                         }
5489
5490                         ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5491                                                            pmon, napi);
5492                         pmon->hold_mon_dst_ring = false;
5493                 } else if (!pmon->mon_status_paddr ||
5494                            pmon->buf_state == DP_MON_STATUS_LEAD) {
5495                         sw_mon_entries->drop_ppdu = true;
5496                         pmon->hold_mon_dst_ring = false;
5497                 }
5498
5499                 if (!quota)
5500                         break;
5501
5502                 work += quota;
5503         }
5504
5505         if (sw_mon_entries->drop_ppdu)
5506                 ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp);
5507
5508         return work;
5509 }
5510
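/* NAPI handler for full monitor mode: reap one PPDU worth of MPDUs
 * from the RXDMA monitor destination ring, queue them, replenish the
 * consumed rx buffers and then reconcile against the status ring.
 * pmon->hold_mon_dst_ring parks the destination ring on an
 * end-of-ppdu entry until the status ring has caught up.
 */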
5511 static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5512                                          struct napi_struct *napi, int budget)
5513 {
5514         struct ath11k *ar = ab->pdevs[mac_id].ar;
5515         struct ath11k_pdev_dp *dp = &ar->dp;
5516         struct ath11k_mon_data *pmon = &dp->mon_data;
5517         struct hal_sw_mon_ring_entries *sw_mon_entries;
5518         struct ath11k_pdev_mon_stats *rx_mon_stats;
5519         struct sk_buff *head_msdu, *tail_msdu;
5520         void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5521         void *ring_entry;
5522         u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5523         int quota = 0, ret;
5524         bool break_dst_ring = false;
5525
5526         spin_lock_bh(&pmon->mon_lock);
5527
5528         sw_mon_entries = &pmon->sw_mon_entries;
5529         rx_mon_stats = &pmon->rx_mon_stats;
5530
5531         if (pmon->hold_mon_dst_ring) {
5532                 spin_unlock_bh(&pmon->mon_lock);
5533                 goto reap_status_ring;
5534         }
5535
5536         ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5537         while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5538                 head_msdu = NULL;
5539                 tail_msdu = NULL;
5540
5541                 mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5542                                                                    &head_msdu,
5543                                                                    &tail_msdu,
5544                                                                    sw_mon_entries);
5545                 rx_bufs_used += mpdu_rx_bufs_used;
5546
5547                 if (!sw_mon_entries->end_of_ppdu) {
5548                         if (head_msdu) {
5549                                 ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5550                                                                          head_msdu,
5551                                                                          tail_msdu);
5553                                 if (ret)
5554                                         break_dst_ring = true;
5555                         }
5556
5557                         goto next_entry;
5558                 } else {
5559                         if (!sw_mon_entries->ppdu_id &&
5560                             !sw_mon_entries->mon_status_paddr) {
5561                                 break_dst_ring = true;
5562                                 goto next_entry;
5563                         }
5564                 }
5565
5566                 rx_mon_stats->dest_ppdu_done++;
5567                 pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5568                 pmon->buf_state = DP_MON_STATUS_LAG;
5569                 pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5570                 pmon->hold_mon_dst_ring = true;
5571 next_entry:
5572                 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5573                                                                 mon_dst_srng);
5574                 if (break_dst_ring)
5575                         break;
5576         }
5577
5578         ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5579         spin_unlock_bh(&pmon->mon_lock);
5580
5581         if (rx_bufs_used) {
5582                 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5583                                            &dp->rxdma_mon_buf_ring,
5584                                            rx_bufs_used,
5585                                            HAL_RX_BUF_RBM_SW3_BM);
5586         }
5587
5588 reap_status_ring:
5589         quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5590                                                           napi, budget);
5591
5592         return quota;
5593 }
5594
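/* Monitor ring NAPI entry point: use the full monitor path on
 * hardware that supports it when monitor mode has been started,
 * otherwise fall back to plain status ring processing.
 */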
5595 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5596                                    struct napi_struct *napi, int budget)
5597 {
5598         struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5599         int ret = 0;
5600
5601         if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5602             ab->hw_params.full_monitor_mode)
5603                 ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5604         else
5605                 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5606
5607         return ret;
5608 }
5609
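/* One-time per-pdev init of monitor status state: the status skb
 * queue, the PPDU status state machine and the rx monitor counters.
 */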
5610 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5611 {
5612         struct ath11k_pdev_dp *dp = &ar->dp;
5613         struct ath11k_mon_data *pmon = &dp->mon_data;
5614
5615         skb_queue_head_init(&pmon->rx_status_q);
5616
5617         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5618
5619         memset(&pmon->rx_mon_stats, 0,
5620                sizeof(pmon->rx_mon_stats));
5621         return 0;
5622 }
5623
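/* Per-pdev monitor attach: init status handling and, on chips with a
 * dedicated monitor rxdma (rxdma1), carve out the link descriptor
 * banks that back the RXDMA monitor desc ring.
 */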
5624 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5625 {
5626         struct ath11k_pdev_dp *dp = &ar->dp;
5627         struct ath11k_mon_data *pmon = &dp->mon_data;
5628         struct hal_srng *mon_desc_srng = NULL;
5629         struct dp_srng *dp_srng;
5630         int ret = 0;
5631         u32 n_link_desc = 0;
5632
5633         ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5634         if (ret) {
5635                 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
5636                 return ret;
5637         }
5638
5639         /* if rxdma1_enable is false, no need to setup
5640          * rxdma_mon_desc_ring.
5641          */
5642         if (!ar->ab->hw_params.rxdma1_enable)
5643                 return 0;
5644
5645         dp_srng = &dp->rxdma_mon_desc_ring;
5646         n_link_desc = dp_srng->size /
5647                 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5648         mon_desc_srng =
5649                 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5650
5651         ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5652                                         HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5653                                         n_link_desc);
5654         if (ret) {
5655                 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
5656                 return ret;
5657         }
5658         pmon->mon_last_linkdesc_paddr = 0;
5659         pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5660         spin_lock_init(&pmon->mon_lock);
5661
5662         return 0;
5663 }
5664
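/* Counterpart of the attach path: release the monitor link descriptor
 * banks set up in ath11k_dp_rx_pdev_mon_attach().
 */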
5665 static int ath11k_dp_mon_link_free(struct ath11k *ar)
5666 {
5667         struct ath11k_pdev_dp *dp = &ar->dp;
5668         struct ath11k_mon_data *pmon = &dp->mon_data;
5669
5670         ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5671                                     HAL_RXDMA_MONITOR_DESC,
5672                                     &dp->rxdma_mon_desc_ring);
5673         return 0;
5674 }
5675
5676 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
5677 {
5678         ath11k_dp_mon_link_free(ar);
5679         return 0;
5680 }
5681
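/* Arm the monitor reap timer so that pktlog data queued on the
 * monitor rings keeps getting reaped periodically.
 */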
5682 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5683 {
5684         /* start reap timer */
5685         mod_timer(&ab->mon_reap_timer,
5686                   jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5687
5688         return 0;
5689 }
5690
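/* Stop pktlog: optionally stop the reap timer (a caller may leave it
 * running, presumably when monitor mode still needs it) and drain any
 * buffers left on the monitor ring.
 */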
5691 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5692 {
5693         int ret;
5694
5695         if (stop_timer)
5696                 del_timer_sync(&ab->mon_reap_timer);
5697
5698         /* reap all the monitor related rings */
5699         ret = ath11k_dp_purge_mon_ring(ab);
5700         if (ret) {
5701                 ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5702                 return ret;
5703         }
5704
5705         return 0;
5706 }