GNU Linux-libre 4.19.304-gnu1
[releases.git] / drivers / net / wireless / intel / iwlwifi / mvm / rxmq.c
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018 Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <ilw@linux.intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018 Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *****************************************************************************/
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include "iwl-trans.h"
66 #include "mvm.h"
67 #include "fw-api.h"
68
/*
 * iwl_mvm_check_pn - replay (packet number) check for RSS queues
 *
 * mac80211 performs PN checking only for frames delivered on the default
 * queue, so frames arriving on other RSS queues are checked here against
 * the per-queue/per-TID PN state kept in the station's ptk_pn data.
 *
 * Returns 0 if the frame may continue up the stack (it passed the check,
 * or no check applies) and -1 if it must be dropped.
 */
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	int res;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_ERR(mvm,
			"expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* ext IV follows the 802.11 header; key index is in bits 7-6 of
	 * its 4th byte (CCMP/GCMP header layout)
	 */
	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn - the ext IV carries PN0/PN1 in bytes 0/1 and PN2..PN5
	 * in bytes 4..7; store MSB (PN5) first so memcmp() below compares
	 * numerically
	 */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	/* a non-increasing PN is a replay, except when mac80211 explicitly
	 * allowed an equal PN (A-MSDU subframes of the same MPDU)
	 */
	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
	if (res < 0)
		return -1;
	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
		return -1;

	/* record the new PN and tell mac80211 not to re-check it */
	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
144
/*
 * iwl_mvm_create_skb - Adds the rxb to a new skb
 *
 * Copies the 802.11 header (plus crypto IV) into the skb head, skipping
 * any firmware-inserted DWORD padding, strips the trailing MIC/CRC when
 * the hardware did not already remove it, and attaches the remaining
 * payload as a page fragment stolen from the rx buffer.
 *
 * Returns 0 on success, -EINVAL when the descriptor lengths are
 * inconsistent (with a WARN so the condition is visible).
 */
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
			      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* MIC/CRC length is reported in DWORD-pairs, hence the << 1 */
	u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
				     IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		len -= 2;
		pad_len = 2;
	}

	/*
	 * For non monitor interface strip the bytes the RADA might not have
	 * removed. As monitor interface cannot exist with other interfaces
	 * this removal is safe.
	 */
	if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
		u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);

		/*
		 * If RADA was not enabled then decryption was not performed so
		 * the MIC cannot be removed.
		 */
		if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
			if (WARN_ON(crypt_len > mic_crc_len))
				return -EINVAL;

			mic_crc_len -= crypt_len;
		}

		if (WARN_ON(mic_crc_len > len))
			return -EINVAL;

		len -= mic_crc_len;
	}

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len :
					       hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;

	if (WARN_ONCE(headlen < hdrlen,
		      "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
		      hdrlen, len, crypt_len)) {
		/*
		 * We warn and trace because we want to be able to see
		 * it in trace-cmd as well.
		 */
		IWL_DEBUG_RX(mvm,
			     "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
			     hdrlen, len, crypt_len);
		return -EINVAL;
	}

	/* linear part: header+IV, then post-padding payload up to headlen */
	skb_put_data(skb, hdr, hdrlen);
	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

	fraglen = len - headlen;

	if (fraglen) {
		/* remaining payload stays in the rx page as a fragment */
		int offset = (void *)hdr + headlen + pad_len -
			     rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	return 0;
}
237
238 /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
239 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
240                                             struct napi_struct *napi,
241                                             struct sk_buff *skb, int queue,
242                                             struct ieee80211_sta *sta)
243 {
244         struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
245
246         if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
247                 kfree_skb(skb);
248         } else {
249                 unsigned int radiotap_len = 0;
250
251                 if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
252                         radiotap_len += sizeof(struct ieee80211_radiotap_he);
253                 if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
254                         radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
255                 __skb_push(skb, radiotap_len);
256                 ieee80211_rx_napi(mvm->hw, sta, skb, napi);
257         }
258 }
259
260 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
261                                         struct ieee80211_rx_status *rx_status,
262                                         u32 rate_n_flags, int energy_a,
263                                         int energy_b)
264 {
265         int max_energy;
266         u32 rate_flags = rate_n_flags;
267
268         energy_a = energy_a ? -energy_a : S8_MIN;
269         energy_b = energy_b ? -energy_b : S8_MIN;
270         max_energy = max(energy_a, energy_b);
271
272         IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
273                         energy_a, energy_b, max_energy);
274
275         rx_status->signal = max_energy;
276         rx_status->chains =
277                 (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
278         rx_status->chain_signal[0] = energy_a;
279         rx_status->chain_signal[1] = energy_b;
280         rx_status->chain_signal[2] = S8_MIN;
281 }
282
/*
 * iwl_mvm_rx_crypto - translate the firmware decryption status into
 * mac80211 rx_status flags and compute the crypto header length.
 *
 * Returns 0 to continue processing the frame (possibly still encrypted,
 * to be handled in software) and -1 to drop it.
 */
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats, u16 phy_info,
			     struct iwl_rx_mpdu_desc *desc,
			     u32 pkt_flags, int queue, u8 *crypt_len)
{
	u16 status = le16_to_cpu(desc->status);

	/*
	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
	 * (where we don't have the keys).
	 * We limit this to aggregation because in TKIP this is a valid
	 * scenario, since we may not have the (correct) TTAK (phase 1
	 * key) in the firmware.
	 */
	if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
		return -1;

	/* unprotected frames need no crypto handling at all */
	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if (pkt_flags & FH_RSCSR_RADA_EN)
			stats->flag |= RX_FLAG_MIC_STRIPPED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* fall through if TTAK OK */
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		/* only set the WEP IV length when we actually hit the WEP
		 * case (not on TKIP fallthrough, which set it above)
		 */
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
				IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;

		if (pkt_flags & FH_RSCSR_RADA_EN)
			stats->flag |= RX_FLAG_ICV_STRIPPED;

		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	default:
		/* Expected in monitor (not having the keys) */
		if (!mvm->monitor_on)
			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
	}

	return 0;
}
357
358 static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
359                             struct sk_buff *skb,
360                             struct iwl_rx_mpdu_desc *desc)
361 {
362         struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
363         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
364         u16 flags = le16_to_cpu(desc->l3l4_flags);
365         u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
366                           IWL_RX_L3_PROTO_POS);
367
368         if (mvmvif->features & NETIF_F_RXCSUM &&
369             flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
370             (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
371              l3_prot == IWL_RX_L3_TYPE_IPV6 ||
372              l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
373                 skb->ip_summed = CHECKSUM_UNNECESSARY;
374 }
375
/*
 * returns true if a packet is a duplicate and should be dropped.
 * Updates AMSDU PN tracking info
 *
 * Duplicate detection follows IEEE 802.11-2012 9.3.2.10: a retry with the
 * same sequence number (and, for A-MSDU, a not-greater subframe index) as
 * the last accepted frame for the TID is a duplicate. Also sets
 * RX_FLAG_ALLOW_SAME_PN for later subframes of the same A-MSDU so the PN
 * check in iwl_mvm_check_pn() accepts them.
 */
static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
			   struct ieee80211_rx_status *rx_status,
			   struct ieee80211_hdr *hdr,
			   struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
		return false;
	}

	if (ieee80211_is_data_qos(hdr->frame_control))
		/* frame has qos control */
		tid = ieee80211_get_tid(hdr);
	else
		/* non-QoS data uses the extra slot past the QoS TIDs */
		tid = IWL_MAX_TID_COUNT;

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info &
		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	/* Allow same PN as the first subframe for following sub frames */
	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;

	/* remember this frame so the next one can be compared against it */
	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}
434
435 int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
436                             const u8 *data, u32 count)
437 {
438         struct iwl_rxq_sync_cmd *cmd;
439         u32 data_size = sizeof(*cmd) + count;
440         int ret;
441
442         /* should be DWORD aligned */
443         if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
444                 return -EINVAL;
445
446         cmd = kzalloc(data_size, GFP_KERNEL);
447         if (!cmd)
448                 return -ENOMEM;
449
450         cmd->rxq_mask = cpu_to_le32(rxq_mask);
451         cmd->count =  cpu_to_le32(count);
452         cmd->flags = 0;
453         memcpy(cmd->payload, data, count);
454
455         ret = iwl_mvm_send_cmd_pdu(mvm,
456                                    WIDE_ID(DATA_PATH_GROUP,
457                                            TRIGGER_RX_QUEUES_NOTIF_CMD),
458                                    0, data_size, cmd);
459
460         kfree(cmd);
461         return ret;
462 }
463
464 /*
465  * Returns true if sn2 - buffer_size < sn1 < sn2.
466  * To be used only in order to compare reorder buffer head with NSSN.
467  * We fully trust NSSN unless it is behind us due to reorder timeout.
468  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
469  */
470 static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
471 {
472         return ieee80211_sn_less(sn1, sn2) &&
473                !ieee80211_sn_less(sn1, sn2 - buffer_size);
474 }
475
/* how long (in jiffies) to hold out-of-order frames before releasing them */
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
477
/*
 * iwl_mvm_release_frames - release buffered frames up to the new SSN
 *
 * Walks the reorder buffer from its head up to (but not including) @nssn,
 * passing every queued frame for those sequence numbers to mac80211, then
 * advances head_sn. Re-arms or cancels the reorder timer depending on
 * whether frames remain buffered.
 *
 * Caller must hold reorder_buf->lock (asserted below).
 */
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_baid_data *baid_data,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	/* ignore nssn smaller than head sn - this can happen due to timeout */
	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
		goto set_timer;

	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &entries[index].e.frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/*
		 * Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

set_timer:
	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		/* find the first slot that still holds a frame */
		while (skb_queue_empty(&entries[index].e.frames))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  entries[index].e.reorder_time + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}
531
532 void iwl_mvm_reorder_timer_expired(struct timer_list *t)
533 {
534         struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
535         struct iwl_mvm_baid_data *baid_data =
536                 iwl_mvm_baid_data_from_reorder_buf(buf);
537         struct iwl_mvm_reorder_buf_entry *entries =
538                 &baid_data->entries[buf->queue * baid_data->entries_per_queue];
539         int i;
540         u16 sn = 0, index = 0;
541         bool expired = false;
542         bool cont = false;
543
544         spin_lock(&buf->lock);
545
546         if (!buf->num_stored || buf->removed) {
547                 spin_unlock(&buf->lock);
548                 return;
549         }
550
551         for (i = 0; i < buf->buf_size ; i++) {
552                 index = (buf->head_sn + i) % buf->buf_size;
553
554                 if (skb_queue_empty(&entries[index].e.frames)) {
555                         /*
556                          * If there is a hole and the next frame didn't expire
557                          * we want to break and not advance SN
558                          */
559                         cont = false;
560                         continue;
561                 }
562                 if (!cont &&
563                     !time_after(jiffies, entries[index].e.reorder_time +
564                                          RX_REORDER_BUF_TIMEOUT_MQ))
565                         break;
566
567                 expired = true;
568                 /* continue until next hole after this expired frames */
569                 cont = true;
570                 sn = ieee80211_sn_add(buf->head_sn, i + 1);
571         }
572
573         if (expired) {
574                 struct ieee80211_sta *sta;
575                 struct iwl_mvm_sta *mvmsta;
576                 u8 sta_id = baid_data->sta_id;
577
578                 rcu_read_lock();
579                 sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
580                 mvmsta = iwl_mvm_sta_from_mac80211(sta);
581
582                 /* SN is set to the last expired frame + 1 */
583                 IWL_DEBUG_HT(buf->mvm,
584                              "Releasing expired frames for sta %u, sn %d\n",
585                              sta_id, sn);
586                 iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
587                                                      sta, baid_data->tid);
588                 iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
589                 rcu_read_unlock();
590         } else {
591                 /*
592                  * If no frame expired and there are stored frames, index is now
593                  * pointing to the first unexpired frame - modify timer
594                  * accordingly to this frame.
595                  */
596                 mod_timer(&buf->reorder_timer,
597                           entries[index].e.reorder_time +
598                           1 + RX_REORDER_BUF_TIMEOUT_MQ);
599         }
600         spin_unlock(&buf->lock);
601 }
602
/*
 * iwl_mvm_del_ba - tear down the reorder buffer for a BA session on @queue
 *
 * Flushes everything currently buffered (by releasing up to head_sn +
 * buf_size, i.e. one full window) to the stack and stops the reorder
 * timer. Called from the per-queue RX-sync notification path.
 */
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size));
	spin_unlock_bh(&reorder_buf->lock);
	/* sync: the timer must not be running once we return */
	del_timer_sync(&reorder_buf->reorder_timer);

out:
	rcu_read_unlock();
}
637
638 void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
639                             int queue)
640 {
641         struct iwl_rx_packet *pkt = rxb_addr(rxb);
642         struct iwl_rxq_sync_notification *notif;
643         struct iwl_mvm_internal_rxq_notif *internal_notif;
644
645         notif = (void *)pkt->data;
646         internal_notif = (void *)notif->payload;
647
648         if (internal_notif->sync &&
649             mvm->queue_sync_cookie != internal_notif->cookie) {
650                 WARN_ONCE(1, "Received expired RX queue sync message\n");
651                 return;
652         }
653
654         switch (internal_notif->type) {
655         case IWL_MVM_RXQ_EMPTY:
656                 break;
657         case IWL_MVM_RXQ_NOTIF_DEL_BA:
658                 iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
659                 break;
660         default:
661                 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
662         }
663
664         if (internal_notif->sync &&
665             !atomic_dec_return(&mvm->queue_sync_counter))
666                 wake_up(&mvm->rx_sync_waitq);
667 }
668
669 /*
670  * Returns true if the MPDU was buffered\dropped, false if it should be passed
671  * to upper layer.
672  */
673 static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
674                             struct napi_struct *napi,
675                             int queue,
676                             struct ieee80211_sta *sta,
677                             struct sk_buff *skb,
678                             struct iwl_rx_mpdu_desc *desc)
679 {
680         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
681         struct iwl_mvm_sta *mvm_sta;
682         struct iwl_mvm_baid_data *baid_data;
683         struct iwl_mvm_reorder_buffer *buffer;
684         struct sk_buff *tail;
685         u32 reorder = le32_to_cpu(desc->reorder_data);
686         bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
687         bool last_subframe =
688                 desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
689         u8 tid = ieee80211_get_tid(hdr);
690         u8 sub_frame_idx = desc->amsdu_info &
691                            IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
692         struct iwl_mvm_reorder_buf_entry *entries;
693         int index;
694         u16 nssn, sn;
695         u8 baid;
696
697         baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
698                 IWL_RX_MPDU_REORDER_BAID_SHIFT;
699
700         /*
701          * This also covers the case of receiving a Block Ack Request
702          * outside a BA session; we'll pass it to mac80211 and that
703          * then sends a delBA action frame.
704          */
705         if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
706                 return false;
707
708         /* no sta yet */
709         if (WARN_ONCE(IS_ERR_OR_NULL(sta),
710                       "Got valid BAID without a valid station assigned\n"))
711                 return false;
712
713         mvm_sta = iwl_mvm_sta_from_mac80211(sta);
714
715         /* not a data packet or a bar */
716         if (!ieee80211_is_back_req(hdr->frame_control) &&
717             (!ieee80211_is_data_qos(hdr->frame_control) ||
718              is_multicast_ether_addr(hdr->addr1)))
719                 return false;
720
721         if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
722                 return false;
723
724         baid_data = rcu_dereference(mvm->baid_map[baid]);
725         if (!baid_data) {
726                 IWL_DEBUG_RX(mvm,
727                              "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
728                               baid, reorder);
729                 return false;
730         }
731
732         if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
733                  "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
734                  baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
735                  tid))
736                 return false;
737
738         nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
739         sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
740                 IWL_RX_MPDU_REORDER_SN_SHIFT;
741
742         buffer = &baid_data->reorder_buf[queue];
743         entries = &baid_data->entries[queue * baid_data->entries_per_queue];
744
745         spin_lock_bh(&buffer->lock);
746
747         if (!buffer->valid) {
748                 if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
749                         spin_unlock_bh(&buffer->lock);
750                         return false;
751                 }
752                 buffer->valid = true;
753         }
754
755         if (ieee80211_is_back_req(hdr->frame_control)) {
756                 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
757                 goto drop;
758         }
759
760         /*
761          * If there was a significant jump in the nssn - adjust.
762          * If the SN is smaller than the NSSN it might need to first go into
763          * the reorder buffer, in which case we just release up to it and the
764          * rest of the function will take care of storing it and releasing up to
765          * the nssn
766          */
767         if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
768                                 buffer->buf_size) ||
769             !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
770                 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
771
772                 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
773                                        min_sn);
774         }
775
	/* drop any outdated packets */
777         if (ieee80211_sn_less(sn, buffer->head_sn))
778                 goto drop;
779
780         /* release immediately if allowed by nssn and no stored frames */
781         if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
782                 if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
783                                        buffer->buf_size) &&
784                    (!amsdu || last_subframe))
785                         buffer->head_sn = nssn;
786                 /* No need to update AMSDU last SN - we are moving the head */
787                 spin_unlock_bh(&buffer->lock);
788                 return false;
789         }
790
791         /*
792          * release immediately if there are no stored frames, and the sn is
793          * equal to the head.
794          * This can happen due to reorder timer, where NSSN is behind head_sn.
795          * When we released everything, and we got the next frame in the
796          * sequence, according to the NSSN we can't release immediately,
797          * while technically there is no hole and we can move forward.
798          */
799         if (!buffer->num_stored && sn == buffer->head_sn) {
800                 if (!amsdu || last_subframe)
801                         buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
802                 /* No need to update AMSDU last SN - we are moving the head */
803                 spin_unlock_bh(&buffer->lock);
804                 return false;
805         }
806
807         index = sn % buffer->buf_size;
808
809         /*
810          * Check if we already stored this frame
811          * As AMSDU is either received or not as whole, logic is simple:
	 * If we have frames in that position in the buffer and the last frame
	 * that originated from an A-MSDU had a different SN, then it is a
	 * retransmission.
814          * If it is the same SN then if the subframe index is incrementing it
815          * is the same AMSDU - otherwise it is a retransmission.
816          */
817         tail = skb_peek_tail(&entries[index].e.frames);
818         if (tail && !amsdu)
819                 goto drop;
820         else if (tail && (sn != buffer->last_amsdu ||
821                           buffer->last_sub_index >= sub_frame_idx))
822                 goto drop;
823
824         /* put in reorder buffer */
825         __skb_queue_tail(&entries[index].e.frames, skb);
826         buffer->num_stored++;
827         entries[index].e.reorder_time = jiffies;
828
829         if (amsdu) {
830                 buffer->last_amsdu = sn;
831                 buffer->last_sub_index = sub_frame_idx;
832         }
833
834         /*
835          * We cannot trust NSSN for AMSDU sub-frames that are not the last.
836          * The reason is that NSSN advances on the first sub-frame, and may
837          * cause the reorder buffer to advance before all the sub-frames arrive.
838          * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
839          * SN 1. NSSN for first sub frame will be 3 with the result of driver
840          * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
841          * already ahead and it will be dropped.
842          * If the last sub-frame is not on this queue - we will get frame
843          * release notification with up to date NSSN.
844          */
845         if (!amsdu || last_subframe)
846                 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
847
848         spin_unlock_bh(&buffer->lock);
849         return true;
850
851 drop:
852         kfree_skb(skb);
853         spin_unlock_bh(&buffer->lock);
854         return true;
855 }
856
857 static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
858                                     u32 reorder_data, u8 baid)
859 {
860         unsigned long now = jiffies;
861         unsigned long timeout;
862         struct iwl_mvm_baid_data *data;
863
864         rcu_read_lock();
865
866         data = rcu_dereference(mvm->baid_map[baid]);
867         if (!data) {
868                 IWL_DEBUG_RX(mvm,
869                              "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
870                               baid, reorder_data);
871                 goto out;
872         }
873
874         if (!data->timeout)
875                 goto out;
876
877         timeout = data->timeout;
878         /*
879          * Do not update last rx all the time to avoid cache bouncing
880          * between the rx queues.
881          * Update it every timeout. Worst case is the session will
882          * expire after ~ 2 * timeout, which doesn't matter that much.
883          */
884         if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
885                 /* Update is atomic */
886                 data->last_rx = now;
887
888 out:
889         rcu_read_unlock();
890 }
891
892 static void iwl_mvm_flip_address(u8 *addr)
893 {
894         int i;
895         u8 mac_addr[ETH_ALEN];
896
897         for (i = 0; i < ETH_ALEN; i++)
898                 mac_addr[i] = addr[ETH_ALEN - i - 1];
899         ether_addr_copy(addr, mac_addr);
900 }
901
902 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
903                         struct iwl_rx_cmd_buffer *rxb, int queue)
904 {
905         struct ieee80211_rx_status *rx_status;
906         struct iwl_rx_packet *pkt = rxb_addr(rxb);
907         struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
908         struct ieee80211_hdr *hdr;
909         u32 len = le16_to_cpu(desc->mpdu_len);
910         u32 rate_n_flags, gp2_on_air_rise;
911         u16 phy_info = le16_to_cpu(desc->phy_info);
912         struct ieee80211_sta *sta = NULL;
913         struct sk_buff *skb;
914         u8 crypt_len = 0, channel, energy_a, energy_b;
915         struct ieee80211_radiotap_he *he = NULL;
916         struct ieee80211_radiotap_he_mu *he_mu = NULL;
917         u32 he_type = 0xffffffff;
918         /* this is invalid e.g. because puncture type doesn't allow 0b11 */
919 #define HE_PHY_DATA_INVAL ((u64)-1)
920         u64 he_phy_data = HE_PHY_DATA_INVAL;
921         size_t desc_size;
922
923         if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
924                 return;
925
926         if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
927                 rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
928                 channel = desc->v3.channel;
929                 gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
930                 energy_a = desc->v3.energy_a;
931                 energy_b = desc->v3.energy_b;
932                 desc_size = sizeof(*desc);
933         } else {
934                 rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
935                 channel = desc->v1.channel;
936                 gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
937                 energy_a = desc->v1.energy_a;
938                 energy_b = desc->v1.energy_b;
939                 desc_size = IWL_RX_DESC_SIZE_V1;
940         }
941
942         hdr = (void *)(pkt->data + desc_size);
	/* Don't use dev_alloc_skb(); we'll have enough headroom once the
	 * ieee80211_hdr is pulled.
	 */
946         skb = alloc_skb(128, GFP_ATOMIC);
947         if (!skb) {
948                 IWL_ERR(mvm, "alloc_skb failed\n");
949                 return;
950         }
951
952         if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
953                 /*
954                  * If the device inserted padding it means that (it thought)
955                  * the 802.11 header wasn't a multiple of 4 bytes long. In
956                  * this case, reserve two bytes at the start of the SKB to
957                  * align the payload properly in case we end up copying it.
958                  */
959                 skb_reserve(skb, 2);
960         }
961
962         rx_status = IEEE80211_SKB_RXCB(skb);
963
964         if (rate_n_flags & RATE_MCS_HE_MSK) {
965                 static const struct ieee80211_radiotap_he known = {
966                         .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
967                                              IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
968                                              IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
969                                              IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
970                         .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
971                                              IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
972                 };
973                 static const struct ieee80211_radiotap_he_mu mu_known = {
974                         .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
975                                               IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
976                                               IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
977                                               IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
978                         .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
979                 };
980                 unsigned int radiotap_len = 0;
981
982                 he = skb_put_data(skb, &known, sizeof(known));
983                 radiotap_len += sizeof(known);
984                 rx_status->flag |= RX_FLAG_RADIOTAP_HE;
985
986                 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
987
988                 if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
989                         if (mvm->trans->cfg->device_family >=
990                             IWL_DEVICE_FAMILY_22560)
991                                 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
992                         else
993                                 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
994
995                         if (he_type == RATE_MCS_HE_TYPE_MU) {
996                                 he_mu = skb_put_data(skb, &mu_known,
997                                                      sizeof(mu_known));
998                                 radiotap_len += sizeof(mu_known);
999                                 rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
1000                         }
1001                 }
1002
1003                 /* temporarily hide the radiotap data */
1004                 __skb_pull(skb, radiotap_len);
1005         }
1006
1007         rx_status = IEEE80211_SKB_RXCB(skb);
1008
1009         if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
1010                               le32_to_cpu(pkt->len_n_flags), queue,
1011                               &crypt_len)) {
1012                 kfree_skb(skb);
1013                 return;
1014         }
1015
1016         /*
1017          * Keep packets with CRC errors (and with overrun) for monitor mode
1018          * (otherwise the firmware discards them) but mark them as bad.
1019          */
1020         if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
1021             !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
1022                 IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
1023                              le16_to_cpu(desc->status));
1024                 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1025         }
1026         /* set the preamble flag if appropriate */
1027         if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
1028                 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
1029
1030         if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
1031                 u64 tsf_on_air_rise;
1032
1033                 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1034                         tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
1035                 else
1036                         tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
1037
1038                 rx_status->mactime = tsf_on_air_rise;
1039                 /* TSF as indicated by the firmware is at INA time */
1040                 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
1041         } else if (he_type == RATE_MCS_HE_TYPE_SU) {
1042                 u64 he_phy_data;
1043
1044                 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1045                         he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1046                 else
1047                         he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1048
1049                 he->data1 |=
1050                         cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
1051                 if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
1052                               he_phy_data))
1053                         he->data3 |=
1054                                 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
1055
1056                 if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1057                         rx_status->ampdu_reference = mvm->ampdu_ref;
1058                         mvm->ampdu_ref++;
1059
1060                         rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1061                         rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1062                         if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1063                                       he_phy_data))
1064                                 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1065                 }
1066         } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
1067                 he_mu->flags1 |=
1068                         le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
1069                                                    he_phy_data),
1070                                          IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
1071                 he_mu->flags1 |=
1072                         le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
1073                                                    he_phy_data),
1074                                          IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
1075                 he_mu->flags1 |=
1076                         le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
1077                                                    he_phy_data),
1078                                          IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
1079                 he_mu->flags2 |=
1080                         le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
1081                                                    he_phy_data),
1082                                          IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
1083                 he_mu->flags2 |=
1084                         le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
1085                                                    he_phy_data),
1086                                          IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
1087         }
1088         rx_status->device_timestamp = gp2_on_air_rise;
1089         rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
1090                 NL80211_BAND_2GHZ;
1091         rx_status->freq = ieee80211_channel_to_frequency(channel,
1092                                                          rx_status->band);
1093         iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
1094                                     energy_b);
1095
1096         /* update aggregation data for monitor sake on default queue */
1097         if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1098                 bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
1099                 u64 he_phy_data;
1100
1101                 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1102                         he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1103                 else
1104                         he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1105
1106                 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1107                 /* toggle is switched whenever new aggregation starts */
1108                 if (toggle_bit != mvm->ampdu_toggle) {
1109                         mvm->ampdu_ref++;
1110                         mvm->ampdu_toggle = toggle_bit;
1111
1112                         if (he_phy_data != HE_PHY_DATA_INVAL &&
1113                             he_type == RATE_MCS_HE_TYPE_MU) {
1114                                 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1115                                 if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1116                                               he_phy_data))
1117                                         rx_status->flag |=
1118                                                 RX_FLAG_AMPDU_EOF_BIT;
1119                         }
1120                 }
1121                 rx_status->ampdu_reference = mvm->ampdu_ref;
1122         }
1123
1124         rcu_read_lock();
1125
1126         if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
1127                 u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
1128
1129                 if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
1130                         sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
1131                         if (IS_ERR(sta))
1132                                 sta = NULL;
1133                 }
1134         } else if (!is_multicast_ether_addr(hdr->addr2)) {
1135                 /*
1136                  * This is fine since we prevent two stations with the same
1137                  * address from being added.
1138                  */
1139                 sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
1140         }
1141
1142         if (sta) {
1143                 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1144                 struct ieee80211_vif *tx_blocked_vif =
1145                         rcu_dereference(mvm->csa_tx_blocked_vif);
1146                 u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
1147                                IWL_RX_MPDU_REORDER_BAID_MASK) >>
1148                                IWL_RX_MPDU_REORDER_BAID_SHIFT);
1149
1150                 if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
1151                     !is_multicast_ether_addr(hdr->addr1) &&
1152                     ieee80211_is_data(hdr->frame_control) &&
1153                     time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1154                         schedule_delayed_work(&mvm->tcm.work, 0);
1155
1156                 /*
1157                  * We have tx blocked stations (with CS bit). If we heard
1158                  * frames from a blocked station on a new channel we can
1159                  * TX to it again.
1160                  */
1161                 if (unlikely(tx_blocked_vif) &&
1162                     tx_blocked_vif == mvmsta->vif) {
1163                         struct iwl_mvm_vif *mvmvif =
1164                                 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
1165
1166                         if (mvmvif->csa_target_freq == rx_status->freq)
1167                                 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
1168                                                                  false);
1169                 }
1170
1171                 rs_update_last_rssi(mvm, mvmsta, rx_status);
1172
1173                 if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
1174                     ieee80211_is_beacon(hdr->frame_control)) {
1175                         struct iwl_fw_dbg_trigger_tlv *trig;
1176                         struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
1177                         bool trig_check;
1178                         s32 rssi;
1179
1180                         trig = iwl_fw_dbg_get_trigger(mvm->fw,
1181                                                       FW_DBG_TRIGGER_RSSI);
1182                         rssi_trig = (void *)trig->data;
1183                         rssi = le32_to_cpu(rssi_trig->rssi);
1184
1185                         trig_check =
1186                                 iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
1187                                                               ieee80211_vif_to_wdev(mvmsta->vif),
1188                                                               trig);
1189                         if (trig_check && rx_status->signal < rssi)
1190                                 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1191                                                         NULL);
1192                 }
1193
1194                 if (ieee80211_is_data(hdr->frame_control))
1195                         iwl_mvm_rx_csum(sta, skb, desc);
1196
1197                 if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
1198                         kfree_skb(skb);
1199                         goto out;
1200                 }
1201
1202                 /*
1203                  * Our hardware de-aggregates AMSDUs but copies the mac header
1204                  * as it to the de-aggregated MPDUs. We need to turn off the
1205                  * AMSDU bit in the QoS control ourselves.
1206                  * In addition, HW reverses addr3 and addr4 - reverse it back.
1207                  */
1208                 if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
1209                     !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
1210                         u8 *qc = ieee80211_get_qos_ctl(hdr);
1211
1212                         *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1213
1214                         if (mvm->trans->cfg->device_family ==
1215                             IWL_DEVICE_FAMILY_9000) {
1216                                 iwl_mvm_flip_address(hdr->addr3);
1217
1218                                 if (ieee80211_has_a4(hdr->frame_control))
1219                                         iwl_mvm_flip_address(hdr->addr4);
1220                         }
1221                 }
1222                 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
1223                         u32 reorder_data = le32_to_cpu(desc->reorder_data);
1224
1225                         iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
1226                 }
1227         }
1228
1229         switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1230         case RATE_MCS_CHAN_WIDTH_20:
1231                 break;
1232         case RATE_MCS_CHAN_WIDTH_40:
1233                 rx_status->bw = RATE_INFO_BW_40;
1234                 break;
1235         case RATE_MCS_CHAN_WIDTH_80:
1236                 rx_status->bw = RATE_INFO_BW_80;
1237                 break;
1238         case RATE_MCS_CHAN_WIDTH_160:
1239                 rx_status->bw = RATE_INFO_BW_160;
1240                 break;
1241         }
1242
1243         if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
1244             rate_n_flags & RATE_MCS_HE_106T_MSK) {
1245                 rx_status->bw = RATE_INFO_BW_HE_RU;
1246                 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1247         }
1248
1249         if (rate_n_flags & RATE_MCS_HE_MSK &&
1250             phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
1251             he_type == RATE_MCS_HE_TYPE_MU) {
1252                 /*
1253                  * Unfortunately, we have to leave the mac80211 data
1254                  * incorrect for the case that we receive an HE-MU
1255                  * transmission and *don't* have the he_mu pointer,
1256                  * i.e. we don't have the phy data (due to the bits
1257                  * being used for TSF). This shouldn't happen though
		 * as management frames, where we need the TSF/timers,
		 * should not be transmitted in HE-MU, I think.
1260                  */
1261                 u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
1262                 u8 offs = 0;
1263
1264                 rx_status->bw = RATE_INFO_BW_HE_RU;
1265
1266                 switch (ru) {
1267                 case 0 ... 36:
1268                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
1269                         offs = ru;
1270                         break;
1271                 case 37 ... 52:
1272                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
1273                         offs = ru - 37;
1274                         break;
1275                 case 53 ... 60:
1276                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1277                         offs = ru - 53;
1278                         break;
1279                 case 61 ... 64:
1280                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
1281                         offs = ru - 61;
1282                         break;
1283                 case 65 ... 66:
1284                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
1285                         offs = ru - 65;
1286                         break;
1287                 case 67:
1288                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
1289                         break;
1290                 case 68:
1291                         rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
1292                         break;
1293                 }
1294                 he->data2 |=
1295                         le16_encode_bits(offs,
1296                                          IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
1297                 he->data2 |=
1298                         cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
1299                 if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
1300                         he->data2 |=
1301                                 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
1302         } else if (he) {
1303                 he->data1 |=
1304                         cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
1305         }
1306
1307         if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
1308             rate_n_flags & RATE_MCS_SGI_MSK)
1309                 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1310         if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1311                 rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
1312         if (rate_n_flags & RATE_MCS_LDPC_MSK)
1313                 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
1314         if (rate_n_flags & RATE_MCS_HT_MSK) {
1315                 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1316                                 RATE_MCS_STBC_POS;
1317                 rx_status->encoding = RX_ENC_HT;
1318                 rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
1319                 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1320         } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
1321                 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1322                                 RATE_MCS_STBC_POS;
1323                 rx_status->nss =
1324                         ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1325                                                 RATE_VHT_MCS_NSS_POS) + 1;
1326                 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1327                 rx_status->encoding = RX_ENC_VHT;
1328                 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1329                 if (rate_n_flags & RATE_MCS_BF_MSK)
1330                         rx_status->enc_flags |= RX_ENC_FLAG_BF;
1331         } else if (he) {
1332                 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1333                                 RATE_MCS_STBC_POS;
1334                 rx_status->nss =
1335                         ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1336                                                 RATE_VHT_MCS_NSS_POS) + 1;
1337                 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1338                 rx_status->encoding = RX_ENC_HE;
1339                 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1340                 if (rate_n_flags & RATE_MCS_BF_MSK)
1341                         rx_status->enc_flags |= RX_ENC_FLAG_BF;
1342
1343                 rx_status->he_dcm =
1344                         !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
1345
1346 #define CHECK_TYPE(F)                                                   \
1347         BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=        \
1348                      (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1349
1350                 CHECK_TYPE(SU);
1351                 CHECK_TYPE(EXT_SU);
1352                 CHECK_TYPE(MU);
1353                 CHECK_TYPE(TRIG);
1354
1355                 he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
1356
1357                 if (rate_n_flags & RATE_MCS_BF_POS)
1358                         he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
1359
1360                 switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
1361                         RATE_MCS_HE_GI_LTF_POS) {
1362                 case 0:
1363                         rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1364                         break;
1365                 case 1:
1366                         rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1367                         break;
1368                 case 2:
1369                         rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1370                         break;
1371                 case 3:
1372                         if (rate_n_flags & RATE_MCS_SGI_MSK)
1373                                 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1374                         else
1375                                 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
1376                         break;
1377                 }
1378
1379                 switch (he_type) {
1380                 case RATE_MCS_HE_TYPE_SU: {
1381                         u16 val;
1382
1383                         /* LTF syms correspond to streams */
1384                         he->data2 |=
1385                                 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1386                         switch (rx_status->nss) {
1387                         case 1:
1388                                 val = 0;
1389                                 break;
1390                         case 2:
1391                                 val = 1;
1392                                 break;
1393                         case 3:
1394                         case 4:
1395                                 val = 2;
1396                                 break;
1397                         case 5:
1398                         case 6:
1399                                 val = 3;
1400                                 break;
1401                         case 7:
1402                         case 8:
1403                                 val = 4;
1404                                 break;
1405                         default:
1406                                 WARN_ONCE(1, "invalid nss: %d\n",
1407                                           rx_status->nss);
1408                                 val = 0;
1409                         }
1410                         he->data5 |=
1411                                 le16_encode_bits(val,
1412                                                  IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
1413                         }
1414                         break;
1415                 case RATE_MCS_HE_TYPE_MU: {
1416                         u16 val;
1417                         u64 he_phy_data;
1418
1419                         if (mvm->trans->cfg->device_family >=
1420                             IWL_DEVICE_FAMILY_22560)
1421                                 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1422                         else
1423                                 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1424
1425                         if (he_phy_data == HE_PHY_DATA_INVAL)
1426                                 break;
1427
1428                         val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
1429                                         he_phy_data);
1430
1431                         he->data2 |=
1432                                 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1433                         he->data5 |=
1434                                 cpu_to_le16(FIELD_PREP(
1435                                         IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
1436                                         val));
1437                         }
1438                         break;
1439                 case RATE_MCS_HE_TYPE_EXT_SU:
1440                 case RATE_MCS_HE_TYPE_TRIG:
1441                         /* not supported yet */
1442                         break;
1443                 }
1444         } else {
1445                 int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1446                                                                rx_status->band);
1447
1448                 if (WARN(rate < 0 || rate > 0xFF,
1449                          "Invalid rate flags 0x%x, band %d,\n",
1450                          rate_n_flags, rx_status->band)) {
1451                         kfree_skb(skb);
1452                         goto out;
1453                 }
1454                 rx_status->rate_idx = rate;
1455
1456         }
1457
1458         /* management stuff on default queue */
1459         if (!queue) {
1460                 if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
1461                               ieee80211_is_probe_resp(hdr->frame_control)) &&
1462                              mvm->sched_scan_pass_all ==
1463                              SCHED_SCAN_PASS_ALL_ENABLED))
1464                         mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
1465
1466                 if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
1467                              ieee80211_is_probe_resp(hdr->frame_control)))
1468                         rx_status->boottime_ns = ktime_get_boot_ns();
1469         }
1470
1471         if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
1472                 kfree_skb(skb);
1473                 goto out;
1474         }
1475
1476         if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
1477                 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
1478 out:
1479         rcu_read_unlock();
1480 }
1481
1482 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1483                               struct iwl_rx_cmd_buffer *rxb, int queue)
1484 {
1485         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1486         struct iwl_frame_release *release = (void *)pkt->data;
1487         struct ieee80211_sta *sta;
1488         struct iwl_mvm_reorder_buffer *reorder_buf;
1489         struct iwl_mvm_baid_data *ba_data;
1490
1491         int baid = release->baid;
1492
1493         IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
1494                      release->baid, le16_to_cpu(release->nssn));
1495
1496         if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
1497                 return;
1498
1499         rcu_read_lock();
1500
1501         ba_data = rcu_dereference(mvm->baid_map[baid]);
1502         if (WARN_ON_ONCE(!ba_data))
1503                 goto out;
1504
1505         sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
1506         if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1507                 goto out;
1508
1509         reorder_buf = &ba_data->reorder_buf[queue];
1510
1511         spin_lock_bh(&reorder_buf->lock);
1512         iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
1513                                le16_to_cpu(release->nssn));
1514         spin_unlock_bh(&reorder_buf->lock);
1515
1516 out:
1517         rcu_read_unlock();
1518 }