1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <ilw@linux.intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *****************************************************************************/
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include "iwl-trans.h"
/*
 * Per-queue PN (replay) check for hw-decrypted unicast CCMP/GCMP data
 * frames.  Non-data/multicast frames and undecrypted frames are exempted
 * below; fragmented-frame PN checking is deliberately left to mac80211.
 * NOTE(review): lines are elided from this chunk (the in-line numbering
 * skips), so not every branch/return path is visible here.
 */
69 static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
70 int queue, struct ieee80211_sta *sta)
72 struct iwl_mvm_sta *mvmsta;
73 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
74 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
75 struct iwl_mvm_key_pn *ptk_pn;
78 u8 pn[IEEE80211_CCMP_PN_LEN];
83 /* multicast and non-data only arrives on default queue */
84 if (!ieee80211_is_data(hdr->frame_control) ||
85 is_multicast_ether_addr(hdr->addr1))
88 /* do not check PN for open AP */
89 if (!(stats->flag & RX_FLAG_DECRYPTED))
93 * avoid checking for default queue - we don't want to replicate
94 * all the logic that's necessary for checking the PN on fragmented
95 * frames, leave that to mac80211
100 /* if we are here - this for sure is either CCMP or GCMP */
101 if (IS_ERR_OR_NULL(sta)) {
103 "expected hw-decrypted unicast frame for station\n");
107 mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* key index is carried in bits 7:6 of the 4th extended-IV byte */
109 extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
110 keyidx = extiv[3] >> 6;
112 ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
116 if (ieee80211_is_data_qos(hdr->frame_control))
117 tid = ieee80211_get_tid(hdr);
121 /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
122 if (tid >= IWL_MAX_TID_COUNT)
/* compare received PN against the last validated PN for this queue/tid */
133 res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
/* equal PN is only tolerated for A-MSDU subframes (ALLOW_SAME_PN) */
136 if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
/* PN accepted: record it and tell mac80211 it need not re-check */
139 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
140 stats->flag |= RX_FLAG_PN_VALIDATED;
/*
 * Build the skb handed to mac80211: copy at least the 802.11 header (plus
 * crypto IV and 8 bytes for SNAP/ethertype) into skb->head, and attach any
 * remaining payload zero-copy as a page fragment stolen from the RX buffer.
 * NOTE(review): lines are elided from this chunk; return statements and
 * some braces are not visible.
 */
145 /* iwl_mvm_create_skb Adds the rxb to a new skb */
146 static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
147 struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
148 struct iwl_rx_cmd_buffer *rxb)
150 struct iwl_rx_packet *pkt = rxb_addr(rxb);
151 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
152 unsigned int headlen, fraglen, pad_len = 0;
153 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
/* firmware flags when it inserted DWORD-alignment padding after the IV */
155 if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
160 /* If frame is small enough to fit in skb->head, pull it completely.
161 * If not, only pull ieee80211_hdr (including crypto if present, and
162 * an additional 8 bytes for SNAP/ethertype, see below) so that
163 * splice() or TCP coalesce are more efficient.
165 * Since, in addition, ieee80211_data_to_8023() always pull in at
166 * least 8 bytes (possibly more for mesh) we can do the same here
167 * to save the cost of doing it later. That still doesn't pull in
168 * the actual IP header since the typical case has a SNAP header.
169 * If the latter changes (there are efforts in the standards group
170 * to do so) we should revisit this and ieee80211_data_to_8023().
172 headlen = (len <= skb_tailroom(skb)) ? len :
173 hdrlen + crypt_len + 8;
175 /* The firmware may align the packet to DWORD.
176 * The padding is inserted after the IV.
177 * After copying the header + IV skip the padding if
178 * present before copying packet data.
/* a headlen smaller than hdrlen would underflow headlen - hdrlen below */
182 if (WARN_ONCE(headlen < hdrlen,
183 "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
184 hdrlen, len, crypt_len)) {
186 * We warn and trace because we want to be able to see
187 * it in trace-cmd as well.
190 "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
191 hdrlen, len, crypt_len);
/* copy header first, then the head of the payload, skipping any pad */
195 skb_put_data(skb, hdr, hdrlen);
196 skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
198 fraglen = len - headlen;
/* remainder is attached zero-copy; the page is stolen from the rxb */
201 int offset = (void *)hdr + headlen + pad_len -
202 rxb_addr(rxb) + rxb_offset(rxb);
204 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
205 fraglen, rxb->truesize);
/*
 * Final hand-off of a received frame to mac80211 (NAPI path), after the
 * PN replay check.  NOTE(review): lines are elided from this chunk.
 */
211 /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
212 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
213 struct napi_struct *napi,
214 struct sk_buff *skb, int queue,
215 struct ieee80211_sta *sta)
217 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
/* a failed PN check means the frame must not reach the stack */
219 if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
222 unsigned int radiotap_len = 0;
/* re-expose the HE/HE-MU radiotap data that was __skb_pull'ed earlier */
224 if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
225 radiotap_len += sizeof(struct ieee80211_radiotap_he);
226 if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
227 radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
228 __skb_push(skb, radiotap_len);
229 ieee80211_rx_napi(mvm->hw, sta, skb, napi);
/*
 * Convert per-antenna firmware energy readings into rx_status signal
 * fields.  NOTE(review): lines are elided from this chunk (e.g. the
 * energy_b parameter and max_energy declaration are not visible).
 */
233 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
234 struct ieee80211_rx_status *rx_status,
235 u32 rate_n_flags, int energy_a,
239 u32 rate_flags = rate_n_flags;
/* firmware reports energy as positive attenuation; 0 means "no reading" */
241 energy_a = energy_a ? -energy_a : S8_MIN;
242 energy_b = energy_b ? -energy_b : S8_MIN;
243 max_energy = max(energy_a, energy_b);
245 IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
246 energy_a, energy_b, max_energy);
/* overall signal is the strongest chain; chain 2 is unused here */
248 rx_status->signal = max_energy;
250 (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
251 rx_status->chain_signal[0] = energy_a;
252 rx_status->chain_signal[1] = energy_b;
253 rx_status->chain_signal[2] = S8_MIN;
/*
 * Interpret the firmware's per-MPDU decryption status: set the relevant
 * RX_FLAG_DECRYPTED / *_STRIPPED flags on @stats and report the crypto
 * header length through *crypt_len.  NOTE(review): lines are elided from
 * this chunk, so return values and break statements are not all visible.
 */
256 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
257 struct ieee80211_rx_status *stats, u16 phy_info,
258 struct iwl_rx_mpdu_desc *desc,
259 u32 pkt_flags, int queue, u8 *crypt_len)
261 u16 status = le16_to_cpu(desc->status);
264 * Drop UNKNOWN frames in aggregation, unless in monitor mode
265 * (where we don't have the keys).
266 * We limit this to aggregation because in TKIP this is a valid
267 * scenario, since we may not have the (correct) TTAK (phase 1
268 * key) in the firmware.
270 if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
271 (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
272 IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
/* unprotected or explicitly not-encrypted frames need no handling */
275 if (!ieee80211_has_protected(hdr->frame_control) ||
276 (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
277 IWL_RX_MPDU_STATUS_SEC_NONE)
280 /* TODO: handle packets encrypted with unknown alg */
282 switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
283 case IWL_RX_MPDU_STATUS_SEC_CCM:
284 case IWL_RX_MPDU_STATUS_SEC_GCM:
285 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
286 /* alg is CCM: check MIC only */
287 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
290 stats->flag |= RX_FLAG_DECRYPTED;
/* RADA (hw de-aggregation) strips the MIC from the frame */
291 if (pkt_flags & FH_RSCSR_RADA_EN)
292 stats->flag |= RX_FLAG_MIC_STRIPPED;
293 *crypt_len = IEEE80211_CCMP_HDR_LEN;
295 case IWL_RX_MPDU_STATUS_SEC_TKIP:
296 /* Don't drop the frame and decrypt it in SW */
297 if (!fw_has_api(&mvm->fw->ucode_capa,
298 IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
299 !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
302 *crypt_len = IEEE80211_TKIP_IV_LEN;
303 /* fall through if TTAK OK */
304 case IWL_RX_MPDU_STATUS_SEC_WEP:
305 if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
308 stats->flag |= RX_FLAG_DECRYPTED;
/* only WEP overwrites crypt_len here; TKIP set its own above */
309 if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
310 IWL_RX_MPDU_STATUS_SEC_WEP)
311 *crypt_len = IEEE80211_WEP_IV_LEN;
313 if (pkt_flags & FH_RSCSR_RADA_EN)
314 stats->flag |= RX_FLAG_ICV_STRIPPED;
317 case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
318 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
320 stats->flag |= RX_FLAG_DECRYPTED;
323 /* Expected in monitor (not having the keys) */
324 if (!mvm->monitor_on)
325 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
/*
 * Mark the skb checksum as already verified by hardware when the vif has
 * RXCSUM enabled and the firmware validated the TCP/UDP checksum.  IPv6
 * has no IP-header checksum, so the IP_HDR_CSUM_OK bit is bypassed for it.
 * NOTE(review): lines are elided from this chunk (the skb parameter line
 * is not visible).
 */
331 static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
333 struct iwl_rx_mpdu_desc *desc)
335 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
336 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
337 u16 flags = le16_to_cpu(desc->l3l4_flags);
338 u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
339 IWL_RX_L3_PROTO_POS);
341 if (mvmvif->features & NETIF_F_RXCSUM &&
342 flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
343 (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
344 l3_prot == IWL_RX_L3_TYPE_IPV6 ||
345 l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
346 skb->ip_summed = CHECKSUM_UNNECESSARY;
/*
 * Duplicate-detection for the RX path, tracked per (queue, tid).
 * NOTE(review): lines are elided from this chunk; the leading comment
 * banner and some return statements are not fully visible.
 */
350 * returns true if a packet is a duplicate and should be dropped.
351 * Updates AMSDU PN tracking info
353 static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
354 struct ieee80211_rx_status *rx_status,
355 struct ieee80211_hdr *hdr,
356 struct iwl_rx_mpdu_desc *desc)
358 struct iwl_mvm_sta *mvm_sta;
359 struct iwl_mvm_rxq_dup_data *dup_data;
360 u8 tid, sub_frame_idx;
362 if (WARN_ON(IS_ERR_OR_NULL(sta)))
365 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
366 dup_data = &mvm_sta->dup_data[queue];
369 * Drop duplicate 802.11 retransmissions
370 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
/* control/qos-null/multicast frames are exempt from dup tracking */
372 if (ieee80211_is_ctl(hdr->frame_control) ||
373 ieee80211_is_qos_nullfunc(hdr->frame_control) ||
374 is_multicast_ether_addr(hdr->addr1)) {
375 rx_status->flag |= RX_FLAG_DUP_VALIDATED;
379 if (ieee80211_is_data_qos(hdr->frame_control))
380 /* frame has qos control */
381 tid = ieee80211_get_tid(hdr);
383 tid = IWL_MAX_TID_COUNT;
385 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
386 sub_frame_idx = desc->amsdu_info &
387 IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
/* retry bit + same seq_ctrl + non-advancing subframe => duplicate */
389 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
390 dup_data->last_seq[tid] == hdr->seq_ctrl &&
391 dup_data->last_sub_frame[tid] >= sub_frame_idx))
394 /* Allow same PN as the first subframe for following sub frames */
395 if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
396 sub_frame_idx > dup_data->last_sub_frame[tid] &&
397 desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
398 rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
/* remember this frame for the next round of dup detection */
400 dup_data->last_seq[tid] = hdr->seq_ctrl;
401 dup_data->last_sub_frame[tid] = sub_frame_idx;
403 rx_status->flag |= RX_FLAG_DUP_VALIDATED;
/*
 * Send an internal sync message to the RX queues selected by @rxq_mask via
 * the TRIGGER_RX_QUEUES_NOTIF_CMD firmware command.  @count bytes of @data
 * are copied into the command payload and must be DWORD aligned.
 * NOTE(review): lines are elided from this chunk; error/cleanup paths
 * (kzalloc failure, kfree, return) are not visible here.
 */
408 int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
409 const u8 *data, u32 count)
411 struct iwl_rxq_sync_cmd *cmd;
412 u32 data_size = sizeof(*cmd) + count;
415 /* should be DWORD aligned */
416 if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
419 cmd = kzalloc(data_size, GFP_KERNEL);
423 cmd->rxq_mask = cpu_to_le32(rxq_mask);
424 cmd->count = cpu_to_le32(count);
426 memcpy(cmd->payload, data, count);
428 ret = iwl_mvm_send_cmd_pdu(mvm,
429 WIDE_ID(DATA_PATH_GROUP,
430 TRIGGER_RX_QUEUES_NOTIF_CMD),
/*
 * Windowed sequence-number comparison for the reorder buffer (wrap-safe
 * via ieee80211_sn_less).
 */
438 * Returns true if sn2 - buffer_size < sn1 < sn2.
439 * To be used only in order to compare reorder buffer head with NSSN.
440 * We fully trust NSSN unless it is behind us due to reorder timeout.
441 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
443 static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
445 return ieee80211_sn_less(sn1, sn2) &&
446 !ieee80211_sn_less(sn1, sn2 - buffer_size);
449 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
/*
 * Flush buffered frames from the per-queue reorder buffer up to (but not
 * including) @nssn, passing them to mac80211, then advance head_sn and
 * rearm or cancel the hole-timeout timer.  Caller must hold
 * reorder_buf->lock (asserted below).  NOTE(review): lines are elided from
 * this chunk (the nssn parameter line and some braces are not visible).
 */
451 static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
452 struct ieee80211_sta *sta,
453 struct napi_struct *napi,
454 struct iwl_mvm_baid_data *baid_data,
455 struct iwl_mvm_reorder_buffer *reorder_buf,
458 struct iwl_mvm_reorder_buf_entry *entries =
459 &baid_data->entries[reorder_buf->queue *
460 baid_data->entries_per_queue];
461 u16 ssn = reorder_buf->head_sn;
463 lockdep_assert_held(&reorder_buf->lock);
465 /* ignore nssn smaller than head sn - this can happen due to timeout */
466 if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
/* walk each SN slot from head up to nssn and drain its frame list */
469 while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
470 int index = ssn % reorder_buf->buf_size;
471 struct sk_buff_head *skb_list = &entries[index].e.frames;
474 ssn = ieee80211_sn_inc(ssn);
477 * Empty the list. Will have more than one frame for A-MSDU.
478 * Empty list is valid as well since nssn indicates frames were
481 while ((skb = __skb_dequeue(skb_list))) {
482 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
485 reorder_buf->num_stored--;
488 reorder_buf->head_sn = nssn;
/* frames still pending: point the timer at the next occupied slot */
491 if (reorder_buf->num_stored && !reorder_buf->removed) {
492 u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
494 while (skb_queue_empty(&entries[index].e.frames))
495 index = (index + 1) % reorder_buf->buf_size;
496 /* modify timer to match next frame's expiration time */
497 mod_timer(&reorder_buf->reorder_timer,
498 entries[index].e.reorder_time + 1 +
499 RX_REORDER_BUF_TIMEOUT_MQ);
501 del_timer(&reorder_buf->reorder_timer);
/*
 * Reorder-buffer hole timeout: scan the buffer from head_sn, release every
 * frame that has waited longer than RX_REORDER_BUF_TIMEOUT_MQ, and rearm
 * the timer for the first unexpired frame otherwise.  Runs in timer
 * (softirq) context, hence spin_lock/rcu.  NOTE(review): lines are elided
 * from this chunk (loop variable declaration, some braces/returns are not
 * visible).
 */
505 void iwl_mvm_reorder_timer_expired(struct timer_list *t)
507 struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
508 struct iwl_mvm_baid_data *baid_data =
509 iwl_mvm_baid_data_from_reorder_buf(buf);
510 struct iwl_mvm_reorder_buf_entry *entries =
511 &baid_data->entries[buf->queue * baid_data->entries_per_queue];
513 u16 sn = 0, index = 0;
514 bool expired = false;
517 spin_lock(&buf->lock);
/* nothing buffered, or session being torn down - nothing to do */
519 if (!buf->num_stored || buf->removed) {
520 spin_unlock(&buf->lock);
524 for (i = 0; i < buf->buf_size ; i++) {
525 index = (buf->head_sn + i) % buf->buf_size;
527 if (skb_queue_empty(&entries[index].e.frames)) {
529 * If there is a hole and the next frame didn't expire
530 * we want to break and not advance SN
536 !time_after(jiffies, entries[index].e.reorder_time +
537 RX_REORDER_BUF_TIMEOUT_MQ))
541 /* continue until next hole after this expired frames */
543 sn = ieee80211_sn_add(buf->head_sn, i + 1);
/* at least one frame expired: release up to sn and notify mac80211 */
547 struct ieee80211_sta *sta;
548 struct iwl_mvm_sta *mvmsta;
549 u8 sta_id = baid_data->sta_id;
552 sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
553 mvmsta = iwl_mvm_sta_from_mac80211(sta);
555 /* SN is set to the last expired frame + 1 */
556 IWL_DEBUG_HT(buf->mvm,
557 "Releasing expired frames for sta %u, sn %d\n",
559 iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
560 sta, baid_data->tid);
561 iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
565 * If no frame expired and there are stored frames, index is now
566 * pointing to the first unexpired frame - modify timer
567 * accordingly to this frame.
569 mod_timer(&buf->reorder_timer,
570 entries[index].e.reorder_time +
571 1 + RX_REORDER_BUF_TIMEOUT_MQ)
573 spin_unlock(&buf->lock);
/*
 * Handle a delBA sync notification on this RX queue: flush every frame
 * still held in the BAID's reorder buffer to the stack (by releasing up to
 * head_sn + buf_size) and stop the hole-timeout timer.  NOTE(review):
 * lines are elided from this chunk; the rcu_read_lock/unlock pair guarding
 * the rcu_dereference calls is not visible here.
 */
576 static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
577 struct iwl_mvm_delba_data *data)
579 struct iwl_mvm_baid_data *ba_data;
580 struct ieee80211_sta *sta;
581 struct iwl_mvm_reorder_buffer *reorder_buf;
582 u8 baid = data->baid;
584 if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
589 ba_data = rcu_dereference(mvm->baid_map[baid]);
590 if (WARN_ON_ONCE(!ba_data))
593 sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
594 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
597 reorder_buf = &ba_data->reorder_buf[queue];
599 /* release all frames that are in the reorder buffer to the stack */
600 spin_lock_bh(&reorder_buf->lock);
601 iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
602 ieee80211_sn_add(reorder_buf->head_sn,
603 reorder_buf->buf_size));
604 spin_unlock_bh(&reorder_buf->lock);
605 del_timer_sync(&reorder_buf->reorder_timer);
/*
 * Dispatch an internal RX-queue sync notification (sent earlier via
 * iwl_mvm_notify_rx_queue).  Stale sync messages (cookie mismatch) are
 * warned about and dropped; the last queue to process a sync wakes the
 * waiter.  NOTE(review): lines are elided from this chunk; the queue
 * parameter line and break statements are not visible.
 */
611 void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
614 struct iwl_rx_packet *pkt = rxb_addr(rxb);
615 struct iwl_rxq_sync_notification *notif;
616 struct iwl_mvm_internal_rxq_notif *internal_notif;
618 notif = (void *)pkt->data;
619 internal_notif = (void *)notif->payload;
/* a cookie mismatch means this sync message belongs to an old cycle */
621 if (internal_notif->sync &&
622 mvm->queue_sync_cookie != internal_notif->cookie) {
623 WARN_ONCE(1, "Received expired RX queue sync message\n");
627 switch (internal_notif->type) {
628 case IWL_MVM_RXQ_EMPTY:
630 case IWL_MVM_RXQ_NOTIF_DEL_BA:
631 iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
634 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
/* last queue to finish the sync wakes whoever is waiting on it */
637 if (internal_notif->sync &&
638 !atomic_dec_return(&mvm->queue_sync_counter))
639 wake_up(&mvm->rx_sync_waitq);
/*
 * Core BA-session reorder logic: decide whether a received MPDU can be
 * passed to mac80211 immediately (return false), must be buffered until
 * the sequence hole closes (return true, frame queued), or is a duplicate
 * /outdated frame to drop (return true, frame freed).  NOTE(review):
 * lines are elided from this chunk; several declarations (baid, nssn, sn,
 * index), gotos and return statements are not visible here.
 */
643 * Returns true if the MPDU was buffered\dropped, false if it should be passed
646 static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
647 struct napi_struct *napi,
649 struct ieee80211_sta *sta,
651 struct iwl_rx_mpdu_desc *desc)
653 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
654 struct iwl_mvm_sta *mvm_sta;
655 struct iwl_mvm_baid_data *baid_data;
656 struct iwl_mvm_reorder_buffer *buffer;
657 struct sk_buff *tail;
658 u32 reorder = le32_to_cpu(desc->reorder_data);
659 bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
661 desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
662 u8 tid = ieee80211_get_tid(hdr);
663 u8 sub_frame_idx = desc->amsdu_info &
664 IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
665 struct iwl_mvm_reorder_buf_entry *entries;
/* BAID is extracted from the firmware's reorder metadata word */
670 baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
671 IWL_RX_MPDU_REORDER_BAID_SHIFT;
674 * This also covers the case of receiving a Block Ack Request
675 * outside a BA session; we'll pass it to mac80211 and that
676 * then sends a delBA action frame.
678 if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
682 if (WARN_ONCE(IS_ERR_OR_NULL(sta),
683 "Got valid BAID without a valid station assigned\n"))
686 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
688 /* not a data packet or a bar */
689 if (!ieee80211_is_back_req(hdr->frame_control) &&
690 (!ieee80211_is_data_qos(hdr->frame_control) ||
691 is_multicast_ether_addr(hdr->addr1)))
694 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
697 baid_data = rcu_dereference(mvm->baid_map[baid]);
700 "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
/* sanity: firmware BAID mapping must match our sta/tid bookkeeping */
705 if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
706 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
707 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
711 nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
712 sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
713 IWL_RX_MPDU_REORDER_SN_SHIFT;
715 buffer = &baid_data->reorder_buf[queue];
716 entries = &baid_data->entries[queue * baid_data->entries_per_queue];
718 spin_lock_bh(&buffer->lock);
/* first frame on this queue since the session (re)started */
720 if (!buffer->valid) {
721 if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
722 spin_unlock_bh(&buffer->lock);
725 buffer->valid = true;
/* a BAR flushes the buffer up to its NSSN */
728 if (ieee80211_is_back_req(hdr->frame_control)) {
729 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
734 * If there was a significant jump in the nssn - adjust.
735 * If the SN is smaller than the NSSN it might need to first go into
736 * the reorder buffer, in which case we just release up to it and the
737 * rest of the function will take care of storing it and releasing up to
740 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
742 !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
743 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
745 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
749 /* drop any oudated packets */
750 if (ieee80211_sn_less(sn, buffer->head_sn))
753 /* release immediately if allowed by nssn and no stored frames */
754 if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
755 if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
757 (!amsdu || last_subframe))
758 buffer->head_sn = nssn;
759 /* No need to update AMSDU last SN - we are moving the head */
760 spin_unlock_bh(&buffer->lock);
765 * release immediately if there are no stored frames, and the sn is
767 * This can happen due to reorder timer, where NSSN is behind head_sn.
768 * When we released everything, and we got the next frame in the
769 * sequence, according to the NSSN we can't release immediately,
770 * while technically there is no hole and we can move forward.
772 if (!buffer->num_stored && sn == buffer->head_sn) {
773 if (!amsdu || last_subframe)
774 buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
775 /* No need to update AMSDU last SN - we are moving the head */
776 spin_unlock_bh(&buffer->lock);
780 index = sn % buffer->buf_size;
783 * Check if we already stored this frame
784 * As AMSDU is either received or not as whole, logic is simple:
785 * If we have frames in that position in the buffer and the last frame
786 * originated from AMSDU had a different SN then it is a retransmission.
787 * If it is the same SN then if the subframe index is incrementing it
788 * is the same AMSDU - otherwise it is a retransmission.
790 tail = skb_peek_tail(&entries[index].e.frames);
793 else if (tail && (sn != buffer->last_amsdu ||
794 buffer->last_sub_index >= sub_frame_idx))
797 /* put in reorder buffer */
798 __skb_queue_tail(&entries[index].e.frames, skb);
799 buffer->num_stored++;
800 entries[index].e.reorder_time = jiffies;
/* remember A-MSDU position so following subframes join the same slot */
803 buffer->last_amsdu = sn;
804 buffer->last_sub_index = sub_frame_idx;
808 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
809 * The reason is that NSSN advances on the first sub-frame, and may
810 * cause the reorder buffer to advance before all the sub-frames arrive.
811 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
812 * SN 1. NSSN for first sub frame will be 3 with the result of driver
813 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
814 * already ahead and it will be dropped.
815 * If the last sub-frame is not on this queue - we will get frame
816 * release notification with up to date NSSN.
818 if (!amsdu || last_subframe)
819 iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
821 spin_unlock_bh(&buffer->lock);
826 spin_unlock_bh(&buffer->lock);
/*
 * Refresh the BA session's last-rx timestamp so the session does not
 * expire while traffic is flowing; the update is rate-limited to once per
 * timeout interval to avoid cache-line bouncing between RX queues.
 * NOTE(review): lines are elided from this chunk; the rcu lock/unlock and
 * the actual last_rx store are not visible here.
 */
830 static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
831 u32 reorder_data, u8 baid)
833 unsigned long now = jiffies;
834 unsigned long timeout;
835 struct iwl_mvm_baid_data *data;
839 data = rcu_dereference(mvm->baid_map[baid]);
842 "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
850 timeout = data->timeout;
852 * Do not update last rx all the time to avoid cache bouncing
853 * between the rx queues.
854 * Update it every timeout. Worst case is the session will
855 * expire after ~ 2 * timeout, which doesn't matter that much.
857 if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
858 /* Update is atomic */
/*
 * Reverse the byte order of a 6-byte MAC address in place (via a stack
 * copy).  NOTE(review): the loop index declaration is elided from this
 * chunk.
 */
865 static void iwl_mvm_flip_address(u8 *addr)
868 u8 mac_addr[ETH_ALEN];
870 for (i = 0; i < ETH_ALEN; i++)
871 mac_addr[i] = addr[ETH_ALEN - i - 1];
872 ether_addr_copy(addr, mac_addr);
875 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
876 struct iwl_rx_cmd_buffer *rxb, int queue)
878 struct ieee80211_rx_status *rx_status;
879 struct iwl_rx_packet *pkt = rxb_addr(rxb);
880 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
881 struct ieee80211_hdr *hdr;
882 u32 len = le16_to_cpu(desc->mpdu_len);
883 u32 rate_n_flags, gp2_on_air_rise;
884 u16 phy_info = le16_to_cpu(desc->phy_info);
885 struct ieee80211_sta *sta = NULL;
887 u8 crypt_len = 0, channel, energy_a, energy_b;
888 struct ieee80211_radiotap_he *he = NULL;
889 struct ieee80211_radiotap_he_mu *he_mu = NULL;
890 u32 he_type = 0xffffffff;
891 /* this is invalid e.g. because puncture type doesn't allow 0b11 */
892 #define HE_PHY_DATA_INVAL ((u64)-1)
893 u64 he_phy_data = HE_PHY_DATA_INVAL;
896 if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
899 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
900 rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
901 channel = desc->v3.channel;
902 gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
903 energy_a = desc->v3.energy_a;
904 energy_b = desc->v3.energy_b;
905 desc_size = sizeof(*desc);
907 rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
908 channel = desc->v1.channel;
909 gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
910 energy_a = desc->v1.energy_a;
911 energy_b = desc->v1.energy_b;
912 desc_size = IWL_RX_DESC_SIZE_V1;
915 hdr = (void *)(pkt->data + desc_size);
916 /* Dont use dev_alloc_skb(), we'll have enough headroom once
917 * ieee80211_hdr pulled.
919 skb = alloc_skb(128, GFP_ATOMIC);
921 IWL_ERR(mvm, "alloc_skb failed\n");
925 if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
927 * If the device inserted padding it means that (it thought)
928 * the 802.11 header wasn't a multiple of 4 bytes long. In
929 * this case, reserve two bytes at the start of the SKB to
930 * align the payload properly in case we end up copying it.
935 rx_status = IEEE80211_SKB_RXCB(skb);
937 if (rate_n_flags & RATE_MCS_HE_MSK) {
938 static const struct ieee80211_radiotap_he known = {
939 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
940 IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
941 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
942 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
943 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
944 IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
946 static const struct ieee80211_radiotap_he_mu mu_known = {
947 .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
948 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
949 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
950 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
951 .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
953 unsigned int radiotap_len = 0;
955 he = skb_put_data(skb, &known, sizeof(known));
956 radiotap_len += sizeof(known);
957 rx_status->flag |= RX_FLAG_RADIOTAP_HE;
959 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
961 if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
962 if (mvm->trans->cfg->device_family >=
963 IWL_DEVICE_FAMILY_22560)
964 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
966 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
968 if (he_type == RATE_MCS_HE_TYPE_MU) {
969 he_mu = skb_put_data(skb, &mu_known,
971 radiotap_len += sizeof(mu_known);
972 rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
976 /* temporarily hide the radiotap data */
977 __skb_pull(skb, radiotap_len);
980 rx_status = IEEE80211_SKB_RXCB(skb);
982 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
983 le32_to_cpu(pkt->len_n_flags), queue,
990 * Keep packets with CRC errors (and with overrun) for monitor mode
991 * (otherwise the firmware discards them) but mark them as bad.
993 if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
994 !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
995 IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
996 le16_to_cpu(desc->status));
997 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
999 /* set the preamble flag if appropriate */
1000 if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
1001 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
1003 if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
1004 u64 tsf_on_air_rise;
1006 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1007 tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
1009 tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
1011 rx_status->mactime = tsf_on_air_rise;
1012 /* TSF as indicated by the firmware is at INA time */
1013 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
1014 } else if (he_type == RATE_MCS_HE_TYPE_SU) {
1017 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1018 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1020 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1023 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
1024 if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
1027 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
1029 if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1030 rx_status->ampdu_reference = mvm->ampdu_ref;
1033 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1034 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
/*
 * NOTE(review): this excerpt is the tail of a much larger RX-MPDU handler
 * whose opening (and many interior lines) are not visible here — the
 * comments below describe only what the visible lines show; anything
 * depending on elided lines is marked as an assumption.
 */
/* HE-MU: extract SIG-B / preamble-puncturing fields from he_phy_data
 * into the radiotap HE-MU flags (via FIELD_GET + le16_encode_bits). */
1035 if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1037 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1039 } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
1041 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
1043 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
1045 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
1047 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
1049 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
1051 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
1053 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
1055 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
1057 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
1059 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
/* Fill in PHY-level rx_status: on-air timestamp, band (channel > 14
 * means 5 GHz; the 2.4 GHz arm of the ternary is elided here), center
 * frequency, and signal strength from the per-antenna energy values. */
1061 rx_status->device_timestamp = gp2_on_air_rise;
1062 rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
1064 rx_status->freq = ieee80211_channel_to_frequency(channel,
1066 iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
1069 /* update aggregation data for monitor sake on default queue */
1070 if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1071 bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
/* he_phy_data lives in v3 of the descriptor on 22560+ family NICs,
 * v1 otherwise (the `else` line itself is elided in this excerpt). */
1074 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1075 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1077 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1079 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1080 /* toggle is switched whenever new aggregation starts */
1081 if (toggle_bit != mvm->ampdu_toggle) {
1083 mvm->ampdu_toggle = toggle_bit;
/* For HE-MU frames with valid PHY data we can also report the
 * EOF-delimiter bit of the new A-MPDU to radiotap consumers. */
1085 if (he_phy_data != HE_PHY_DATA_INVAL &&
1086 he_type == RATE_MCS_HE_TYPE_MU) {
1087 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1088 if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1091 RX_FLAG_AMPDU_EOF_BIT;
1094 rx_status->ampdu_reference = mvm->ampdu_ref;
/* Station lookup: prefer the firmware-supplied station ID; otherwise
 * fall back to an address-based lookup on addr2 (TA) for unicast. */
1099 if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
1100 u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
1102 if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
1103 sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
1107 } else if (!is_multicast_ether_addr(hdr->addr2)) {
1109 * This is fine since we prevent two stations with the same
1110 * address from being added.
1112 sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
/* Per-station processing (the `if (sta)` guard appears elided): pull
 * the BAID out of the reorder data for the reorder-buffer path below. */
1116 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1117 struct ieee80211_vif *tx_blocked_vif =
1118 rcu_dereference(mvm->csa_tx_blocked_vif);
1119 u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
1120 IWL_RX_MPDU_REORDER_BAID_MASK) >>
1121 IWL_RX_MPDU_REORDER_BAID_SHIFT);
/* Kick the traffic-load (TCM) worker when enough time has passed and
 * this is unicast data addressed to us. */
1123 if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
1124 !is_multicast_ether_addr(hdr->addr1) &&
1125 ieee80211_is_data(hdr->frame_control) &&
1126 time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1127 schedule_delayed_work(&mvm->tcm.work, 0);
1130 * We have tx blocked stations (with CS bit). If we heard
1131 * frames from a blocked station on a new channel we can
1134 if (unlikely(tx_blocked_vif) &&
1135 tx_blocked_vif == mvmsta->vif) {
1136 struct iwl_mvm_vif *mvmvif =
1137 iwl_mvm_vif_from_mac80211(tx_blocked_vif);
/* Re-enable TX to the station once it is heard on the CSA target
 * frequency (the trailing argument of the call is elided here). */
1139 if (mvmvif->csa_target_freq == rx_status->freq)
1140 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
1144 rs_update_last_rssi(mvm, mvmsta, rx_status);
/* FW debug: on beacons, compare signal against the low-RSSI trigger
 * threshold and collect firmware debug data if below it. */
1146 if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
1147 ieee80211_is_beacon(hdr->frame_control)) {
1148 struct iwl_fw_dbg_trigger_tlv *trig;
1149 struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
1153 trig = iwl_fw_dbg_get_trigger(mvm->fw,
1154 FW_DBG_TRIGGER_RSSI);
1155 rssi_trig = (void *)trig->data;
1156 rssi = le32_to_cpu(rssi_trig->rssi);
1159 iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
1160 ieee80211_vif_to_wdev(mvmsta->vif),
1162 if (trig_check && rx_status->signal < rssi)
1163 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
/* HW checksum results for data frames, then duplicate detection
 * (the drop path of the dup check is elided in this excerpt). */
1167 if (ieee80211_is_data(hdr->frame_control))
1168 iwl_mvm_rx_csum(sta, skb, desc);
1170 if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
1176 * Our hardware de-aggregates AMSDUs but copies the mac header
1177 * as it to the de-aggregated MPDUs. We need to turn off the
1178 * AMSDU bit in the QoS control ourselves.
1179 * In addition, HW reverses addr3 and addr4 - reverse it back.
1181 if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
1182 !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
1183 u8 *qc = ieee80211_get_qos_ctl(hdr);
1185 *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
/* Only the 9000 family needs the addr3/addr4 byte-order fixup. */
1187 if (mvm->trans->cfg->device_family ==
1188 IWL_DEVICE_FAMILY_9000) {
1189 iwl_mvm_flip_address(hdr->addr3);
1191 if (ieee80211_has_a4(hdr->frame_control))
1192 iwl_mvm_flip_address(hdr->addr4);
/* Notify the BA reorder machinery that a frame for this BAID arrived. */
1195 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
1196 u32 reorder_data = le32_to_cpu(desc->reorder_data);
1198 iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
/* Channel-width bits of rate_n_flags -> rx_status->bw (case bodies and
 * break statements are partially elided here). */
1202 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1203 case RATE_MCS_CHAN_WIDTH_20:
1205 case RATE_MCS_CHAN_WIDTH_40:
1206 rx_status->bw = RATE_INFO_BW_40;
1208 case RATE_MCS_CHAN_WIDTH_80:
1209 rx_status->bw = RATE_INFO_BW_80;
1211 case RATE_MCS_CHAN_WIDTH_160:
1212 rx_status->bw = RATE_INFO_BW_160;
/* HE extended-SU with the 106-tone flag is reported as an RU. */
1216 if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
1217 rate_n_flags & RATE_MCS_HE_106T_MSK) {
1218 rx_status->bw = RATE_INFO_BW_HE_RU;
1219 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
/* HE-MU with TSF-overload: decode RU allocation size/offset from
 * he_phy_data into rx_status->he_ru and the radiotap RU fields. */
1222 if (rate_n_flags & RATE_MCS_HE_MSK &&
1223 phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
1224 he_type == RATE_MCS_HE_TYPE_MU) {
1226 * Unfortunately, we have to leave the mac80211 data
1227 * incorrect for the case that we receive an HE-MU
1228 * transmission and *don't* have the he_mu pointer,
1229 * i.e. we don't have the phy data (due to the bits
1230 * being used for TSF). This shouldn't happen though
1231 * as management frames where we need the TSF/timers
1232 * are not be transmitted in HE-MU, I think.
1234 u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
1237 rx_status->bw = RATE_INFO_BW_HE_RU;
/* Map the raw RU value to nl80211 RU-allocation sizes (the switch
 * header, case labels and offset computations are elided here). */
1241 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
1245 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
1249 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1253 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
1257 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
1261 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
1264 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
1268 le16_encode_bits(offs,
1269 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
1271 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
1272 if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
1274 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
1277 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
/* Rate decoding: short-GI / greenfield / LDPC flags, then one branch
 * each for HT, VHT and (implicitly, in the elided else) HE rates. */
1280 if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
1281 rate_n_flags & RATE_MCS_SGI_MSK)
1282 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1283 if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1284 rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
1285 if (rate_n_flags & RATE_MCS_LDPC_MSK)
1286 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
1287 if (rate_n_flags & RATE_MCS_HT_MSK) {
1288 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1290 rx_status->encoding = RX_ENC_HT;
1291 rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
1292 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1293 } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
1294 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1297 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1298 RATE_VHT_MCS_NSS_POS) + 1;
1299 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1300 rx_status->encoding = RX_ENC_VHT;
1301 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1302 if (rate_n_flags & RATE_MCS_BF_MSK)
1303 rx_status->enc_flags |= RX_ENC_FLAG_BF;
/* HE branch: same STBC/NSS/rate-code extraction as VHT above. */
1305 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1308 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1309 RATE_VHT_MCS_NSS_POS) + 1;
1310 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1311 rx_status->encoding = RX_ENC_HE;
1312 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1313 if (rate_n_flags & RATE_MCS_BF_MSK)
1314 rx_status->enc_flags |= RX_ENC_FLAG_BF;
1317 !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
/* Compile-time check that the radiotap HE format values line up with
 * the firmware's HE-type encoding, so he_type can be copied directly. */
1319 #define CHECK_TYPE(F) \
1320 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
1321 (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1328 he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
/* NOTE(review): testing `rate_n_flags & RATE_MCS_BF_POS` looks
 * suspicious — a *_POS name suggests a bit position, and the sibling
 * beamforming checks above use RATE_MCS_BF_MSK. Verify against the
 * rate-flag definitions; this may set TXBF for the wrong condition. */
1330 if (rate_n_flags & RATE_MCS_BF_POS)
1331 he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
/* HE guard-interval bits -> nl80211 GI values (case labels elided). */
1333 switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
1334 RATE_MCS_HE_GI_LTF_POS) {
1336 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1339 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1342 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1345 if (rate_n_flags & RATE_MCS_SGI_MSK)
1346 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1348 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
/* Per-HE-type radiotap LTF-symbol count (enclosing switch header is
 * elided; these are its cases). */
1353 case RATE_MCS_HE_TYPE_SU: {
1356 /* LTF syms correspond to streams */
1358 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1359 switch (rx_status->nss) {
1379 WARN_ONCE(1, "invalid nss: %d\n",
1384 le16_encode_bits(val,
1385 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
1388 case RATE_MCS_HE_TYPE_MU: {
/* MU: LTF count comes from he_phy_data (v3 vs v1 descriptor again). */
1392 if (mvm->trans->cfg->device_family >=
1393 IWL_DEVICE_FAMILY_22560)
1394 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1396 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1398 if (he_phy_data == HE_PHY_DATA_INVAL)
1401 val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
1405 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1407 cpu_to_le16(FIELD_PREP(
1408 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
1412 case RATE_MCS_HE_TYPE_EXT_SU:
1413 case RATE_MCS_HE_TYPE_TRIG:
1414 /* not supported yet */
/* Legacy (non-HT/VHT/HE) rates: convert to a mac80211 rate index and
 * drop the frame on an invalid conversion (drop path elided). */
1418 int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1421 if (WARN(rate < 0 || rate > 0xFF,
1422 "Invalid rate flags 0x%x, band %d,\n",
1423 rate_n_flags, rx_status->band)) {
1427 rx_status->rate_idx = rate;
1431 /* management stuff on default queue */
1433 if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
1434 ieee80211_is_probe_resp(hdr->frame_control)) &&
1435 mvm->sched_scan_pass_all ==
1436 SCHED_SCAN_PASS_ALL_ENABLED))
1437 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
/* Beacons/probe responses get a boottime timestamp for scan results. */
1439 if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
1440 ieee80211_is_probe_resp(hdr->frame_control)))
1441 rx_status->boottime_ns = ktime_get_boot_ns();
/* Build the final skb (error path elided) and hand it to mac80211,
 * unless the reorder buffer consumed it. */
1444 if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
1449 if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
1450 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
/*
 * Handle a firmware frame-release notification: the firmware asks the
 * driver to flush the RX reorder buffer for a given block-ack session
 * (BAID) up to the given NSSN on this RX queue.
 *
 * NOTE(review): this excerpt is incomplete — the function continues
 * past the visible lines, and the rcu_read_lock()/unlock pairing that
 * must protect the rcu_dereference() calls below is not visible here;
 * confirm it exists in the full source.
 */
1455 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
1456 struct iwl_rx_cmd_buffer *rxb, int queue)
1458 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1459 struct iwl_frame_release *release = (void *)pkt->data;
1460 struct ieee80211_sta *sta;
1461 struct iwl_mvm_reorder_buffer *reorder_buf;
1462 struct iwl_mvm_baid_data *ba_data;
1464 int baid = release->baid;
1466 IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
1467 release->baid, le16_to_cpu(release->nssn));
/* An invalid BAID from the firmware is a bug worth warning about. */
1469 if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
/* Look up the BA session data and its station; both are RCU-protected
 * and may legitimately disappear, hence the WARN+bail checks (the
 * bail-out lines are elided in this excerpt). */
1474 ba_data = rcu_dereference(mvm->baid_map[baid]);
1475 if (WARN_ON_ONCE(!ba_data))
1478 sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
1479 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
/* Release buffered frames up to NSSN under the per-queue reorder
 * buffer lock. */
1482 reorder_buf = &ba_data->reorder_buf[queue];
1484 spin_lock_bh(&reorder_buf->lock);
1485 iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
1486 le16_to_cpu(release->nssn));
1487 spin_unlock_bh(&reorder_buf->lock);