2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/jiffies.h>
15 #include <linux/slab.h>
16 #include <linux/kernel.h>
17 #include <linux/skbuff.h>
18 #include <linux/netdevice.h>
19 #include <linux/etherdevice.h>
20 #include <linux/rcupdate.h>
21 #include <linux/export.h>
22 #include <linux/bitops.h>
23 #include <net/mac80211.h>
24 #include <net/ieee80211_radiotap.h>
25 #include <asm/unaligned.h>
27 #include "ieee80211_i.h"
28 #include "driver-ops.h"
/* Account one received frame of @len bytes in the per-CPU software
 * netstats of @dev.  The u64_stats_update_begin/end pair makes the
 * 64-bit counters readable consistently on 32-bit SMP systems. */
37 static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
39 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
41 u64_stats_update_begin(&tstats->syncp);
43 tstats->rx_bytes += len;
44 u64_stats_update_end(&tstats->syncp);
/* Locate the BSSID within an 802.11 header, depending on the frame
 * class (data/mgmt/ctl) and the receiving interface @type.  Frames
 * shorter than the minimum header length for their class are rejected.
 * NOTE(review): the return statements are not visible in this extract —
 * presumably a pointer into @hdr is returned (NULL on failure/drop);
 * confirm against the full source. */
47 static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
48 enum nl80211_iftype type)
50 __le16 fc = hdr->frame_control;
52 if (ieee80211_is_data(fc)) {
53 if (len < 24) /* drop incorrect hdr len (data) */
56 if (ieee80211_has_a4(fc))
58 if (ieee80211_has_tods(fc))
60 if (ieee80211_has_fromds(fc))
66 if (ieee80211_is_mgmt(fc)) {
67 if (len < 24) /* drop incorrect hdr len (mgmt) */
72 if (ieee80211_is_ctl(fc)) {
73 if (ieee80211_is_pspoll(fc))
/* BlockAck request: which address is the BSSID depends on our role */
76 if (ieee80211_is_back_req(fc)) {
78 case NL80211_IFTYPE_STATION:
80 case NL80211_IFTYPE_AP:
81 case NL80211_IFTYPE_AP_VLAN:
84 break; /* fall through to the return */
93 * monitor mode reception
95 * This function cleans up the SKB, i.e. it removes all the stuff
96 * only useful for monitoring.
/* Trim the trailing FCS (when @present_fcs_len is non-zero) and strip
 * any driver-supplied vendor radiotap data from the head of @skb. */
98 static void remove_monitor_info(struct sk_buff *skb,
99 unsigned int present_fcs_len,
100 unsigned int rtap_vendor_space)
103 __pskb_trim(skb, skb->len - present_fcs_len);
104 __pskb_pull(skb, rtap_vendor_space);
/* Decide whether this frame must not be passed beyond the monitor path:
 *  - FCS/PLCP checksum failed or the driver flagged it monitor-only,
 *  - too short to hold even a minimal header plus FCS/vendor space,
 *  - a control frame other than PS-Poll or BlockAck request.
 * NOTE(review): the individual return statements are not visible in
 * this extract; the conditions above are the visible tests. */
107 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
108 unsigned int rtap_vendor_space)
110 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
111 struct ieee80211_hdr *hdr;
/* the real 802.11 header starts after the vendor radiotap blob */
113 hdr = (void *)(skb->data + rtap_vendor_space);
115 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
116 RX_FLAG_FAILED_PLCP_CRC |
117 RX_FLAG_ONLY_MONITOR))
120 if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
123 if (ieee80211_is_ctl(hdr->frame_control) &&
124 !ieee80211_is_pspoll(hdr->frame_control) &&
125 !ieee80211_is_back_req(hdr->frame_control))
/* Compute how many bytes of radiotap header are needed to describe
 * @status: the fixed header plus extra presence bitmaps for per-chain
 * antenna data, per-field payload (with its alignment padding), and any
 * vendor namespace data announced via RX_FLAG_RADIOTAP_VENDOR_DATA. */
132 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
133 struct ieee80211_rx_status *status,
138 /* always present fields */
139 len = sizeof(struct ieee80211_radiotap_header) + 8;
141 /* allocate extra bitmaps */
143 len += 4 * hweight8(status->chains);
144 /* vendor presence bitmap */
145 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
148 if (ieee80211_have_rx_timestamp(status)) {
152 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
155 /* antenna field, if we don't have per-chain info */
159 /* padding for RX_FLAGS if necessary */
162 if (status->encoding == RX_ENC_HT) /* HT info */
165 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
170 if (status->encoding == RX_ENC_VHT) {
175 if (local->hw.radiotap_timestamp.units_pos >= 0) {
180 if (status->chains) {
181 /* antenna and antenna signal fields */
182 len += 2 * hweight8(status->chains);
185 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
186 struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
188 /* alignment for fixed 6-byte vendor data header */
190 /* vendor data header */
/* rtap->align == 0 would make ALIGN() below meaningless */
192 if (WARN_ON(rtap->align == 0))
194 len = ALIGN(len, rtap->align);
195 len += rtap->len + rtap->pad;
/* MU-MIMO "follow" monitor support: if this frame is a VHT Group ID
 * Management action frame addressed to the configured mu_follow_addr,
 * queue a copy onto the interface's skb queue for deferred handling by
 * the interface work.  All checks bail out silently otherwise. */
201 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
203 int rtap_vendor_space)
206 struct ieee80211_hdr_3addr hdr;
209 } __packed __aligned(2) action;
214 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
/* need the full action frame plus the group data to be present */
216 if (skb->len < rtap_vendor_space + sizeof(action) +
217 VHT_MUMIMO_GROUPS_DATA_LEN)
220 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
/* copy out of the skb: the header may not be linear/aligned */
223 skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
225 if (!ieee80211_is_action(action.hdr.frame_control))
228 if (action.category != WLAN_CATEGORY_VHT)
231 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
234 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
/* take a private copy; the original continues through the RX path */
237 skb = skb_copy(skb, GFP_ATOMIC);
241 skb_queue_tail(&sdata->skb_queue, skb);
242 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
246 * ieee80211_add_rx_radiotap_header - add radiotap header
248 * add a radiotap header containing all the fields which the hardware provided.
/* @rtap_len must have been computed by ieee80211_rx_radiotap_hdrlen()
 * for the same @status; the skb must have that much headroom (minus any
 * vendor data already at the head, which is pulled and re-described). */
251 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
253 struct ieee80211_rate *rate,
254 int rtap_len, bool has_fcs)
256 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
257 struct ieee80211_radiotap_header *rthdr;
262 u16 channel_flags = 0;
264 unsigned long chains = status->chains;
265 struct ieee80211_vendor_radiotap rtap = {};
/* copy the vendor descriptor off the skb head before pushing the
 * radiotap header over it */
267 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
268 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
269 /* rtap.len and rtap.pad are undone immediately */
270 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
274 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
277 rthdr = skb_push(skb, rtap_len);
/* zero only our part; the vendor payload at the end is already valid */
278 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
279 it_present = &rthdr->it_present;
281 /* radiotap header, set always present flags */
282 rthdr->it_len = cpu_to_le16(rtap_len);
283 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
284 BIT(IEEE80211_RADIOTAP_CHANNEL) |
285 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
288 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
/* one extra (chained) presence bitmap per reported chain */
290 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
292 BIT(IEEE80211_RADIOTAP_EXT) |
293 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
294 put_unaligned_le32(it_present_val, it_present);
296 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
297 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
/* chain the vendor namespace bitmap last, if present */
300 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
301 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
302 BIT(IEEE80211_RADIOTAP_EXT);
303 put_unaligned_le32(it_present_val, it_present);
305 it_present_val = rtap.present;
308 put_unaligned_le32(it_present_val, it_present);
310 pos = (void *)(it_present + 1);
312 /* the order of the following fields is important */
314 /* IEEE80211_RADIOTAP_TSFT */
315 if (ieee80211_have_rx_timestamp(status)) {
/* TSFT is a u64 and must be 8-byte aligned within the header */
317 while ((pos - (u8 *)rthdr) & 7)
320 ieee80211_calculate_rx_timestamp(local, status,
323 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
327 /* IEEE80211_RADIOTAP_FLAGS */
328 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
329 *pos |= IEEE80211_RADIOTAP_F_FCS;
330 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
331 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
332 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
333 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
336 /* IEEE80211_RADIOTAP_RATE */
337 if (!rate || status->encoding != RX_ENC_LEGACY) {
339 * Without rate information don't add it. If we have,
340 * MCS information is a separate field in radiotap,
341 * added below. The byte here is needed as padding
342 * for the channel though, so initialise it to 0.
347 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
348 if (status->bw == RATE_INFO_BW_10)
350 else if (status->bw == RATE_INFO_BW_5)
/* bitrate is in 100kbit/s units; radiotap wants 500kbit/s units,
 * scaled down further by the half/quarter-rate shift */
352 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
356 /* IEEE80211_RADIOTAP_CHANNEL */
357 put_unaligned_le16(status->freq, pos);
359 if (status->bw == RATE_INFO_BW_10)
360 channel_flags |= IEEE80211_CHAN_HALF;
361 else if (status->bw == RATE_INFO_BW_5)
362 channel_flags |= IEEE80211_CHAN_QUARTER;
364 if (status->band == NL80211_BAND_5GHZ)
365 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
366 else if (status->encoding != RX_ENC_LEGACY)
367 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
368 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
369 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
371 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
373 channel_flags |= IEEE80211_CHAN_2GHZ;
374 put_unaligned_le16(channel_flags, pos);
377 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
378 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
379 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
380 *pos = status->signal;
382 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
386 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
388 if (!status->chains) {
389 /* IEEE80211_RADIOTAP_ANTENNA */
390 *pos = status->antenna;
394 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
396 /* IEEE80211_RADIOTAP_RX_FLAGS */
397 /* ensure 2 byte alignment for the 2 byte field as required */
398 if ((pos - (u8 *)rthdr) & 1)
400 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
401 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
402 put_unaligned_le16(rx_flags, pos);
405 if (status->encoding == RX_ENC_HT) {
408 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
409 *pos++ = local->hw.radiotap_mcs_details;
411 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
412 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
413 if (status->bw == RATE_INFO_BW_40)
414 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
415 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
416 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
417 if (status->enc_flags & RX_ENC_FLAG_LDPC)
418 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
419 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
420 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
422 *pos++ = status->rate_idx;
425 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
428 /* ensure 4 byte alignment */
429 while ((pos - (u8 *)rthdr) & 3)
432 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
433 put_unaligned_le32(status->ampdu_reference, pos);
435 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
436 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
437 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
438 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
439 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
440 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
441 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
442 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
443 put_unaligned_le16(flags, pos);
445 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
446 *pos++ = status->ampdu_delimiter_crc;
452 if (status->encoding == RX_ENC_VHT) {
453 u16 known = local->hw.radiotap_vht_details;
455 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
456 put_unaligned_le16(known, pos);
459 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
460 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
461 /* in VHT, STBC is binary */
462 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
463 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
464 if (status->enc_flags & RX_ENC_FLAG_BF)
465 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
468 switch (status->bw) {
469 case RATE_INFO_BW_80:
472 case RATE_INFO_BW_160:
475 case RATE_INFO_BW_40:
/* radiotap VHT mcs_nss byte: MCS in high nibble, NSS in low */
482 *pos = (status->rate_idx << 4) | status->nss;
485 if (status->enc_flags & RX_ENC_FLAG_LDPC)
486 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
494 if (local->hw.radiotap_timestamp.units_pos >= 0) {
496 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
499 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
501 /* ensure 8 byte alignment */
502 while ((pos - (u8 *)rthdr) & 7)
505 put_unaligned_le64(status->device_timestamp, pos);
508 if (local->hw.radiotap_timestamp.accuracy >= 0) {
509 accuracy = local->hw.radiotap_timestamp.accuracy;
510 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
512 put_unaligned_le16(accuracy, pos);
515 *pos++ = local->hw.radiotap_timestamp.units_pos;
/* per-chain signal values, matching the chained bitmaps above */
519 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
520 *pos++ = status->chain_signal[chain];
524 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
525 /* ensure 2 byte alignment for the vendor field as required */
526 if ((pos - (u8 *)rthdr) & 1)
528 *pos++ = rtap.oui[0];
529 *pos++ = rtap.oui[1];
530 *pos++ = rtap.oui[2];
532 put_unaligned_le16(rtap.len, pos);
534 /* align the actual payload as requested */
535 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
537 /* data (and possible padding) already follows */
/* Produce an skb suitable for delivery to monitor interfaces, with the
 * radiotap header prepended.  When @use_origskb, the caller's skb is
 * taken over (expanding headroom if the radiotap header needs more room
 * than the vendor space it replaces); otherwise a copy is made so the
 * original can continue through the normal RX path.
 * NOTE(review): allocation-failure return paths are not visible in this
 * extract — confirm NULL handling against the full source. */
541 static struct sk_buff *
542 ieee80211_make_monitor_skb(struct ieee80211_local *local,
543 struct sk_buff **origskb,
544 struct ieee80211_rate *rate,
545 int rtap_vendor_space, bool use_origskb)
547 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
548 int rt_hdrlen, needed_headroom;
551 /* room for the radiotap header based on driver features */
552 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
/* the vendor blob at the head is replaced by the radiotap header */
553 needed_headroom = rt_hdrlen - rtap_vendor_space;
556 /* only need to expand headroom if necessary */
561 * This shouldn't trigger often because most devices have an
562 * RX header they pull before we get here, and that should
563 * be big enough for our radiotap information. We should
564 * probably export the length to drivers so that we can have
565 * them allocate enough headroom to start with.
567 if (skb_headroom(skb) < needed_headroom &&
568 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
574 * Need to make a copy and possibly remove radiotap header
575 * and FCS from the original.
577 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
583 /* prepend radiotap information */
584 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
586 skb_reset_mac_header(skb);
587 skb->ip_summed = CHECKSUM_UNNECESSARY;
588 skb->pkt_type = PACKET_OTHERHOST;
589 skb->protocol = htons(ETH_P_802_2);
595 * This function copies a received frame to all monitor interfaces and
596 * returns a cleaned-up SKB that no longer includes the FCS nor the
597 * radiotap header the driver might have added.
/* Runs under RCU (rcu_dereference of local->monitor_sdata and the
 * RCU list walk below).  May consume @origskb (returning NULL) when
 * the frame is monitor-only or invalid. */
599 static struct sk_buff *
600 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
601 struct ieee80211_rate *rate)
603 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
604 struct ieee80211_sub_if_data *sdata;
605 struct sk_buff *monskb = NULL;
606 int present_fcs_len = 0;
607 unsigned int rtap_vendor_space = 0;
608 struct ieee80211_sub_if_data *monitor_sdata =
609 rcu_dereference(local->monitor_sdata);
610 bool only_monitor = false;
612 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
613 struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
615 rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
619 * First, we may need to make a copy of the skb because
620 * (1) we need to modify it for radiotap (if not present), and
621 * (2) the other RX handlers will modify the skb we got.
623 * We don't need to, of course, if we aren't going to return
624 * the SKB because it has a bad FCS/PLCP checksum.
627 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
628 if (unlikely(origskb->len <= FCS_LEN)) {
631 dev_kfree_skb(origskb);
634 present_fcs_len = FCS_LEN;
637 /* ensure hdr->frame_control and vendor radiotap data are in skb head */
638 if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
639 dev_kfree_skb(origskb);
643 only_monitor = should_drop_frame(origskb, present_fcs_len,
/* no monitors (or driver asked to skip them): clean up and return */
646 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
648 dev_kfree_skb(origskb);
652 remove_monitor_info(origskb, present_fcs_len,
657 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
/* deliver a (cloned) monitor skb to every monitor interface */
659 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
660 bool last_monitor = list_is_last(&sdata->u.mntr.list,
664 monskb = ieee80211_make_monitor_skb(local, &origskb,
677 skb = skb_clone(monskb, GFP_ATOMIC);
681 skb->dev = sdata->dev;
682 ieee80211_rx_stats(skb->dev, skb->len);
683 netif_receive_skb(skb);
691 /* this happens if last_monitor was erroneously false */
692 dev_kfree_skb(monskb);
698 remove_monitor_info(origskb, present_fcs_len, rtap_vendor_space);
/* Derive the per-frame TID, the sequence-counter index and the security
 * (PN/RSC) index from the QoS control field (if present) and store them
 * in @rx; also maps the TID onto skb->priority. */
702 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
704 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
705 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
706 int tid, seqno_idx, security_idx;
708 /* does the frame have a qos control field? */
709 if (ieee80211_is_data_qos(hdr->frame_control)) {
710 u8 *qc = ieee80211_get_qos_ctl(hdr);
711 /* frame has qos control */
712 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
713 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
714 status->rx_flags |= IEEE80211_RX_AMSDU;
720 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
722 * Sequence numbers for management frames, QoS data
723 * frames with a broadcast/multicast address in the
724 * Address 1 field, and all non-QoS data frames sent
725 * by QoS STAs are assigned using an additional single
726 * modulo-4096 counter, [...]
728 * We also use that counter for non-QoS STAs.
730 seqno_idx = IEEE80211_NUM_TIDS;
732 if (ieee80211_is_mgmt(hdr->frame_control))
733 security_idx = IEEE80211_NUM_TIDS;
737 rx->seqno_idx = seqno_idx;
738 rx->security_idx = security_idx;
739 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
740 * For now, set skb->priority to 0 for other cases. */
741 rx->skb->priority = (tid > 7) ? 0 : tid;
745 * DOC: Packet alignment
747 * Drivers always need to pass packets that are aligned to two-byte boundaries
750 * Additionally, should, if possible, align the payload data in a way that
751 * guarantees that the contained IP header is aligned to a four-byte
752 * boundary. In the case of regular frames, this simply means aligning the
753 * payload to a four-byte boundary (because either the IP header is directly
754 * contained, or IV/RFC1042 headers that have a length divisible by four are
755 * in front of it). If the payload data is not properly aligned and the
756 * architecture doesn't support efficient unaligned operations, mac80211
757 * will align the data.
759 * With A-MSDU frames, however, the payload data address must yield two modulo
760 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
761 * push the IP header further back to a multiple of four again. Thankfully, the
762 * specs were sane enough this time around to require padding each A-MSDU
763 * subframe to a length that is a multiple of four.
765 * Padding like Atheros hardware adds which is between the 802.11 header and
766 * the payload is not supported, the driver is required to move the 802.11
767 * header to be directly in front of the payload in that case.
/* Debug build only: warn (once) if a driver handed us an 802.11 header
 * that is not two-byte aligned, violating the "Packet alignment"
 * requirement documented above. */
769 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
771 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
772 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
/* True iff the frame is a *unicast* robust management frame:
 * addr1 must not be multicast, then defer to the generic robust-mgmt
 * classification. */
779 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
781 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
783 if (is_multicast_ether_addr(hdr->addr1))
786 return ieee80211_is_robust_mgmt_frame(skb);
/* True iff the frame is a *multicast* robust management frame — the
 * mirror image of the unicast check above. */
790 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
792 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
794 if (!is_multicast_ether_addr(hdr->addr1))
797 return ieee80211_is_robust_mgmt_frame(skb);
801 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
/* Supports both the 8-byte (CMAC-128) and 16-byte (GMAC/CMAC-256) MMIE
 * variants, located at the tail of the frame. */
802 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
804 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
805 struct ieee80211_mmie *mmie;
806 struct ieee80211_mmie_16 *mmie16;
/* BIP only protects group-addressed robust management frames */
808 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
811 if (!ieee80211_is_robust_mgmt_frame(skb))
812 return -1; /* not a robust management frame */
814 mmie = (struct ieee80211_mmie *)
815 (skb->data + skb->len - sizeof(*mmie));
816 if (mmie->element_id == WLAN_EID_MMIE &&
817 mmie->length == sizeof(*mmie) - 2)
818 return le16_to_cpu(mmie->key_id);
820 mmie16 = (struct ieee80211_mmie_16 *)
821 (skb->data + skb->len - sizeof(*mmie16));
822 if (skb->len >= 24 + sizeof(*mmie16) &&
823 mmie16->element_id == WLAN_EID_MMIE &&
824 mmie16->length == sizeof(*mmie16) - 2)
825 return le16_to_cpu(mmie16->key_id);
/* Extract the key index from a frame protected by a driver-provided
 * cipher scheme @cs, using the scheme's byte offset, mask and shift.
 * The frame must be long enough to carry the 802.11 header plus the
 * scheme's security header. */
830 static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
833 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
838 fc = hdr->frame_control;
839 hdrlen = ieee80211_hdrlen(fc);
841 if (skb->len < hdrlen + cs->hdr_len)
844 skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
845 keyid &= cs->key_idx_mask;
846 keyid >>= cs->key_idx_shift;
/* Mesh-specific sanity checks: drop (to monitor) data frames with
 * inconsistent ToDS/FromDS/address fields or looped-back own frames,
 * and from non-established peers accept only peering/beacon/probe/auth
 * management frames. */
851 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
853 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
854 char *dev_addr = rx->sdata->vif.addr;
856 if (ieee80211_is_data(hdr->frame_control)) {
857 if (is_multicast_ether_addr(hdr->addr1)) {
858 if (ieee80211_has_tods(hdr->frame_control) ||
859 !ieee80211_has_fromds(hdr->frame_control))
860 return RX_DROP_MONITOR;
/* our own multicast frame reflected back at us */
861 if (ether_addr_equal(hdr->addr3, dev_addr))
862 return RX_DROP_MONITOR;
864 if (!ieee80211_has_a4(hdr->frame_control))
865 return RX_DROP_MONITOR;
866 if (ether_addr_equal(hdr->addr4, dev_addr))
867 return RX_DROP_MONITOR;
871 /* If there is not an established peer link and this is not a peer link
872 * establishment frame, beacon or probe, drop the frame.
875 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
876 struct ieee80211_mgmt *mgmt;
878 if (!ieee80211_is_mgmt(hdr->frame_control))
879 return RX_DROP_MONITOR;
881 if (ieee80211_is_action(hdr->frame_control)) {
884 /* make sure category field is present */
885 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
886 return RX_DROP_MONITOR;
888 mgmt = (struct ieee80211_mgmt *)hdr;
889 category = mgmt->u.action.category;
890 if (category != WLAN_CATEGORY_MESH_ACTION &&
891 category != WLAN_CATEGORY_SELF_PROTECTED)
892 return RX_DROP_MONITOR;
896 if (ieee80211_is_probe_req(hdr->frame_control) ||
897 ieee80211_is_probe_resp(hdr->frame_control) ||
898 ieee80211_is_beacon(hdr->frame_control) ||
899 ieee80211_is_auth(hdr->frame_control))
902 return RX_DROP_MONITOR;
/* Check whether reorder slot @index may be released: the visible tests
 * are (a) the slot is not marked as filtered by the driver
 * (reorder_buf_filtered) and (b) the tail skb of the slot does not
 * carry RX_FLAG_AMSDU_MORE, i.e. the A-MSDU in the slot is complete.
 * NOTE(review): the return statements are not visible in this extract. */
908 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
911 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
912 struct sk_buff *tail = skb_peek_tail(frames);
913 struct ieee80211_rx_status *status;
915 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
921 status = IEEE80211_SKB_RXCB(tail);
922 if (status->flag & RX_FLAG_AMSDU_MORE)
/* Release reorder slot @index: move its skbs (one MPDU, possibly split
 * across several A-MSDU parts) onto @frames for further RX processing,
 * or purge them if the slot is not ready; then advance head_seq_num.
 * Caller must hold tid_agg_rx->reorder_lock (asserted below). */
928 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
929 struct tid_ampdu_rx *tid_agg_rx,
931 struct sk_buff_head *frames)
933 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
935 struct ieee80211_rx_status *status;
937 lockdep_assert_held(&tid_agg_rx->reorder_lock);
939 if (skb_queue_empty(skb_list))
/* slot occupied but incomplete/filtered: drop its contents */
942 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
943 __skb_queue_purge(skb_list);
947 /* release frames from the reorder ring buffer */
948 tid_agg_rx->stored_mpdu_num--;
949 while ((skb = __skb_dequeue(skb_list))) {
950 status = IEEE80211_SKB_RXCB(skb);
951 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
952 __skb_queue_tail(frames, skb);
956 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
957 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
/* Release every reorder slot from the current head up to (but not
 * including) @head_seq_num, moving ready MPDUs onto @frames.
 * Caller must hold tid_agg_rx->reorder_lock (asserted below). */
960 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
961 struct tid_ampdu_rx *tid_agg_rx,
963 struct sk_buff_head *frames)
967 lockdep_assert_held(&tid_agg_rx->reorder_lock);
969 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
970 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
971 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
977 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
978 * the skb was added to the buffer longer than this time ago, the earlier
979 * frames that have not yet been received are assumed to be lost and the skb
980 * can be released for processing. This may also release other skb's from the
981 * reorder buffer if there are no additional gaps between the frames.
983 * Callers must hold tid_agg_rx->reorder_lock.
985 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
/* Release as many in-order MPDUs as possible from the reorder buffer
 * onto @frames.  If the head slot is missing but frames are stored,
 * look for a frame that has waited longer than
 * HT_RX_REORDER_BUF_TIMEOUT, release it (purging incomplete A-MSDUs in
 * between) and skip the head forward; otherwise (re)arm the reorder
 * timer for the earliest stored frame, or delete it if nothing is left.
 * Caller must hold tid_agg_rx->reorder_lock (asserted below). */
987 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
988 struct tid_ampdu_rx *tid_agg_rx,
989 struct sk_buff_head *frames)
993 lockdep_assert_held(&tid_agg_rx->reorder_lock);
995 /* release the buffer until next missing frame */
996 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
997 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
998 tid_agg_rx->stored_mpdu_num) {
1000 * No buffers ready to be released, but check whether any
1001 * frames in the reorder buffer have timed out.
1004 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1005 j = (j + 1) % tid_agg_rx->buf_size) {
1006 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1011 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1012 HT_RX_REORDER_BUF_TIMEOUT))
1013 goto set_release_timer;
1015 /* don't leave incomplete A-MSDUs around */
1016 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1017 i = (i + 1) % tid_agg_rx->buf_size)
1018 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1020 ht_dbg_ratelimited(sdata,
1021 "release an RX reorder frame due to timeout on earlier frames\n");
1022 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1026 * Increment the head seq# also for the skipped slots.
1028 tid_agg_rx->head_seq_num =
1029 (tid_agg_rx->head_seq_num +
1030 skipped) & IEEE80211_SN_MASK;
1033 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1034 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1036 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
/* frames still stored: arm the timer on the earliest one */
1039 if (tid_agg_rx->stored_mpdu_num) {
1040 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1042 for (; j != (index - 1) % tid_agg_rx->buf_size;
1043 j = (j + 1) % tid_agg_rx->buf_size) {
1044 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
/* don't rearm after the BA session was torn down */
1050 if (!tid_agg_rx->removed)
1051 mod_timer(&tid_agg_rx->reorder_timer,
1052 tid_agg_rx->reorder_time[j] + 1 +
1053 HT_RX_REORDER_BUF_TIMEOUT);
1055 del_timer(&tid_agg_rx->reorder_timer);
1060 * As this function belongs to the RX path it must be under
1061 * rcu_read_lock protection. It returns false if the frame
1062 * can be processed immediately, true if it was consumed.
1064 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1065 struct tid_ampdu_rx *tid_agg_rx,
1066 struct sk_buff *skb,
1067 struct sk_buff_head *frames)
1069 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1070 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1071 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1072 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1073 u16 head_seq_num, buf_size;
1077 spin_lock(&tid_agg_rx->reorder_lock);
1080 * Offloaded BA sessions have no known starting sequence number so pick
1081 * one from first Rxed frame for this tid after BA was started.
1083 if (unlikely(tid_agg_rx->auto_seq)) {
1084 tid_agg_rx->auto_seq = false;
1085 tid_agg_rx->ssn = mpdu_seq_num;
1086 tid_agg_rx->head_seq_num = mpdu_seq_num;
1089 buf_size = tid_agg_rx->buf_size;
1090 head_seq_num = tid_agg_rx->head_seq_num;
1093 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1096 if (unlikely(!tid_agg_rx->started)) {
1097 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1101 tid_agg_rx->started = true;
1104 /* frame with out of date sequence number */
1105 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1111 * If the frame sequence number exceeds our buffering window
1112 * size release some previous frames to make room for this one.
1114 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1115 head_seq_num = ieee80211_sn_inc(
1116 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1117 /* release stored frames up to new head to stack */
1118 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1119 head_seq_num, frames);
1122 /* Now the new frame is always in the range of the reordering buffer */
1124 index = mpdu_seq_num % tid_agg_rx->buf_size;
1126 /* check if we already stored this frame */
1127 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1133 * If the current MPDU is in the right order and nothing else
1134 * is stored we can process it directly, no need to buffer it.
1135 * If it is first but there's something stored, we may be able
1136 * to release frames after this one.
1138 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1139 tid_agg_rx->stored_mpdu_num == 0) {
/* only advance head once the last A-MSDU part arrived */
1140 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1141 tid_agg_rx->head_seq_num =
1142 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1147 /* put the frame in the reordering buffer */
1148 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1149 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1150 tid_agg_rx->reorder_time[index] = jiffies;
1151 tid_agg_rx->stored_mpdu_num++;
1152 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1156 spin_unlock(&tid_agg_rx->reorder_lock);
1161 * Reorder MPDUs from A-MPDUs, keeping them on a buffer.  MPDUs that are
1162 * consumed stay in the reorder buffer; those ready for processing are
1162 * queued onto @frames (this function itself returns nothing).
1164 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1165 struct sk_buff_head *frames)
1167 struct sk_buff *skb = rx->skb;
1168 struct ieee80211_local *local = rx->local;
1169 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1170 struct sta_info *sta = rx->sta;
1171 struct tid_ampdu_rx *tid_agg_rx;
/* only unicast QoS data frames are subject to reordering */
1175 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1176 is_multicast_ether_addr(hdr->addr1))
1180 * filter the QoS data rx stream according to
1181 * STA/TID and check if this STA/TID is on aggregation
1187 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1188 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1189 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1191 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
/* peer uses block-ack without a negotiated session: ask it to set
 * one up (DELBA with "setup required"), at most once per TID */
1193 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1194 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1195 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1196 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1197 WLAN_BACK_RECIPIENT,
1198 WLAN_REASON_QSTA_REQUIRE_SETUP);
1202 /* qos null data frames are excluded */
1203 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1206 /* not part of a BA session */
1207 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1208 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1211 /* new, potentially un-ordered, ampdu frame - process it */
1213 /* reset session timer */
1214 if (tid_agg_rx->timeout)
1215 tid_agg_rx->last_rx = jiffies;
1217 /* if this mpdu is fragmented - terminate rx aggregation session */
1218 sc = le16_to_cpu(hdr->seq_ctrl);
1219 if (sc & IEEE80211_SCTL_FRAG) {
/* defer to the interface work, which will tear the session down */
1220 skb_queue_tail(&rx->sdata->skb_queue, skb);
1221 ieee80211_queue_work(&local->hw, &rx->sdata->work);
1226 * No locking needed -- we will only ever process one
1227 * RX packet at a time, and thus own tid_agg_rx. All
1228 * other code manipulating it needs to (and does) make
1229 * sure that we cannot get to it any more before doing
1232 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1237 __skb_queue_tail(frames, skb);
/* RX handler: drop duplicate retransmissions by comparing the frame's
 * sequence control against the last one seen from this STA for the
 * same sequence-counter index; otherwise record the new value. */
1240 static ieee80211_rx_result debug_noinline
1241 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1243 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1244 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
/* driver/reorder path already did duplicate detection */
1246 if (status->flag & RX_FLAG_DUP_VALIDATED)
1250 * Drop duplicate 802.11 retransmissions
1251 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
/* too short to carry a sequence control field */
1254 if (rx->skb->len < 24)
/* ctl/nullfunc/multicast frames are exempt from dup detection */
1257 if (ieee80211_is_ctl(hdr->frame_control) ||
1258 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1259 is_multicast_ether_addr(hdr->addr1))
1265 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1266 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1267 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1268 rx->sta->rx_stats.num_duplicates++;
1269 return RX_DROP_UNUSABLE;
1270 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1271 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
/*
 * RX handler: enforce 802.11 frame-class rules — drop Class 3 (data/PS-Poll)
 * frames from stations that are not associated, except EAPOL-style port
 * control frames from the AP (race between assoc and first frame).
 * NOTE(review): extract is missing lines (braces, early returns); the token
 * "ðertype" below is HTML-entity mangling of "&ethertype" — restore before
 * compiling.
 */
1277 static ieee80211_rx_result debug_noinline
1278 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1280 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1282 /* Drop disallowed frame classes based on STA auth/assoc state;
1283 * IEEE 802.11, Chap 5.5.
1285 * mac80211 filters only based on association state, i.e. it drops
1286 * Class 3 frames from not associated stations. hostapd sends
1287 * deauth/disassoc frames when needed. In addition, hostapd is
1288 * responsible for filtering on both auth and assoc states.
/* Mesh interfaces have their own acceptance rules. */
1291 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1292 return ieee80211_rx_mesh_check(rx);
1294 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1295 ieee80211_is_pspoll(hdr->frame_control)) &&
1296 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1297 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
1298 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1299 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1301 * accept port control frames from the AP even when it's not
1302 * yet marked ASSOC to prevent a race where we don't set the
1303 * assoc bit quickly enough before it sends the first frame
1305 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1306 ieee80211_is_data_present(hdr->frame_control)) {
1307 unsigned int hdrlen;
1310 hdrlen = ieee80211_hdrlen(hdr->frame_control);
/* Need header + LLC/SNAP (6) + ethertype (2) bytes present. */
1312 if (rx->skb->len < hdrlen + 8)
1313 return RX_DROP_MONITOR;
/* NOTE(review): "ðertype" should read "&ethertype" (mis-encoded). */
1315 skb_copy_bits(rx->skb, hdrlen + 6, ðertype, 2);
1316 if (ethertype == rx->sdata->control_port_protocol)
/* In AP mode, tell userspace about the spurious Class 3 frame. */
1320 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1321 cfg80211_rx_spurious_frame(rx->sdata->dev,
1324 return RX_DROP_UNUSABLE;
1326 return RX_DROP_MONITOR;
/*
 * RX handler (STA PS-Poll mode): while we are pspolling, inspect data frames
 * from the AP; if the More Data bit is set, send another PS-Poll to fetch
 * the next buffered frame, otherwise leave pspolling state.
 * NOTE(review): extract is missing lines (e.g. skb/local assignments, early
 * returns) — embedded numbers are the original kernel line numbers.
 */
1333 static ieee80211_rx_result debug_noinline
1334 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1336 struct ieee80211_local *local;
1337 struct ieee80211_hdr *hdr;
1338 struct sk_buff *skb;
1342 hdr = (struct ieee80211_hdr *) skb->data;
/* Only relevant while a PS-Poll exchange is in progress. */
1344 if (!local->pspolling)
1347 if (!ieee80211_has_fromds(hdr->frame_control))
1348 /* this is not from AP */
1351 if (!ieee80211_is_data(hdr->frame_control))
1354 if (!ieee80211_has_moredata(hdr->frame_control)) {
1355 /* AP has no more frames buffered for us */
1356 local->pspolling = false;
1360 /* more data bit is set, let's request a new frame from the AP */
1361 ieee80211_send_pspoll(local, rx->sdata);
/*
 * Transition a station into power-save: bump the PS counter, set the
 * PS_STA flag, notify the driver (unless the HW manages AP link PS itself),
 * disable the fast-xmit path, and record which TXQs hold buffered frames.
 * NOTE(review): extract is missing lines (e.g. the `ps` declaration/else
 * branch, loop braces); code kept byte-identical.
 */
1366 static void sta_ps_start(struct sta_info *sta)
1368 struct ieee80211_sub_if_data *sdata = sta->sdata;
1369 struct ieee80211_local *local = sdata->local;
/* AP/VLAN interfaces share the BSS power-save bookkeeping. */
1373 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1374 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1375 ps = &sdata->bss->ps;
1379 atomic_inc(&ps->num_sta_ps);
1380 set_sta_flag(sta, WLAN_STA_PS_STA);
1381 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1382 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1383 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1384 sta->sta.addr, sta->sta.aid);
1386 ieee80211_clear_fast_xmit(sta);
/* No TXQs allocated for this station — nothing to track. */
1388 if (!sta->sta.txq[0])
/* Remember per-TID whether frames are queued so they can be released later. */
1391 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1392 if (txq_has_queue(sta->sta.txq[tid]))
1393 set_bit(tid, &sta->txq_buffered_tids);
1395 clear_bit(tid, &sta->txq_buffered_tids);
/*
 * Transition a station out of power-save. If the driver still blocks the
 * station (PS_DRIVER), only clear PS_STA and let the driver-side unblock
 * deliver the frames; otherwise deliver the buffered wakeup frames now.
 * NOTE(review): extract is missing lines (braces, an early return).
 */
1399 static void sta_ps_end(struct sta_info *sta)
1401 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1402 sta->sta.addr, sta->sta.aid);
1404 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1406 * Clear the flag only if the other one is still set
1407 * so that the TX path won't start TX'ing new frames
1408 * directly ... In the case that the driver flag isn't
1409 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1411 clear_sta_flag(sta, WLAN_STA_PS_STA);
1412 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1413 sta->sta.addr, sta->sta.aid);
/* PS_DELIVER guards the delivery window against concurrent TX. */
1417 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1418 clear_sta_flag(sta, WLAN_STA_PS_STA);
1419 ieee80211_sta_ps_deliver_wakeup(sta);
/*
 * Exported API for drivers that manage AP-side link power save themselves
 * (AP_LINK_PS): request a PS state change for @pubsta. Rejects a no-op
 * transition (same state requested twice).
 * NOTE(review): extract is missing lines (the `in_ps` declaration, the
 * actual sta_ps_start/sta_ps_end dispatch, return statements).
 */
1422 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1424 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
/* This entry point is only valid when the HW advertises AP_LINK_PS. */
1427 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1429 /* Don't let the same PS state be set twice */
1430 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1431 if ((start && in_ps) || (!start && !in_ps))
1441 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
/*
 * Exported API: handle a PS-Poll received for @pubsta. If no service period
 * is in progress, either deliver the poll response immediately or mark
 * PSPOLL so the driver-unblock path delivers it later.
 * NOTE(review): extract is missing an early-return line after the SP check.
 */
1443 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1445 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
/* Ignore the poll while a service period is already running. */
1447 if (test_sta_flag(sta, WLAN_STA_SP))
1450 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1451 ieee80211_sta_ps_deliver_poll_response(sta);
1453 set_sta_flag(sta, WLAN_STA_PSPOLL);
1455 EXPORT_SYMBOL(ieee80211_sta_pspoll);
/*
 * Exported API: handle a uAPSD trigger frame on @tid for @pubsta. Starts a
 * uAPSD service period (or marks UAPSD for later delivery when the driver
 * still blocks the station). tid == IEEE80211_NUM_TIDS bypasses the
 * trigger-enabled AC check (driver already validated).
 * NOTE(review): extract is missing early-return lines after the checks.
 */
1457 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1459 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1460 int ac = ieee80211_ac_from_tid(tid);
1463 * If this AC is not trigger-enabled do nothing unless the
1464 * driver is calling us after it already checked.
1466 * NB: This could/should check a separate bitmap of trigger-
1467 * enabled queues, but for now we only implement uAPSD w/o
1468 * TSPEC changes to the ACs, so they're always the same.
1470 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1471 tid != IEEE80211_NUM_TIDS)
1474 /* if we are in a service period, do nothing */
1475 if (test_sta_flag(sta, WLAN_STA_SP))
1478 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1479 ieee80211_sta_ps_deliver_uapsd(sta);
1481 set_sta_flag(sta, WLAN_STA_UAPSD);
1483 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
/*
 * RX handler (AP/AP_VLAN only): react to PS-Poll and uAPSD trigger frames
 * from sleeping stations. PS-Polls are consumed here (skb freed) instead
 * of being counted as drops; QoS-data/QoS-null frames with PM set act as
 * uAPSD triggers for their TID.
 * NOTE(review): extract is missing lines (early returns, the `tid`
 * declaration, RX result returns); code kept byte-identical.
 */
1485 static ieee80211_rx_result debug_noinline
1486 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1488 struct ieee80211_sub_if_data *sdata = rx->sdata;
1489 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1490 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1495 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1496 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1500 * The device handles station powersave, so don't do anything about
1501 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
1502 * it to mac80211 since they're handled.)
1504 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1508 * Don't do anything if the station isn't already asleep. In
1509 * the uAPSD case, the station will probably be marked asleep,
1510 * in the PS-Poll case the station must be confused ...
1512 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1515 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1516 ieee80211_sta_pspoll(&rx->sta->sta);
1518 /* Free PS Poll skb here instead of returning RX_DROP that would
1519 * count as an dropped frame. */
1520 dev_kfree_skb(rx->skb);
1523 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1524 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1525 ieee80211_has_pm(hdr->frame_control) &&
1526 (ieee80211_is_data_qos(hdr->frame_control) ||
1527 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
/* QoS Control TID of the trigger frame selects the service period's AC. */
1530 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1532 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
/*
 * RX handler: per-station accounting and power-save state tracking.
 * Updates last_rx/last_rate/signal statistics, detects PM-bit transitions
 * at the end of a frame exchange (entering/leaving power save), and
 * silently consumes (qos-)nullfunc frames which exist only for PS control.
 * NOTE(review): extract is missing many lines (early sta==NULL check,
 * `i` declaration, sta_ps_start/sta_ps_end calls, RX result returns,
 * closing braces); code kept byte-identical, original kernel line numbers
 * are fused into each line.
 */
1538 static ieee80211_rx_result debug_noinline
1539 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1541 struct sta_info *sta = rx->sta;
1542 struct sk_buff *skb = rx->skb;
1543 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1544 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1551 * Update last_rx only for IBSS packets which are for the current
1552 * BSSID and for station already AUTHORIZED to avoid keeping the
1553 * current IBSS network alive in cases where other STAs start
1554 * using different BSSID. This will also give the station another
1555 * chance to restart the authentication/authorization in case
1556 * something went wrong the first time.
1558 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1559 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1560 NL80211_IFTYPE_ADHOC);
1561 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1562 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1563 sta->rx_stats.last_rx = jiffies;
1564 if (ieee80211_is_data(hdr->frame_control) &&
1565 !is_multicast_ether_addr(hdr->addr1))
1566 sta->rx_stats.last_rate =
1567 sta_stats_encode_rate(status);
1569 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1570 sta->rx_stats.last_rx = jiffies;
1571 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1573 * Mesh beacons will update last_rx when if they are found to
1574 * match the current local configuration when processed.
1576 sta->rx_stats.last_rx = jiffies;
1577 if (ieee80211_is_data(hdr->frame_control))
1578 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1581 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1582 ieee80211_sta_rx_notify(rx->sdata, hdr);
1584 sta->rx_stats.fragments++;
/* Byte counter is protected by the u64 stats sync for 32-bit readers. */
1586 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1587 sta->rx_stats.bytes += rx->skb->len;
1588 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1590 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1591 sta->rx_stats.last_signal = status->signal;
/* EWMA stores the negated dBm value (signal is typically negative). */
1592 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1595 if (status->chains) {
1596 sta->rx_stats.chains = status->chains;
1597 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1598 int signal = status->chain_signal[i];
1600 if (!(status->chains & BIT(i)))
1603 sta->rx_stats.chain_signal_last[i] = signal;
1604 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1610 * Change STA power saving mode only at the end of a frame
1611 * exchange sequence.
1613 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1614 !ieee80211_has_morefrags(hdr->frame_control) &&
1615 !ieee80211_is_back_req(hdr->frame_control) &&
1616 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1617 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1618 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1620 * PM bit is only checked in frames where it isn't reserved,
1621 * in AP mode it's reserved in non-bufferable management frames
1622 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
1623 * BAR frames should be ignored as specified in
1624 * IEEE 802.11-2012 10.2.1.2.
1626 (!ieee80211_is_mgmt(hdr->frame_control) ||
1627 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
/* PM bit cleared while asleep => wake; set while awake => sleep. */
1628 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1629 if (!ieee80211_has_pm(hdr->frame_control))
1632 if (ieee80211_has_pm(hdr->frame_control))
1637 /* mesh power save support */
1638 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1639 ieee80211_mps_rx_h_sta_process(sta, hdr);
1642 * Drop (qos-)data::nullfunc frames silently, since they
1643 * are used only to control station power saving mode.
1645 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1646 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1649 * If we receive a 4-addr nullfunc frame from a STA
1650 * that was not moved to a 4-addr STA vlan yet send
1651 * the event to userspace and for older hostapd drop
1652 * the frame to the monitor interface.
1654 if (ieee80211_has_a4(hdr->frame_control) &&
1655 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1656 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1657 !rx->sdata->u.vlan.sta))) {
1658 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1659 cfg80211_rx_unexpected_4addr_frame(
1660 rx->sdata->dev, sta->sta.addr,
1662 return RX_DROP_MONITOR;
1665 * Update counter and free packet here to avoid
1666 * counting this as a dropped packed.
1668 sta->rx_stats.packets++;
1669 dev_kfree_skb(rx->skb);
1674 } /* ieee80211_rx_h_sta_process */
/*
 * RX handler: select the decryption key for this frame (PTK/STK for
 * unicast, GTK/IGTK for group-addressed, WEP key index fallback) and
 * dispatch to the cipher-specific decrypt routine. Sets RX_FLAG_DECRYPTED
 * on success paths; taints and missing keys route to monitor/drop.
 * NOTE(review): extract is missing many lines (fc/hdrlen/keyidx
 * declarations, several return/goto statements, brace lines); the embedded
 * numbers are the original kernel line numbers. Code kept byte-identical.
 */
1676 static ieee80211_rx_result debug_noinline
1677 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1679 struct sk_buff *skb = rx->skb;
1680 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1681 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1684 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1685 struct ieee80211_key *sta_ptk = NULL;
1686 int mmie_keyidx = -1;
1688 const struct ieee80211_cipher_scheme *cs = NULL;
1693 * There are four types of keys:
1694 * - GTK (group keys)
1695 * - IGTK (group keys for management frames)
1696 * - PTK (pairwise keys)
1697 * - STK (station-to-station pairwise keys)
1699 * When selecting a key, we have to distinguish between multicast
1700 * (including broadcast) and unicast frames, the latter can only
1701 * use PTKs and STKs while the former always use GTKs and IGTKs.
1702 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
1703 * unicast frames can also use key indices like GTKs. Hence, if we
1704 * don't have a PTK/STK we check the key index for a WEP key.
1706 * Note that in a regular BSS, multicast frames are sent by the
1707 * AP only, associated stations unicast the frame to the AP first
1708 * which then multicasts it on their behalf.
1710 * There is also a slight problem in IBSS mode: GTKs are negotiated
1711 * with each station, that is something we don't currently handle.
1712 * The spec seems to expect that one negotiates the same key with
1713 * every station but there's no such requirement; VLANs could be
1717 /* start without a key */
1719 fc = hdr->frame_control;
1722 int keyid = rx->sta->ptk_idx;
/* Vendor cipher schemes carry the key index inside the frame itself. */
1724 if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
1725 cs = rx->sta->cipher_scheme;
1726 keyid = ieee80211_get_cs_keyid(cs, rx->skb);
1727 if (unlikely(keyid < 0))
1728 return RX_DROP_UNUSABLE;
1730 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
/* BIP leaves the Protected bit clear; look for an MMIE instead. */
1733 if (!ieee80211_has_protected(fc))
1734 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1736 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1738 if ((status->flag & RX_FLAG_DECRYPTED) &&
1739 (status->flag & RX_FLAG_IV_STRIPPED))
1741 /* Skip decryption if the frame is not protected. */
1742 if (!ieee80211_has_protected(fc))
1744 } else if (mmie_keyidx >= 0) {
1745 /* Broadcast/multicast robust management frame / BIP */
1746 if ((status->flag & RX_FLAG_DECRYPTED) &&
1747 (status->flag & RX_FLAG_IV_STRIPPED))
1750 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1751 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1752 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1754 if (ieee80211_is_group_privacy_action(skb) &&
1755 test_sta_flag(rx->sta, WLAN_STA_MFP))
1756 return RX_DROP_MONITOR;
1758 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1761 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1762 } else if (!ieee80211_has_protected(fc)) {
1764 * The frame was not protected, so skip decryption. However, we
1765 * need to set rx->key if there is a key that could have been
1766 * used so that the frame may be dropped if encryption would
1767 * have been expected.
1769 struct ieee80211_key *key = NULL;
1770 struct ieee80211_sub_if_data *sdata = rx->sdata;
1773 if (ieee80211_is_mgmt(fc) &&
1774 is_multicast_ether_addr(hdr->addr1) &&
1775 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
/* Otherwise scan per-station GTKs, then interface default keys. */
1779 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1780 key = rcu_dereference(rx->sta->gtk[i]);
1786 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1787 key = rcu_dereference(sdata->keys[i]);
1800 * The device doesn't give us the IV so we won't be
1801 * able to look up the key. That's ok though, we
1802 * don't need to decrypt the frame, we just won't
1803 * be able to keep statistics accurate.
1804 * Except for key threshold notifications, should
1805 * we somehow allow the driver to tell us which key
1806 * the hardware used if this flag is set?
1808 if ((status->flag & RX_FLAG_DECRYPTED) &&
1809 (status->flag & RX_FLAG_IV_STRIPPED))
1812 hdrlen = ieee80211_hdrlen(fc);
1815 keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
1817 if (unlikely(keyidx < 0))
1818 return RX_DROP_UNUSABLE;
/* Need at least IV/keyid bytes after the header. */
1820 if (rx->skb->len < 8 + hdrlen)
1821 return RX_DROP_UNUSABLE; /* TODO: count this? */
1823 * no need to call ieee80211_wep_get_keyidx,
1824 * it verifies a bunch of things we've done already
1826 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
/* WEP/TKIP/CCMP key index lives in the top two bits of that byte. */
1827 keyidx = keyid >> 6;
1830 /* check per-station GTK first, if multicast packet */
1831 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1832 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1834 /* if not found, try default key */
1836 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1839 * RSNA-protected unicast frames should always be
1840 * sent with pairwise or station-to-station keys,
1841 * but for WEP we allow using a key index as well.
1844 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1845 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1846 !is_multicast_ether_addr(hdr->addr1))
1852 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1853 return RX_DROP_MONITOR;
1855 /* TODO: add threshold stuff again */
1857 return RX_DROP_MONITOR;
/* Dispatch to the cipher-specific software/offload decrypt routine. */
1860 switch (rx->key->conf.cipher) {
1861 case WLAN_CIPHER_SUITE_WEP40:
1862 case WLAN_CIPHER_SUITE_WEP104:
1863 result = ieee80211_crypto_wep_decrypt(rx);
1865 case WLAN_CIPHER_SUITE_TKIP:
1866 result = ieee80211_crypto_tkip_decrypt(rx);
1868 case WLAN_CIPHER_SUITE_CCMP:
1869 result = ieee80211_crypto_ccmp_decrypt(
1870 rx, IEEE80211_CCMP_MIC_LEN);
1872 case WLAN_CIPHER_SUITE_CCMP_256:
1873 result = ieee80211_crypto_ccmp_decrypt(
1874 rx, IEEE80211_CCMP_256_MIC_LEN);
1876 case WLAN_CIPHER_SUITE_AES_CMAC:
1877 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1879 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
1880 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
1882 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
1883 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
1884 result = ieee80211_crypto_aes_gmac_decrypt(rx);
1886 case WLAN_CIPHER_SUITE_GCMP:
1887 case WLAN_CIPHER_SUITE_GCMP_256:
1888 result = ieee80211_crypto_gcmp_decrypt(rx);
1891 result = ieee80211_crypto_hw_decrypt(rx);
1894 /* the hdr variable is invalid after the decrypt handlers */
1896 /* either the frame has been decrypted or will be dropped */
1897 status->flag |= RX_FLAG_DECRYPTED;
/*
 * Initialize every skb list in the defragmentation cache.
 * NOTE(review): extract is missing the braces and the `i` declaration.
 */
1902 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
1906 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
1907 skb_queue_head_init(&cache->entries[i].skb_list);
/*
 * Free all skbs still held in the defragmentation cache (lockless purge —
 * the caller must own the cache at this point).
 * NOTE(review): extract is missing the braces and the `i` declaration.
 */
1910 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
1914 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
1915 __skb_queue_purge(&cache->entries[i].skb_list);
/*
 * Claim the next slot in the round-robin fragment cache for a new first
 * fragment, purging whatever the slot previously held, and initialize the
 * entry's bookkeeping (timestamp, queue, fragment number).
 * NOTE(review): extract is missing lines (wraparound reset of cache->next,
 * entry->seq assignment, the trailing `return entry;`, braces).
 */
1918 static inline struct ieee80211_fragment_entry *
1919 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
1920 unsigned int frag, unsigned int seq, int rx_queue,
1921 struct sk_buff **skb)
1923 struct ieee80211_fragment_entry *entry;
1925 entry = &cache->entries[cache->next++];
1926 if (cache->next >= IEEE80211_FRAGMENT_MAX)
/* Reusing the slot: drop any stale, incomplete reassembly it held. */
1929 __skb_queue_purge(&entry->skb_list);
1931 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1933 entry->first_frag_time = jiffies;
1935 entry->rx_queue = rx_queue;
1936 entry->last_frag = frag;
1937 entry->check_sequential_pn = false;
1938 entry->extra_len = 0;
/*
 * Locate the in-progress reassembly entry matching this fragment: same
 * sequence number, RX queue, expected next fragment number, same frame
 * type and addr1/addr2. Entries older than 2 seconds are purged. Returns
 * the entry or (in missing lines) NULL when nothing matches.
 * NOTE(review): extract is missing lines (the `idx` initialization from
 * cache->next, `continue` statements, the final return, braces).
 */
1943 static inline struct ieee80211_fragment_entry *
1944 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
1945 unsigned int frag, unsigned int seq,
1946 int rx_queue, struct ieee80211_hdr *hdr)
1948 struct ieee80211_fragment_entry *entry;
/* Walk the cache starting from the most recently used slot backwards. */
1952 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1953 struct ieee80211_hdr *f_hdr;
1957 idx = IEEE80211_FRAGMENT_MAX - 1;
1959 entry = &cache->entries[idx];
1960 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1961 entry->rx_queue != rx_queue ||
1962 entry->last_frag + 1 != frag)
1965 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1968 * Check ftype and addresses are equal, else check next fragment
1970 if (((hdr->frame_control ^ f_hdr->frame_control) &
1971 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1972 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1973 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
/* Stale entry (first fragment older than 2s): discard it. */
1976 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1977 __skb_queue_purge(&entry->skb_list);
/*
 * True when defragmentation must verify sequentially-incrementing PNs:
 * protected frames under CCMP/CCMP-256/GCMP/GCMP-256 with a key present.
 * NOTE(review): extract is missing the opening `return rx->key &&` line
 * and braces.
 */
1986 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
1989 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
1990 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
1991 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
1992 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
1993 ieee80211_has_protected(fc);
/*
 * RX handler: 802.11 defragmentation. First fragments allocate a cache
 * entry (recording the CCMP/GCMP PN when sequential-PN checking applies);
 * follow-up fragments must match the pending entry, pass the mixed-key /
 * fragment-cache attack checks (sequential PN, consistent key "color"),
 * and are appended until the final fragment completes the MSDU, which is
 * then linearized into one skb.
 * NOTE(review): extract is missing many lines (multicast/PN loop interior,
 * RX_CONTINUE/queued returns, several braces); the per-fragment PN
 * increment loop body (carry propagation) at lines 2099-2104 is truncated.
 * Code kept byte-identical; embedded numbers are original kernel lines.
 */
1996 static ieee80211_rx_result debug_noinline
1997 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1999 struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2000 struct ieee80211_hdr *hdr;
2003 unsigned int frag, seq;
2004 struct ieee80211_fragment_entry *entry;
2005 struct sk_buff *skb;
2006 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2008 hdr = (struct ieee80211_hdr *)rx->skb->data;
2009 fc = hdr->frame_control;
2011 if (ieee80211_is_ctl(fc))
2014 sc = le16_to_cpu(hdr->seq_ctrl);
2015 frag = sc & IEEE80211_SCTL_FRAG;
/* Per-station cache when a STA entry exists (mixed-key attack defense). */
2018 cache = &rx->sta->frags;
2020 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
/* Fragmented multicast is invalid per spec. */
2023 if (is_multicast_ether_addr(hdr->addr1))
2024 return RX_DROP_MONITOR;
2026 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2028 if (skb_linearize(rx->skb))
2029 return RX_DROP_UNUSABLE;
2032 * skb_linearize() might change the skb->data and
2033 * previously cached variables (in this case, hdr) need to
2034 * be refreshed with the new data.
2036 hdr = (struct ieee80211_hdr *)rx->skb->data;
2037 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2040 /* This is the first fragment of a new frame. */
2041 entry = ieee80211_reassemble_add(cache, frag, seq,
2042 rx->seqno_idx, &(rx->skb));
2043 if (requires_sequential_pn(rx, fc)) {
2044 int queue = rx->security_idx;
2046 /* Store CCMP/GCMP PN so that we can verify that the
2047 * next fragment has a sequential PN value.
2049 entry->check_sequential_pn = true;
2050 entry->is_protected = true;
2051 entry->key_color = rx->key->color;
2052 memcpy(entry->last_pn,
2053 rx->key->u.ccmp.rx_pn[queue],
2054 IEEE80211_CCMP_PN_LEN);
/* CCMP and GCMP rx_pn arrays must alias so one memcpy covers both. */
2055 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2057 offsetof(struct ieee80211_key,
2059 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2060 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2061 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2062 IEEE80211_GCMP_PN_LEN);
2063 } else if (rx->key &&
2064 (ieee80211_has_protected(fc) ||
2065 (status->flag & RX_FLAG_DECRYPTED))) {
2066 entry->is_protected = true;
2067 entry->key_color = rx->key->color;
2072 /* This is a fragment for a frame that should already be pending in
2073 * fragment cache. Add this fragment to the end of the pending entry.
2075 entry = ieee80211_reassemble_find(cache, frag, seq,
2076 rx->seqno_idx, hdr);
2078 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2079 return RX_DROP_MONITOR;
2082 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2083 * MPDU PN values are not incrementing in steps of 1."
2084 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2085 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2087 if (entry->check_sequential_pn) {
2089 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2091 if (!requires_sequential_pn(rx, fc))
2092 return RX_DROP_UNUSABLE;
2094 /* Prevent mixed key and fragment cache attacks */
2095 if (entry->key_color != rx->key->color)
2096 return RX_DROP_UNUSABLE;
/* Increment the stored PN by one (big-endian with carry, loop truncated). */
2098 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2099 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2105 rpn = rx->ccm_gcm.pn;
2106 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2107 return RX_DROP_UNUSABLE;
2108 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2109 } else if (entry->is_protected &&
2111 (!ieee80211_has_protected(fc) &&
2112 !(status->flag & RX_FLAG_DECRYPTED)) ||
2113 rx->key->color != entry->key_color)) {
2114 /* Drop this as a mixed key or fragment cache attack, even
2115 * if for TKIP Michael MIC should protect us, and WEP is a
2116 * lost cause anyway.
2118 return RX_DROP_UNUSABLE;
2119 } else if (entry->is_protected && rx->key &&
2120 entry->key_color != rx->key->color &&
2121 (status->flag & RX_FLAG_DECRYPTED)) {
2122 return RX_DROP_UNUSABLE;
/* Strip the 802.11 header from the follow-up fragment and queue it. */
2125 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2126 __skb_queue_tail(&entry->skb_list, rx->skb);
2127 entry->last_frag = frag;
2128 entry->extra_len += rx->skb->len;
2129 if (ieee80211_has_morefrags(fc)) {
/* Final fragment received: splice all pieces into the first skb. */
2134 rx->skb = __skb_dequeue(&entry->skb_list);
2135 if (skb_tailroom(rx->skb) < entry->extra_len) {
2136 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2137 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2139 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2140 __skb_queue_purge(&entry->skb_list);
2141 return RX_DROP_UNUSABLE;
2144 while ((skb = __skb_dequeue(&entry->skb_list))) {
2145 skb_put_data(rx->skb, skb->data, skb->len);
2150 ieee80211_led_rx(rx->local);
2152 rx->sta->rx_stats.packets++;
/*
 * 802.1X port check: non-authorized stations fail (missing lines return
 * -EACCES / 0 — truncated in this extract).
 */
2156 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2158 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
/*
 * Decide whether an unencrypted data frame must be dropped: pass frames
 * the HW already decrypted, pass mesh EAPOL (control-port) frames, and
 * drop unprotected data when a key is configured.
 * NOTE(review): "ðertype" below is HTML-entity mangling of "&ethertype";
 * the extract is also missing lines (returns, the `ethertype` declaration).
 */
2164 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2166 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2167 struct sk_buff *skb = rx->skb;
2168 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2171 * Pass through unencrypted frames if the hardware has
2172 * decrypted them already.
2174 if (status->flag & RX_FLAG_DECRYPTED)
2177 /* check mesh EAPOL frames first */
2178 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2179 ieee80211_is_data(fc))) {
2180 struct ieee80211s_hdr *mesh_hdr;
2181 u16 hdr_len = ieee80211_hdrlen(fc);
2182 u16 ethertype_offset;
/* Mesh EAPOL must be directly addressed to this interface. */
2185 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2188 /* make sure fixed part of mesh header is there, also checks skb len */
2189 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2192 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2193 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2194 sizeof(rfc1042_header);
/* NOTE(review): "ðertype" should read "&ethertype" (mis-encoded). */
2196 if (skb_copy_bits(rx->skb, ethertype_offset, ðertype, 2) == 0 &&
2197 ethertype == rx->sdata->control_port_protocol)
2202 /* Drop unencrypted frames if key is set. */
2203 if (unlikely(!ieee80211_has_protected(fc) &&
2204 !ieee80211_is_any_nullfunc(fc) &&
2205 ieee80211_is_data(fc) && rx->key))
/*
 * MFP (802.11w) enforcement for management frames: when the peer uses MFP,
 * unprotected unicast robust mgmt frames and multicast robust mgmt frames
 * without an MMIE are reported to cfg80211 (for deauth/disassoc) and
 * dropped; robust Action frames before keys are configured are dropped.
 * NOTE(review): extract is missing lines (return statements, braces,
 * cfg80211_rx_unprot_mlme_mgmt argument lines).
 */
2211 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2213 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2214 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2215 __le16 fc = hdr->frame_control;
2218 * Pass through unencrypted frames if the hardware has
2219 * decrypted them already.
2221 if (status->flag & RX_FLAG_DECRYPTED)
2224 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2225 if (unlikely(!ieee80211_has_protected(fc) &&
2226 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
/* Report unprotected deauth/disassoc so userspace can SA-Query. */
2228 if (ieee80211_is_deauth(fc) ||
2229 ieee80211_is_disassoc(fc))
2230 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2235 /* BIP does not use Protected field, so need to check MMIE */
2236 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2237 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2238 if (ieee80211_is_deauth(fc) ||
2239 ieee80211_is_disassoc(fc))
2240 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2246 * When using MFP, Action frames are not allowed prior to
2247 * having configured keys.
2249 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2250 ieee80211_is_robust_mgmt_frame(rx->skb)))
/*
 * Convert the 802.11 data frame in rx->skb to an 802.3 (ethernet) frame,
 * validating 4-address usage against the interface type first. Sets
 * *port_control when the resulting ethertype is the control port protocol
 * (EAPOL); in 4-addr STA mode a non-control-port frame fails the check.
 * NOTE(review): extract is missing lines (the return-type line preceding
 * 2258, several return statements, braces).
 */
2258 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2260 struct ieee80211_sub_if_data *sdata = rx->sdata;
2261 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2262 bool check_port_control = false;
2263 struct ethhdr *ehdr;
2266 *port_control = false;
/* 4-addr frames on a non-4addr AP_VLAN are invalid. */
2267 if (ieee80211_has_a4(hdr->frame_control) &&
2268 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
/* STA 4-addr config must match the frame's A4 usage. */
2271 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2272 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2274 if (!sdata->u.mgd.use_4addr)
2277 check_port_control = true;
2280 if (is_multicast_ether_addr(hdr->addr1) &&
2281 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2284 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2288 ehdr = (struct ethhdr *) rx->skb->data;
2289 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2290 *port_control = true;
2291 else if (check_port_control)
/*
 * Post-conversion acceptance check: EAPOL frames to us or the PAE group
 * address are always allowed; everything else must pass the 802.1X port
 * state and the drop-unencrypted policy.
 * NOTE(review): extract is missing lines (the comment opener, braces,
 * the final `return true;`).
 */
2298 * requires that rx->skb is a frame with ethernet header
2300 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2302 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2303 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2304 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2307 * Allow EAPOL frames to us/the PAE group address regardless of
2308 * whether the frame was encrypted or not, and always disallow
2309 * all other destination addresses for them.
2311 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
2312 return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2313 ether_addr_equal(ehdr->h_dest, pae_group_addr);
2315 if (ieee80211_802_1x_port_control(rx) ||
2316 ieee80211_drop_unencrypted(rx, fc))
/*
 * Deliver a converted (802.3) frame: update RX stats, in AP/AP_VLAN mode
 * optionally bridge it back onto the wireless medium (copy for multicast,
 * direct for an associated destination STA), align the payload when the
 * platform lacks efficient unaligned access, rewrite an EAPOL destination
 * to our own address so a bridge won't forward it, and hand the skb to the
 * network stack (GRO when a napi context exists).
 * NOTE(review): extract is missing many lines (skb/xmit_skb setup, the
 * dsta delivery branch, closing braces); code kept byte-identical, with
 * original kernel line numbers fused into each line.
 */
2323 * requires that rx->skb is a frame with ethernet header
2326 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2328 struct ieee80211_sub_if_data *sdata = rx->sdata;
2329 struct net_device *dev = sdata->dev;
2330 struct sk_buff *skb, *xmit_skb;
2331 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2332 struct sta_info *dsta;
2337 ieee80211_rx_stats(dev, skb->len);
2340 /* The seqno index has the same property as needed
2341 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2342 * for non-QoS-data frames. Here we know it's a data
2343 * frame, so count MSDUs.
2345 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2346 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2347 u64_stats_update_end(&rx->sta->rx_stats.syncp);
/* AP bridging: never bridge control-port frames or 4-addr VLAN traffic. */
2350 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2351 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2352 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2353 ehdr->h_proto != rx->sdata->control_port_protocol &&
2354 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2355 if (is_multicast_ether_addr(ehdr->h_dest) &&
2356 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2358 * send multicast frames both to higher layers in
2359 * local net stack and back to the wireless medium
2361 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2363 net_info_ratelimited("%s: failed to clone multicast frame\n",
2365 } else if (!is_multicast_ether_addr(ehdr->h_dest)) {
2366 dsta = sta_info_get(sdata, skb->data);
2369 * The destination station is associated to
2370 * this AP (in this VLAN), so send the frame
2371 * directly to it and do not pass it to local
2380 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2382 /* 'align' will only take the values 0 or 2 here since all
2383 * frames are required to be aligned to 2-byte boundaries
2384 * when being passed to mac80211; the code here works just
2385 * as well if that isn't true, but mac80211 assumes it can
2386 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2390 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2392 if (WARN_ON(skb_headroom(skb) < 3)) {
/* Shift the payload down to restore 2-byte alignment in place. */
2396 u8 *data = skb->data;
2397 size_t len = skb_headlen(skb);
2399 memmove(skb->data, data, len);
2400 skb_set_tail_pointer(skb, len);
2407 struct ethhdr *ehdr = (struct ethhdr *)skb->data;
2409 /* deliver to local stack */
2410 skb->protocol = eth_type_trans(skb, dev);
2411 memset(skb->cb, 0, sizeof(skb->cb));
2414 * 802.1X over 802.11 requires that the authenticator address
2415 * be used for EAPOL frames. However, 802.1X allows the use of
2416 * the PAE group address instead. If the interface is part of
2417 * a bridge and we pass the frame with the PAE group address,
2418 * then the bridge will forward it to the network (even if the
2419 * client was not associated yet), which isn't supposed to
2421 * To avoid that, rewrite the destination address to our own
2422 * address, so that the authenticator (e.g. hostapd) will see
2423 * the frame, but bridge won't forward it anywhere else. Note
2424 * that due to earlier filtering, the only other address can
2425 * be the PAE group address.
2427 if (unlikely(skb->protocol == sdata->control_port_protocol &&
2428 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
2429 ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
2432 napi_gro_receive(rx->napi, skb);
2434 netif_receive_skb(skb);
2439 * Send to wireless media and increase priority by 256 to
2440 * keep the received priority instead of reclassifying
2441 * the frame (see cfg80211_classify8021d).
2443 xmit_skb->priority += 256;
2444 xmit_skb->protocol = htons(ETH_P_802_3);
2445 skb_reset_network_header(xmit_skb);
2446 skb_reset_mac_header(xmit_skb);
2447 dev_queue_xmit(xmit_skb);
/*
 * RX handler: A-MSDU de-aggregation.
 *
 * Validates that an A-MSDU is acceptable for the receiving interface
 * type, converts the aggregate into individual 802.3 subframes and
 * delivers each allowed subframe via ieee80211_deliver_skb().
 *
 * NOTE(review): this extracted chunk is missing several original source
 * lines (the embedded line numbers jump); comments below describe only
 * the visible code.
 */
2451 static ieee80211_rx_result debug_noinline
2452 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2454 struct net_device *dev = rx->sdata->dev;
2455 struct sk_buff *skb = rx->skb;
2456 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2457 __le16 fc = hdr->frame_control;
2458 struct sk_buff_head frame_list;
2459 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2460 struct ethhdr ethhdr;
/* addresses the de-aggregation helper must match subframes against */
2461 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
/* only data frames that actually carry a payload can be A-MSDUs */
2463 if (unlikely(!ieee80211_is_data(fc)))
2466 if (unlikely(!ieee80211_is_data_present(fc)))
2467 return RX_DROP_MONITOR;
/* not flagged as an A-MSDU by the earlier RX classification */
2469 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
/* 4-address frames: only valid on 4-addr AP_VLAN or 4-addr station */
2472 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2473 switch (rx->sdata->vif.type) {
2474 case NL80211_IFTYPE_AP_VLAN:
2475 if (!rx->sdata->u.vlan.sta)
2476 return RX_DROP_UNUSABLE;
2478 case NL80211_IFTYPE_STATION:
2479 if (!rx->sdata->u.mgd.use_4addr)
2480 return RX_DROP_UNUSABLE;
2483 return RX_DROP_UNUSABLE;
/* 3-address frames: per-iftype acceptance rules */
2487 } else switch (rx->sdata->vif.type) {
2488 case NL80211_IFTYPE_AP:
2489 case NL80211_IFTYPE_AP_VLAN:
2492 case NL80211_IFTYPE_STATION:
2494 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2497 case NL80211_IFTYPE_MESH_POINT:
/* A-MSDUs must be addressed to us, never to a group address */
2504 if (is_multicast_ether_addr(hdr->addr1))
2505 return RX_DROP_UNUSABLE;
2508 __skb_queue_head_init(&frame_list);
/* NOTE(review): "ðhdr" below is mojibake for "&ethhdr" (HTML
 * entity corruption during extraction) -- restore before compiling.
 */
2510 if (ieee80211_data_to_8023_exthdr(skb, ðhdr,
2511 rx->sdata->vif.addr,
2512 rx->sdata->vif.type,
2514 return RX_DROP_UNUSABLE;
2518 * We should not receive A-MSDUs on pre-HT connections,
2519 * and HT connections cannot use old ciphers. Thus drop
2520 * them, as in those cases we couldn't even have SPP
2523 switch (rx->key->conf.cipher) {
2524 case WLAN_CIPHER_SUITE_WEP40:
2525 case WLAN_CIPHER_SUITE_WEP104:
2526 case WLAN_CIPHER_SUITE_TKIP:
2527 return RX_DROP_UNUSABLE;
/* split the aggregate into 802.3 frames queued on frame_list */
2533 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2534 rx->sdata->vif.type,
2535 rx->local->hw.extra_tx_headroom,
2536 check_da, check_sa);
/* deliver each subframe that passes the port/EAPOL filter */
2538 while (!skb_queue_empty(&frame_list)) {
2539 rx->skb = __skb_dequeue(&frame_list);
2541 if (!ieee80211_frame_allowed(rx, fc)) {
2542 dev_kfree_skb(rx->skb);
2546 ieee80211_deliver_skb(rx);
2552 #ifdef CONFIG_MAC80211_MESH
/*
 * RX handler: mesh forwarding (compiled only under CONFIG_MAC80211_MESH).
 *
 * Validates the mesh header, learns mesh-proxy (MPP) paths from
 * address-extension fields, and forwards frames not destined for this
 * node by cloning them onto the TX pending queue with a decremented TTL.
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
2553 static ieee80211_rx_result
2554 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2556 struct ieee80211_hdr *fwd_hdr, *hdr;
2557 struct ieee80211_tx_info *info;
2558 struct ieee80211s_hdr *mesh_hdr;
2559 struct sk_buff *skb = rx->skb, *fwd_skb;
2560 struct ieee80211_local *local = rx->local;
2561 struct ieee80211_sub_if_data *sdata = rx->sdata;
2562 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2565 hdr = (struct ieee80211_hdr *) skb->data;
2566 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2568 /* make sure fixed part of mesh header is there, also checks skb len */
2569 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2570 return RX_DROP_MONITOR;
2572 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2574 /* make sure full mesh header is there, also checks skb len */
2575 if (!pskb_may_pull(rx->skb,
2576 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2577 return RX_DROP_MONITOR;
2579 /* reload pointers: pskb_may_pull() may have reallocated skb data */
2580 hdr = (struct ieee80211_hdr *) skb->data;
2581 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2583 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2584 return RX_DROP_MONITOR;
2586 /* frame is in RMC (recent multicast cache), don't forward again */
2587 if (ieee80211_is_data(hdr->frame_control) &&
2588 is_multicast_ether_addr(hdr->addr1) &&
2589 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2590 return RX_DROP_MONITOR;
2592 if (!ieee80211_is_data(hdr->frame_control))
2596 return RX_DROP_MONITOR;
/* learn the proxied address from the mesh address-extension header */
2598 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2599 struct mesh_path *mppath;
2603 if (is_multicast_ether_addr(hdr->addr1)) {
2604 mpp_addr = hdr->addr3;
2605 proxied_addr = mesh_hdr->eaddr1;
2606 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2607 MESH_FLAGS_AE_A5_A6) {
2608 /* has_a4 already checked in ieee80211_rx_mesh_check */
2609 mpp_addr = hdr->addr4;
2610 proxied_addr = mesh_hdr->eaddr2;
2612 return RX_DROP_MONITOR;
/* add or refresh the MPP path entry for the proxied address */
2616 mppath = mpp_path_lookup(sdata, proxied_addr);
2618 mpp_path_add(sdata, proxied_addr, mpp_addr);
2620 spin_lock_bh(&mppath->state_lock);
2621 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2622 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2623 mppath->exp_time = jiffies;
2624 spin_unlock_bh(&mppath->state_lock);
2629 /* Frame has reached destination. Don't forward */
2630 if (!is_multicast_ether_addr(hdr->addr1) &&
2631 ether_addr_equal(sdata->vif.addr, hdr->addr3))
/* drop (to monitor) if the target hardware queue is congested */
2634 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2635 q = sdata->vif.hw_queue[ac];
2636 if (ieee80211_queue_stopped(&local->hw, q)) {
2637 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2638 return RX_DROP_MONITOR;
2640 skb_set_queue_mapping(skb, q);
/* TTL expired: count the drop (unicast only) instead of forwarding */
2642 if (!--mesh_hdr->ttl) {
2643 if (!is_multicast_ether_addr(hdr->addr1))
2644 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2645 dropped_frames_ttl);
2649 if (!ifmsh->mshcfg.dot11MeshForwarding)
/* copy with headroom for TX processing and encryption */
2652 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2653 sdata->encrypt_headroom, 0, GFP_ATOMIC);
2655 net_info_ratelimited("%s: failed to clone mesh frame\n",
2660 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2661 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2662 info = IEEE80211_SKB_CB(fwd_skb);
2663 memset(info, 0, sizeof(*info));
2664 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2665 info->control.vif = &rx->sdata->vif;
2666 info->control.jiffies = jiffies;
2667 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2668 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2669 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2670 /* update power mode indication when forwarding */
2671 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2672 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2673 /* mesh power mode flags updated in mesh_nexthop_lookup */
2674 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2676 /* unable to resolve next hop */
2677 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2679 WLAN_REASON_MESH_PATH_NOFORWARD,
2681 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2683 return RX_DROP_MONITOR;
/* queue the clone for transmission on the pending queue */
2686 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2687 ieee80211_add_pending_skb(local, fwd_skb);
2689 if (is_multicast_ether_addr(hdr->addr1))
2691 return RX_DROP_MONITOR;
/*
 * RX handler: normal data frame processing.
 *
 * Converts an accepted 802.11 data frame to 802.3, applies the port
 * (EAPOL) filter, intercepts TDLS channel-switch frames, pokes the
 * dynamic power-save timer, and delivers the frame to the stack.
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
2695 static ieee80211_rx_result debug_noinline
2696 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2698 struct ieee80211_sub_if_data *sdata = rx->sdata;
2699 struct ieee80211_local *local = rx->local;
2700 struct net_device *dev = sdata->dev;
2701 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2702 __le16 fc = hdr->frame_control;
/* only data frames with a payload are handled here */
2706 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2709 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2710 return RX_DROP_MONITOR;
2713 * Send unexpected-4addr-frame event to hostapd. For older versions,
2714 * also drop the frame to cooked monitor interfaces.
2716 if (ieee80211_has_a4(hdr->frame_control) &&
2717 sdata->vif.type == NL80211_IFTYPE_AP) {
2719 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2720 cfg80211_rx_unexpected_4addr_frame(
2721 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2722 return RX_DROP_MONITOR;
/* 802.11 -> 802.3 header conversion */
2725 err = __ieee80211_data_to_8023(rx, &port_control);
2727 return RX_DROP_UNUSABLE;
2729 if (!ieee80211_frame_allowed(rx, fc))
2730 return RX_DROP_MONITOR;
2732 /* directly handle TDLS channel switch requests/responses */
2733 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2734 cpu_to_be16(ETH_P_TDLS))) {
2735 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2737 if (pskb_may_pull(rx->skb,
2738 offsetof(struct ieee80211_tdls_data, u)) &&
2739 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2740 tf->category == WLAN_CATEGORY_TDLS &&
2741 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2742 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
/* hand off to the TDLS channel-switch worker */
2743 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2744 schedule_work(&local->tdls_chsw_work);
2746 rx->sta->rx_stats.packets++;
/* controlled-port frames on an AP_VLAN go to the underlying AP */
2752 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2753 unlikely(port_control) && sdata->bss) {
2754 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
/* re-arm the dynamic power-save timer for unicast RX while
 * associated, on-channel and not scanning
 */
2762 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2763 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2764 !is_multicast_ether_addr(
2765 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2766 (!local->scanning &&
2767 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2768 mod_timer(&local->dynamic_ps_timer, jiffies +
2769 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2771 ieee80211_deliver_skb(rx);
/*
 * RX handler: control frames (specifically BlockAck requests).
 *
 * On a BAR, releases reordered frames up to the BAR's starting sequence
 * number into @frames, resets the aggregation session timer, and
 * notifies the driver. All other control frames are sent to cooked
 * monitor (RX_DROP_MONITOR).
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
2776 static ieee80211_rx_result debug_noinline
2777 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2779 struct sk_buff *skb = rx->skb;
2780 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2781 struct tid_ampdu_rx *tid_agg_rx;
2785 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2788 if (ieee80211_is_back_req(bar->frame_control)) {
/* copy BAR control + SSN out of the (possibly nonlinear) skb */
2790 __le16 control, start_seq_num;
2791 } __packed bar_data;
2792 struct ieee80211_event event = {
2793 .type = BAR_RX_EVENT,
2797 return RX_DROP_MONITOR;
2799 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2800 &bar_data, sizeof(bar_data)))
2801 return RX_DROP_MONITOR;
/* TID lives in the top 4 bits of the BAR control field */
2803 tid = le16_to_cpu(bar_data.control) >> 12;
/* BAR for a session we never set up: tell the peer to tear down */
2805 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2806 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2807 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2808 WLAN_BACK_RECIPIENT,
2809 WLAN_REASON_QSTA_REQUIRE_SETUP);
2811 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2813 return RX_DROP_MONITOR;
2815 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2816 event.u.ba.tid = tid;
2817 event.u.ba.ssn = start_seq_num;
2818 event.u.ba.sta = &rx->sta->sta;
2820 /* reset session timer */
2821 if (tid_agg_rx->timeout)
2822 mod_timer(&tid_agg_rx->session_timer,
2823 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2825 spin_lock(&tid_agg_rx->reorder_lock);
2826 /* release stored frames up to start of BAR */
2827 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2828 start_seq_num, frames);
2829 spin_unlock(&tid_agg_rx->reorder_lock);
2831 drv_event_callback(rx->local, rx->sdata, &event);
2838 * After this point, we only want management frames,
2839 * so we can drop all remaining control frames to
2840 * cooked monitor interfaces.
2842 return RX_DROP_MONITOR;
/*
 * Answer an SA Query request from the current AP (802.11w/MFP).
 *
 * Validates that the request is a unicast to our own address from the
 * associated AP, then builds and transmits an SA Query response echoing
 * the transaction ID. Silently returns on any validation failure or
 * allocation failure (best-effort; the AP will retry).
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
2845 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2846 struct ieee80211_mgmt *mgmt,
2849 struct ieee80211_local *local = sdata->local;
2850 struct sk_buff *skb;
2851 struct ieee80211_mgmt *resp;
2853 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2854 /* Not to own unicast address */
2858 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2859 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2860 /* Not from the current AP or not associated yet. */
2864 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2865 /* Too short SA Query request frame */
2869 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2873 skb_reserve(skb, local->hw.extra_tx_headroom);
/* 24-byte zeroed management header, then the action body */
2874 resp = skb_put_zero(skb, 24);
2875 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2876 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2877 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2878 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2879 IEEE80211_STYPE_ACTION);
2880 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2881 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2882 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
/* echo the transaction ID so the AP can match request/response */
2883 memcpy(resp->u.action.u.sa_query.trans_id,
2884 mgmt->u.action.u.sa_query.trans_id,
2885 WLAN_SA_QUERY_TR_ID_LEN);
2887 ieee80211_tx_skb(sdata, skb);
/*
 * RX handler: sanity checks common to all management frames.
 *
 * Drops anything that is not a well-formed management frame, reports
 * overlapping-BSS beacons to cfg80211 once per RX (AP mode), and
 * enforces management frame protection via
 * ieee80211_drop_unencrypted_mgmt().
 */
2890 static ieee80211_rx_result debug_noinline
2891 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2893 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2894 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2897 * From here on, look only at management frames.
2898 * Data and control frames are already handled,
2899 * and unknown (reserved) frames are useless.
2901 if (rx->skb->len < 24)
2902 return RX_DROP_MONITOR;
2904 if (!ieee80211_is_mgmt(mgmt->frame_control))
2905 return RX_DROP_MONITOR;
/* report each OBSS beacon to userspace at most once per RX pass */
2907 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2908 ieee80211_is_beacon(mgmt->frame_control) &&
2909 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2912 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
2913 sig = status->signal;
2915 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2916 rx->skb->data, rx->skb->len,
2918 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2921 if (ieee80211_drop_unencrypted_mgmt(rx))
2922 return RX_DROP_UNUSABLE;
/*
 * RX handler: action frames.
 *
 * Per-category validation and dispatch. HT SMPS/chanwidth notifications
 * are applied inline; most other categories are length/iftype-checked
 * and then queued to the interface work queue for further processing.
 * Frames that fail validation are marked MALFORMED_ACTION_FRM so the
 * following handlers can return them to the sender.
 *
 * NOTE(review): many original lines (break statements, labels, some
 * checks) are missing from this extracted chunk; comments describe only
 * the visible code.
 */
2927 static ieee80211_rx_result debug_noinline
2928 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2930 struct ieee80211_local *local = rx->local;
2931 struct ieee80211_sub_if_data *sdata = rx->sdata;
2932 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2933 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2934 int len = rx->skb->len;
2936 if (!ieee80211_is_action(mgmt->frame_control))
2939 /* drop too small frames */
2940 if (len < IEEE80211_MIN_ACTION_SIZE)
2941 return RX_DROP_UNUSABLE;
/* without a known station, only these categories are acceptable */
2943 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
2944 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
2945 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
2946 return RX_DROP_UNUSABLE;
2948 switch (mgmt->u.action.category) {
2949 case WLAN_CATEGORY_HT:
2950 /* reject HT action frames from stations not supporting HT */
2951 if (!rx->sta->sta.ht_cap.ht_supported)
2954 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2955 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2956 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2957 sdata->vif.type != NL80211_IFTYPE_AP &&
2958 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2961 /* verify action & smps_control/chanwidth are present */
2962 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2965 switch (mgmt->u.action.u.ht_smps.action) {
2966 case WLAN_HT_ACTION_SMPS: {
2967 struct ieee80211_supported_band *sband;
2968 enum ieee80211_smps_mode smps_mode;
2970 /* convert to HT capability */
2971 switch (mgmt->u.action.u.ht_smps.smps_control) {
2972 case WLAN_HT_SMPS_CONTROL_DISABLED:
2973 smps_mode = IEEE80211_SMPS_OFF;
2975 case WLAN_HT_SMPS_CONTROL_STATIC:
2976 smps_mode = IEEE80211_SMPS_STATIC;
2978 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2979 smps_mode = IEEE80211_SMPS_DYNAMIC;
2985 /* if no change do nothing */
2986 if (rx->sta->sta.smps_mode == smps_mode)
2988 rx->sta->sta.smps_mode = smps_mode;
/* tell rate control the peer's SMPS mode changed */
2990 sband = rx->local->hw.wiphy->bands[status->band];
2992 rate_control_rate_update(local, sband, rx->sta,
2993 IEEE80211_RC_SMPS_CHANGED);
2996 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
2997 struct ieee80211_supported_band *sband;
2998 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
2999 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3001 /* If it doesn't support 40 MHz it can't change ... */
3002 if (!(rx->sta->sta.ht_cap.cap &
3003 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3006 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3007 max_bw = IEEE80211_STA_RX_BW_20;
3009 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3011 /* set cur_max_bandwidth and recalc sta bw */
3012 rx->sta->cur_max_bandwidth = max_bw;
3013 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3015 if (rx->sta->sta.bandwidth == new_bw)
3018 rx->sta->sta.bandwidth = new_bw;
3019 sband = rx->local->hw.wiphy->bands[status->band];
3021 rate_control_rate_update(local, sband, rx->sta,
3022 IEEE80211_RC_BW_CHANGED);
/* extended channel switch announcement (station mode only) */
3030 case WLAN_CATEGORY_PUBLIC:
3031 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3033 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3037 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3039 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3040 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3042 if (len < offsetof(struct ieee80211_mgmt,
3043 u.action.u.ext_chan_switch.variable))
3046 case WLAN_CATEGORY_VHT:
3047 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3048 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3049 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3050 sdata->vif.type != NL80211_IFTYPE_AP &&
3051 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3054 /* verify action code is present */
3055 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3058 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3059 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3060 /* verify opmode is present */
3061 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3065 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3066 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
/* BlockAck setup/teardown: length-check per action code */
3074 case WLAN_CATEGORY_BACK:
3075 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3076 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3077 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3078 sdata->vif.type != NL80211_IFTYPE_AP &&
3079 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3082 /* verify action_code is present */
3083 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3086 switch (mgmt->u.action.u.addba_req.action_code) {
3087 case WLAN_ACTION_ADDBA_REQ:
3088 if (len < (IEEE80211_MIN_ACTION_SIZE +
3089 sizeof(mgmt->u.action.u.addba_req)))
3092 case WLAN_ACTION_ADDBA_RESP:
3093 if (len < (IEEE80211_MIN_ACTION_SIZE +
3094 sizeof(mgmt->u.action.u.addba_resp)))
3097 case WLAN_ACTION_DELBA:
3098 if (len < (IEEE80211_MIN_ACTION_SIZE +
3099 sizeof(mgmt->u.action.u.delba)))
3107 case WLAN_CATEGORY_SPECTRUM_MGMT:
3108 /* verify action_code is present */
3109 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3112 switch (mgmt->u.action.u.measurement.action_code) {
3113 case WLAN_ACTION_SPCT_MSR_REQ:
/* measurement requests only apply on the 5 GHz band here */
3114 if (status->band != NL80211_BAND_5GHZ)
3117 if (len < (IEEE80211_MIN_ACTION_SIZE +
3118 sizeof(mgmt->u.action.u.measurement)))
3121 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3124 ieee80211_process_measurement_req(sdata, mgmt, len);
3126 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3128 if (len < (IEEE80211_MIN_ACTION_SIZE +
3129 sizeof(mgmt->u.action.u.chan_switch)))
3132 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3133 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3134 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
/* pick the BSSID to validate against, per interface type */
3137 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3138 bssid = sdata->u.mgd.bssid;
3139 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3140 bssid = sdata->u.ibss.bssid;
3141 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3146 if (!ether_addr_equal(mgmt->bssid, bssid))
3153 case WLAN_CATEGORY_SA_QUERY:
3154 if (len < (IEEE80211_MIN_ACTION_SIZE +
3155 sizeof(mgmt->u.action.u.sa_query)))
3158 switch (mgmt->u.action.u.sa_query.action) {
3159 case WLAN_ACTION_SA_QUERY_REQUEST:
3160 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3162 ieee80211_process_sa_query_req(sdata, mgmt, len);
3166 case WLAN_CATEGORY_SELF_PROTECTED:
3167 if (len < (IEEE80211_MIN_ACTION_SIZE +
3168 sizeof(mgmt->u.action.u.self_prot.action_code)))
3171 switch (mgmt->u.action.u.self_prot.action_code) {
3172 case WLAN_SP_MESH_PEERING_OPEN:
3173 case WLAN_SP_MESH_PEERING_CLOSE:
3174 case WLAN_SP_MESH_PEERING_CONFIRM:
3175 if (!ieee80211_vif_is_mesh(&sdata->vif))
3177 if (sdata->u.mesh.user_mpm)
3178 /* userspace handles this frame */
3181 case WLAN_SP_MGK_INFORM:
3182 case WLAN_SP_MGK_ACK:
3183 if (!ieee80211_vif_is_mesh(&sdata->vif))
3188 case WLAN_CATEGORY_MESH_ACTION:
3189 if (len < (IEEE80211_MIN_ACTION_SIZE +
3190 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3193 if (!ieee80211_vif_is_mesh(&sdata->vif))
3195 if (mesh_action_is_path_sel(mgmt) &&
3196 !mesh_path_sel_is_hwmp(sdata))
/* validation failed: mark so later handlers return it to sender */
3204 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3205 /* will return in the next handlers */
3210 rx->sta->rx_stats.packets++;
3211 dev_kfree_skb(rx->skb);
/* defer: queue the frame for the interface worker */
3215 skb_queue_tail(&sdata->skb_queue, rx->skb);
3216 ieee80211_queue_work(&local->hw, &sdata->work);
3218 rx->sta->rx_stats.packets++;
/*
 * RX handler: offer management frames to userspace via cfg80211.
 *
 * Frames already flagged as malformed action frames are skipped (the
 * next handler returns them to the sender). If a userspace listener
 * consumed the frame via cfg80211_rx_mgmt(), account and free it here.
 */
3222 static ieee80211_rx_result debug_noinline
3223 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3225 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3228 /* skip known-bad action frames and return them in the next handler */
3229 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3233 * Getting here means the kernel doesn't know how to handle
3234 * it, but maybe userspace does ... include returned frames
3235 * so userspace can register for those to know whether ones
3236 * it transmitted were processed or returned.
/* only pass a real signal value if the hardware reports dBm */
3239 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
3240 sig = status->signal;
3242 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
3243 rx->skb->data, rx->skb->len, 0)) {
3245 rx->sta->rx_stats.packets++;
3246 dev_kfree_skb(rx->skb);
/*
 * RX handler: bounce unhandled action frames back to the sender.
 *
 * Per 802.11-2012 9.24.4, an unhandled (non-rejected) unicast action
 * frame is returned to its sender with bit 0x80 set in the category.
 * AP/AP_VLAN interfaces instead rely on hostapd via cooked monitor.
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
3253 static ieee80211_rx_result debug_noinline
3254 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3256 struct ieee80211_local *local = rx->local;
3257 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3258 struct sk_buff *nskb;
3259 struct ieee80211_sub_if_data *sdata = rx->sdata;
3260 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3262 if (!ieee80211_is_action(mgmt->frame_control))
3266 * For AP mode, hostapd is responsible for handling any action
3267 * frames that we didn't handle, including returning unknown
3268 * ones. For all other modes we will return them to the sender,
3269 * setting the 0x80 bit in the action category, as required by
3270 * 802.11-2012 9.24.4.
3271 * Newer versions of hostapd shall also use the management frame
3272 * registration mechanisms, but older ones still use cooked
3273 * monitor interfaces so push all frames there.
3275 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3276 (sdata->vif.type == NL80211_IFTYPE_AP ||
3277 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3278 return RX_DROP_MONITOR;
3280 if (is_multicast_ether_addr(mgmt->da))
3281 return RX_DROP_MONITOR;
3283 /* do not return rejected action frames */
3284 if (mgmt->u.action.category & 0x80)
3285 return RX_DROP_UNUSABLE;
/* clone with TX headroom; swap DA/SA and flag the category */
3287 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3290 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3292 nmgmt->u.action.category |= 0x80;
3293 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3294 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3296 memset(nskb->cb, 0, sizeof(nskb->cb));
/* P2P device: the reply may need to go out off-channel */
3298 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3299 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3301 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3302 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3303 IEEE80211_TX_CTL_NO_CCK_RATE;
3304 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3306 local->hw.offchannel_tx_hw_queue;
/* TID 7: send the bounced frame at highest priority */
3309 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3312 dev_kfree_skb(rx->skb);
/*
 * RX handler: final management frame dispatch.
 *
 * Filters by subtype and interface type, then queues accepted frames
 * on the interface skb queue for the MLME/mesh/IBSS worker. Frames
 * that no interface type should process go to cooked monitor.
 */
3316 static ieee80211_rx_result debug_noinline
3317 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3319 struct ieee80211_sub_if_data *sdata = rx->sdata;
3320 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3323 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
/* only mesh/IBSS/OCB/station interfaces process mgmt frames here */
3325 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3326 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3327 sdata->vif.type != NL80211_IFTYPE_OCB &&
3328 sdata->vif.type != NL80211_IFTYPE_STATION)
3329 return RX_DROP_MONITOR;
3332 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3333 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3334 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3335 /* process for all: mesh, mlme, ibss */
3337 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
/* multicast deauth must be the broadcast address */
3338 if (is_multicast_ether_addr(mgmt->da) &&
3339 !is_broadcast_ether_addr(mgmt->da))
3340 return RX_DROP_MONITOR;
3342 /* process only for station/IBSS */
3343 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3344 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3345 return RX_DROP_MONITOR;
3347 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3348 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3349 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3350 if (is_multicast_ether_addr(mgmt->da) &&
3351 !is_broadcast_ether_addr(mgmt->da))
3352 return RX_DROP_MONITOR;
3354 /* process only for station */
3355 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3356 return RX_DROP_MONITOR;
3358 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3359 /* process only for ibss and mesh */
3360 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3361 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3362 return RX_DROP_MONITOR;
3365 return RX_DROP_MONITOR;
3368 /* queue up frame and kick off work to process it */
3369 skb_queue_tail(&sdata->skb_queue, rx->skb);
3370 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3372 rx->sta->rx_stats.packets++;
/*
 * Deliver a dropped-to-monitor frame to all cooked monitor interfaces.
 *
 * Adds a radiotap header and clones the skb to every running monitor
 * interface with MONITOR_FLAG_COOK_FRAMES; runs at most once per frame
 * (guarded by IEEE80211_RX_CMNTR).
 */
3377 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3378 struct ieee80211_rate *rate)
3380 struct ieee80211_sub_if_data *sdata;
3381 struct ieee80211_local *local = rx->local;
3382 struct sk_buff *skb = rx->skb, *skb2;
3383 struct net_device *prev_dev = NULL;
3384 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3385 int needed_headroom;
3388 * If cooked monitor has been processed already, then
3389 * don't do it again. If not, set the flag.
3391 if (rx->flags & IEEE80211_RX_CMNTR)
3393 rx->flags |= IEEE80211_RX_CMNTR;
3395 /* If there are no cooked monitor interfaces, just free the SKB */
3396 if (!local->cooked_mntrs)
3399 /* vendor data is long removed here */
3400 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3401 /* room for the radiotap header based on driver features */
3402 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3404 if (skb_headroom(skb) < needed_headroom &&
3405 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3408 /* prepend radiotap information */
3409 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3412 skb_reset_mac_header(skb);
3413 skb->ip_summed = CHECKSUM_UNNECESSARY;
3414 skb->pkt_type = PACKET_OTHERHOST;
3415 skb->protocol = htons(ETH_P_802_2);
/* clone for every cooked monitor but the last; give it the original */
3417 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3418 if (!ieee80211_sdata_running(sdata))
3421 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3422 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3426 skb2 = skb_clone(skb, GFP_ATOMIC);
3428 skb2->dev = prev_dev;
3429 netif_receive_skb(skb2);
3433 prev_dev = sdata->dev;
3434 ieee80211_rx_stats(sdata->dev, skb->len);
3438 skb->dev = prev_dev;
3439 netif_receive_skb(skb);
/*
 * Act on the final result of the RX handler chain for one frame:
 * RX_DROP_MONITOR hands the frame to cooked monitor (with the bitrate
 * looked up for non-HT/VHT frames), RX_DROP_UNUSABLE frees it, and the
 * queued case only updates debug counters.
 */
3447 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3448 ieee80211_rx_result res)
3451 case RX_DROP_MONITOR:
3452 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3454 rx->sta->rx_stats.dropped++;
3457 struct ieee80211_rate *rate = NULL;
3458 struct ieee80211_supported_band *sband;
3459 struct ieee80211_rx_status *status;
3461 status = IEEE80211_SKB_RXCB((rx->skb));
3463 sband = rx->local->hw.wiphy->bands[status->band];
/* HT/VHT have no bitrate table entry; leave rate NULL then */
3464 if (!(status->encoding == RX_ENC_HT) &&
3465 !(status->encoding == RX_ENC_VHT))
3466 rate = &sband->bitrates[status->rate_idx];
3468 ieee80211_rx_cooked_monitor(rx, rate);
3471 case RX_DROP_UNUSABLE:
3472 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3474 rx->sta->rx_stats.dropped++;
3475 dev_kfree_skb(rx->skb);
3478 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
/*
 * Run the slow-path RX handler chain on every frame in @frames.
 *
 * Holds rx_path_lock for the whole batch so reorder-timeout releases
 * cannot race with driver RX over shared key/station state. CALL_RXH
 * short-circuits to the result handling as soon as a handler returns
 * anything other than RX_CONTINUE.
 */
3483 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3484 struct sk_buff_head *frames)
3486 ieee80211_rx_result res = RX_DROP_MONITOR;
3487 struct sk_buff *skb;
3489 #define CALL_RXH(rxh) \
3492 if (res != RX_CONTINUE) \
3496 /* Lock here to avoid hitting all of the data used in the RX
3497 * path (e.g. key data, station data, ...) concurrently when
3498 * a frame is released from the reorder buffer due to timeout
3499 * from the timer, potentially concurrently with RX from the
3502 spin_lock_bh(&rx->local->rx_path_lock);
3504 while ((skb = __skb_dequeue(frames))) {
3506 * all the other fields are valid across frames
3507 * that belong to an aMPDU since they are on the
3508 * same TID from the same station
3512 CALL_RXH(ieee80211_rx_h_check_more_data);
3513 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3514 CALL_RXH(ieee80211_rx_h_sta_process);
3515 CALL_RXH(ieee80211_rx_h_decrypt);
3516 CALL_RXH(ieee80211_rx_h_defragment);
3517 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3518 /* must be after MMIC verify so header is counted in MPDU mic */
3519 #ifdef CONFIG_MAC80211_MESH
3520 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3521 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3523 CALL_RXH(ieee80211_rx_h_amsdu);
3524 CALL_RXH(ieee80211_rx_h_data);
3526 /* special treatment -- needs the queue */
3527 res = ieee80211_rx_h_ctrl(rx, frames);
3528 if (res != RX_CONTINUE)
3531 CALL_RXH(ieee80211_rx_h_mgmt_check);
3532 CALL_RXH(ieee80211_rx_h_action);
3533 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3534 CALL_RXH(ieee80211_rx_h_action_return);
3535 CALL_RXH(ieee80211_rx_h_mgmt);
3538 ieee80211_rx_handlers_result(rx, res);
3543 spin_unlock_bh(&rx->local->rx_path_lock);
/*
 * Entry point of the per-frame RX handler chain: run the early checks
 * (duplicate detection, basic validation), feed the frame through the
 * A-MPDU reorder buffer, then process whatever the reorderer released.
 */
3546 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3548 struct sk_buff_head reorder_release;
3549 ieee80211_rx_result res = RX_DROP_MONITOR;
3551 __skb_queue_head_init(&reorder_release);
3553 #define CALL_RXH(rxh) \
3556 if (res != RX_CONTINUE) \
/* early handlers run before reordering */
3560 CALL_RXH(ieee80211_rx_h_check_dup);
3561 CALL_RXH(ieee80211_rx_h_check);
3563 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3565 ieee80211_rx_handlers(rx, &reorder_release);
3569 ieee80211_rx_handlers_result(rx, res);
3575 * This function makes calls into the RX path, therefore
3576 * it has to be invoked under RCU read lock.
/*
 * Flush frames from a TID's reorder buffer whose timeout expired and
 * run them through the RX handlers; notifies the driver with a
 * BA_FRAME_TIMEOUT event if anything was released.
 */
3578 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3580 struct sk_buff_head frames;
3581 struct ieee80211_rx_data rx = {
3583 .sdata = sta->sdata,
3584 .local = sta->local,
3585 /* This is OK -- must be QoS data frame */
3586 .security_idx = tid,
3588 .napi = NULL, /* must be NULL to not have races */
3590 struct tid_ampdu_rx *tid_agg_rx;
3592 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3596 __skb_queue_head_init(&frames);
3598 spin_lock(&tid_agg_rx->reorder_lock);
3599 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3600 spin_unlock(&tid_agg_rx->reorder_lock);
3602 if (!skb_queue_empty(&frames)) {
3603 struct ieee80211_event event = {
3604 .type = BA_FRAME_TIMEOUT,
3606 .u.ba.sta = &sta->sta,
3608 drv_event_callback(rx.local, rx.sdata, &event);
3611 ieee80211_rx_handlers(&rx, &frames);
/*
 * Driver API: handle a BA notification for frames the hardware
 * filtered out of an aggregate.
 *
 * Advances the reorder window to @ssn (flushing everything if the
 * reported MPDU count implies a large jump), marks the filtered
 * positions in the reorder bitmap, and processes any frames the
 * marking released.
 *
 * NOTE(review): lines are missing from this extracted chunk; comments
 * describe only the visible code.
 */
3614 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3615 u16 ssn, u64 filtered,
3618 struct sta_info *sta;
3619 struct tid_ampdu_rx *tid_agg_rx;
3620 struct sk_buff_head frames;
3621 struct ieee80211_rx_data rx = {
3622 /* This is OK -- must be QoS data frame */
3623 .security_idx = tid,
3628 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3631 __skb_queue_head_init(&frames);
3633 sta = container_of(pubsta, struct sta_info, sta);
3636 rx.sdata = sta->sdata;
3637 rx.local = sta->local;
3640 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3644 spin_lock_bh(&tid_agg_rx->reorder_lock);
/* large jump (>= half the sequence space): flush the whole buffer */
3646 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3649 /* release all frames in the reorder buffer */
3650 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3651 IEEE80211_SN_MODULO;
3652 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3654 /* update ssn to match received ssn */
3655 tid_agg_rx->head_seq_num = ssn;
3657 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3661 /* handle the case that received ssn is behind the mac ssn.
3662 * it can be tid_agg_rx->buf_size behind and still be valid */
3663 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3664 if (diff >= tid_agg_rx->buf_size) {
3665 tid_agg_rx->reorder_buf_filtered = 0;
3668 filtered = filtered >> diff;
/* translate the filtered bitmap into reorder-buffer slot indices */
3672 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3673 int index = (ssn + i) % tid_agg_rx->buf_size;
3675 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3676 if (filtered & BIT_ULL(i))
3677 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3680 /* now process also frames that the filter marking released */
3681 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3684 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3686 ieee80211_rx_handlers(&rx, &frames);
3691 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3693 /* main receive path */
/*
 * Decide whether the frame in rx->skb is destined for rx->sdata, based on
 * the interface type and the frame's addresses/BSSID.  Returns true to
 * continue RX processing on this interface, false to ignore the frame.
 * Note: elided lines in this listing hide some return statements and
 * fall-through paths — consult the full source before modifying.
 */
3695 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3697 struct ieee80211_sub_if_data *sdata = rx->sdata;
3698 struct sk_buff *skb = rx->skb;
3699 struct ieee80211_hdr *hdr = (void *)skb->data;
3700 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3701 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3702 bool multicast = is_multicast_ether_addr(hdr->addr1);
3704 switch (sdata->vif.type) {
3705 case NL80211_IFTYPE_STATION:
3706 if (!bssid && !sdata->u.mgd.use_4addr)
/* robust mgmt frames need an STA entry for key lookup */
3708 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3712 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3713 case NL80211_IFTYPE_ADHOC:
3716 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3717 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
3718 !is_valid_ether_addr(hdr->addr2))
3720 if (ieee80211_is_beacon(hdr->frame_control))
3722 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3725 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3729 if (status->encoding != RX_ENC_LEGACY)
3730 rate_idx = 0; /* TODO: HT/VHT rates */
3732 rate_idx = status->rate_idx;
/* create an IBSS peer entry for a frame from an unknown station */
3733 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3737 case NL80211_IFTYPE_OCB:
3740 if (!ieee80211_is_data_present(hdr->frame_control))
3742 if (!is_broadcast_ether_addr(bssid))
3745 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
3749 if (status->encoding != RX_ENC_LEGACY)
3750 rate_idx = 0; /* TODO: HT rates */
3752 rate_idx = status->rate_idx;
3753 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
3757 case NL80211_IFTYPE_MESH_POINT:
/* don't process our own frames reflected back at us */
3758 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3762 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3763 case NL80211_IFTYPE_AP_VLAN:
3764 case NL80211_IFTYPE_AP:
3766 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3768 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
3770 * Accept public action frames even when the
3771 * BSSID doesn't match, this is used for P2P
3772 * and location updates. Note that mac80211
3773 * itself never looks at these frames.
3776 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3778 if (ieee80211_is_public_action(hdr, skb->len))
3780 return ieee80211_is_beacon(hdr->frame_control);
3783 if (!ieee80211_has_tods(hdr->frame_control)) {
3784 /* ignore data frames to TDLS-peers */
3785 if (ieee80211_is_data(hdr->frame_control))
3787 /* ignore action frames to TDLS-peers */
3788 if (ieee80211_is_action(hdr->frame_control) &&
3789 !is_broadcast_ether_addr(bssid) &&
3790 !ether_addr_equal(bssid, hdr->addr1))
3795 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3796 * the BSSID - we've checked that already but may have accepted
3797 * the wildcard (ff:ff:ff:ff:ff:ff).
3800 * The BSSID of the Data frame is determined as follows:
3801 * a) If the STA is contained within an AP or is associated
3802 * with an AP, the BSSID is the address currently in use
3803 * by the STA contained in the AP.
3805 * So we should not accept data frames with an address that's
3808 * Accepting it also opens a security problem because stations
3809 * could encrypt it with the GTK and inject traffic that way.
3811 if (ieee80211_is_data(hdr->frame_control) && multicast)
3815 case NL80211_IFTYPE_WDS:
3816 if (bssid || !ieee80211_is_data(hdr->frame_control))
3818 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
3819 case NL80211_IFTYPE_P2P_DEVICE:
3820 return ieee80211_is_public_action(hdr, skb->len) ||
3821 ieee80211_is_probe_req(hdr->frame_control) ||
3822 ieee80211_is_probe_resp(hdr->frame_control) ||
3823 ieee80211_is_beacon(hdr->frame_control);
3824 case NL80211_IFTYPE_NAN:
3825 /* Currently no frames on NAN interface are allowed */
/*
 * (Re-)evaluate whether fast-RX can be used for @sta and publish (or
 * clear) the RCU-protected sta->fast_rx descriptor accordingly.  Fast-RX
 * is only enabled when the interface type, hardware capabilities, power
 * save configuration and cipher all allow the simplified data path in
 * ieee80211_invoke_fast_rx().
 */
3835 void ieee80211_check_fast_rx(struct sta_info *sta)
3837 struct ieee80211_sub_if_data *sdata = sta->sdata;
3838 struct ieee80211_local *local = sdata->local;
3839 struct ieee80211_key *key;
3840 struct ieee80211_fast_rx fastrx = {
3842 .vif_type = sdata->vif.type,
3843 .control_port_protocol = sdata->control_port_protocol,
3844 }, *old, *new = NULL;
3845 bool assign = false;
3847 /* use sparse to check that we don't return without updating */
3848 __acquire(check_fast_rx);
3850 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
3851 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
3852 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
3853 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
3855 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
3857 /* fast-rx doesn't do reordering */
3858 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
3859 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
3862 switch (sdata->vif.type) {
3863 case NL80211_IFTYPE_STATION:
3864 /* 4-addr is harder to deal with, later maybe */
3865 if (sdata->u.mgd.use_4addr)
3867 /* software powersave is a huge mess, avoid all of it */
3868 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
3870 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
3871 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
/* TDLS peers talk to us directly: DA/SA from addr1/addr2, no DS bits */
3873 if (sta->sta.tdls) {
3874 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3875 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3876 fastrx.expected_ds_bits = 0;
3878 fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
3879 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3880 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
3881 fastrx.expected_ds_bits =
3882 cpu_to_le16(IEEE80211_FCTL_FROMDS);
3885 case NL80211_IFTYPE_AP_VLAN:
3886 case NL80211_IFTYPE_AP:
3887 /* parallel-rx requires this, at least with calls to
3888 * ieee80211_sta_ps_transition()
3890 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
3892 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3893 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3894 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
3896 fastrx.internal_forward =
3897 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
3898 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
3899 !sdata->u.vlan.sta);
3905 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
/* prefer the pairwise key; fall back to the default unicast key */
3909 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
3911 key = rcu_dereference(sdata->default_unicast_key);
3913 switch (key->conf.cipher) {
3914 case WLAN_CIPHER_SUITE_TKIP:
3915 /* we don't want to deal with MMIC in fast-rx */
3917 case WLAN_CIPHER_SUITE_CCMP:
3918 case WLAN_CIPHER_SUITE_CCMP_256:
3919 case WLAN_CIPHER_SUITE_GCMP:
3920 case WLAN_CIPHER_SUITE_GCMP_256:
3923 /* we also don't want to deal with WEP or cipher scheme
3924 * since those require looking up the key idx in the
3925 * frame, rather than assuming the PTK is used
3926 * (we need to revisit this once we implement the real
3927 * PTK index, which is now valid in the spec, but we
3928 * haven't implemented that part yet)
3934 fastrx.icv_len = key->conf.icv_len;
3941 __release(check_fast_rx);
/* publish the new descriptor (or NULL) and free the old one via RCU */
3944 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
3946 spin_lock_bh(&sta->lock);
3947 old = rcu_dereference_protected(sta->fast_rx, true);
3948 rcu_assign_pointer(sta->fast_rx, new);
3949 spin_unlock_bh(&sta->lock);
3952 kfree_rcu(old, rcu_head);
/*
 * Disable fast-RX for @sta: detach the published descriptor under the
 * STA lock and free it after an RCU grace period, so concurrent readers
 * on the RX path remain safe.
 */
3955 void ieee80211_clear_fast_rx(struct sta_info *sta)
3957 struct ieee80211_fast_rx *old;
3959 spin_lock_bh(&sta->lock);
3960 old = rcu_dereference_protected(sta->fast_rx, true);
3961 RCU_INIT_POINTER(sta->fast_rx, NULL);
3962 spin_unlock_bh(&sta->lock);
3965 kfree_rcu(old, rcu_head);
/*
 * Re-evaluate fast-RX for every station belonging to @sdata (or sharing
 * its BSS).  Caller must hold local->sta_mtx.
 */
3968 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3970 struct ieee80211_local *local = sdata->local;
3971 struct sta_info *sta;
3973 lockdep_assert_held(&local->sta_mtx);
3975 list_for_each_entry(sta, &local->sta_list, list) {
/* skip stations on unrelated interfaces/BSSes */
3976 if (sdata != sta->sdata &&
3977 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
3979 ieee80211_check_fast_rx(sta);
/*
 * Locked wrapper around __ieee80211_check_fast_rx_iface(): takes
 * local->sta_mtx for callers that don't already hold it.
 */
3983 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
3985 struct ieee80211_local *local = sdata->local;
3987 mutex_lock(&local->sta_mtx);
3988 __ieee80211_check_fast_rx_iface(sdata);
3989 mutex_unlock(&local->sta_mtx);
/*
 * Fast data-RX path: bypass the full RX handler chain for simple,
 * hardware-validated data frames.  Returns true if the frame was handled
 * (consumed or dropped) here; false punts it to the regular slow path.
 *
 * FIX: the MIC/ICV-trim check and the drop-unencrypted check below used
 * rx->key, which is never assigned on the fast path (the slow-path
 * handlers that set it are bypassed), so both checks were dead.  Use
 * fast_rx->key, consistent with the IV-strip check, matching upstream
 * commit 622d3b4e3938 ("mac80211: fix fast-rx encryption check").
 */
3992 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3993 struct ieee80211_fast_rx *fast_rx)
3995 struct sk_buff *skb = rx->skb;
3996 struct ieee80211_hdr *hdr = (void *)skb->data;
3997 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3998 struct sta_info *sta = rx->sta;
3999 int orig_len = skb->len;
4000 int snap_offs = ieee80211_hdrlen(hdr->frame_control);
4002 u8 snap[sizeof(rfc1042_header)];
4004 } *payload __aligned(2);
4008 } addrs __aligned(2);
4009 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4011 if (fast_rx->uses_rss)
4012 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4014 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4015 * to a common data structure; drivers can implement that per queue
4016 * but we don't have that information in mac80211
4018 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4021 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4023 /* If using encryption, we also need to have:
4024 * - PN_VALIDATED: similar, but the implementation is tricky
4025 * - DECRYPTED: necessary for PN_VALIDATED
4028 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4031 /* we don't deal with A-MSDU deaggregation here */
4032 if (status->rx_flags & IEEE80211_RX_AMSDU)
4035 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4038 if (unlikely(ieee80211_is_frag(hdr)))
4041 /* Since our interface address cannot be multicast, this
4042 * implicitly also rejects multicast frames without the
4045 * We shouldn't get any *data* frames not addressed to us
4046 * (AP mode will accept multicast *management* frames), but
4047 * punting here will make it go through the full checks in
4048 * ieee80211_accept_frame().
4050 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4053 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4054 IEEE80211_FCTL_TODS)) !=
4055 fast_rx->expected_ds_bits)
4058 /* assign the key to drop unencrypted frames (later)
4059 * and strip the IV/MIC if necessary
4061 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4062 /* GCMP header length is the same */
4063 snap_offs += IEEE80211_CCMP_HDR_LEN;
4066 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4068 payload = (void *)(skb->data + snap_offs);
4070 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4073 /* Don't handle these here since they require special code.
4074 * Accept AARP and IPX even though they should come with a
4075 * bridge-tunnel header - but if we get them this way then
4076 * there's little point in discarding them.
4078 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4079 payload->proto == fast_rx->control_port_protocol))
4082 /* after this point, don't punt to the slowpath! */
/* was rx->key (always NULL here) -- must test the fast-rx key */
4084 if (fast_rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4085 pskb_trim(skb, skb->len - fast_rx->icv_len))
4088 if (unlikely(fast_rx->sta_notify)) {
4089 ieee80211_sta_rx_notify(rx->sdata, hdr);
4090 fast_rx->sta_notify = false;
4093 /* statistics part of ieee80211_rx_h_sta_process() */
4094 stats->last_rx = jiffies;
4095 stats->last_rate = sta_stats_encode_rate(status);
4100 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4101 stats->last_signal = status->signal;
4102 if (!fast_rx->uses_rss)
4103 ewma_signal_add(&sta->rx_stats_avg.signal,
4107 if (status->chains) {
4110 stats->chains = status->chains;
4111 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4112 int signal = status->chain_signal[i];
4114 if (!(status->chains & BIT(i)))
4117 stats->chain_signal_last[i] = signal;
4118 if (!fast_rx->uses_rss)
4119 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4123 /* end of statistics */
/* was rx->key (always NULL here): drop unencrypted frames when a key is set */
4125 if (fast_rx->key && !ieee80211_has_protected(hdr->frame_control))
4128 /* do the header conversion - first grab the addresses */
4129 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4130 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4131 /* remove the SNAP but leave the ethertype */
4132 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4133 /* push the addresses in front */
4134 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4136 skb->dev = fast_rx->dev;
4138 ieee80211_rx_stats(fast_rx->dev, skb->len);
4140 /* The seqno index has the same property as needed
4141 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4142 * for non-QoS-data frames. Here we know it's a data
4143 * frame, so count MSDUs.
4145 u64_stats_update_begin(&stats->syncp);
4146 stats->msdu[rx->seqno_idx]++;
4147 stats->bytes += orig_len;
4148 u64_stats_update_end(&stats->syncp);
4150 if (fast_rx->internal_forward) {
4151 struct sk_buff *xmit_skb = NULL;
4152 bool multicast = is_multicast_ether_addr(skb->data);
4155 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4156 } else if (sta_info_get(rx->sdata, skb->data)) {
4163 * Send to wireless media and increase priority by 256
4164 * to keep the received priority instead of
4165 * reclassifying the frame (see cfg80211_classify8021d).
4167 xmit_skb->priority += 256;
4168 xmit_skb->protocol = htons(ETH_P_802_3);
4169 skb_reset_network_header(xmit_skb);
4170 skb_reset_mac_header(xmit_skb);
4171 dev_queue_xmit(xmit_skb);
4178 /* deliver to local stack */
4179 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4180 memset(skb->cb, 0, sizeof(skb->cb));
4182 napi_gro_receive(rx->napi, skb);
4184 netif_receive_skb(skb);
4194 * This function returns whether or not the SKB
4195 * was destined for RX processing or not, which,
4196 * if consume is true, is equivalent to whether
4197 * or not the skb was consumed.
/*
 * Try fast-RX first (only when we own the skb), then fall back to the
 * full accept check and RX handler chain.  When @consume is false the
 * skb must be copied before processing since the caller keeps it.
 */
4199 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4200 struct sk_buff *skb, bool consume)
4202 struct ieee80211_local *local = rx->local;
4203 struct ieee80211_sub_if_data *sdata = rx->sdata;
4207 /* See if we can do fast-rx; if we have to copy we already lost,
4208 * so punt in that case. We should never have to deliver a data
4209 * frame to multiple interfaces anyway.
4211 * We skip the ieee80211_accept_frame() call and do the necessary
4212 * checking inside ieee80211_invoke_fast_rx().
4214 if (consume && rx->sta) {
4215 struct ieee80211_fast_rx *fast_rx;
4217 fast_rx = rcu_dereference(rx->sta->fast_rx);
4218 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4222 if (!ieee80211_accept_frame(rx))
/* non-consuming caller: work on a private copy of the skb */
4226 skb = skb_copy(skb, GFP_ATOMIC);
4228 if (net_ratelimit())
4229 wiphy_debug(local->hw.wiphy,
4230 "failed to copy skb for %s\n",
4238 ieee80211_invoke_rx_handlers(rx);
4243 * This is the actual Rx frames handler. as it belongs to Rx path it must
4244 * be called with rcu_read_lock protection.
/*
 * Dispatch one received MPDU: validate/linearize management frames,
 * parse QoS/alignment info, feed scan results, then deliver the frame
 * to the matching station(s) (data) or interface(s) (other frames),
 * copying the skb for all but the last candidate.
 */
4246 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4247 struct ieee80211_sta *pubsta,
4248 struct sk_buff *skb,
4249 struct napi_struct *napi)
4251 struct ieee80211_local *local = hw_to_local(hw);
4252 struct ieee80211_sub_if_data *sdata;
4253 struct ieee80211_hdr *hdr;
4255 struct ieee80211_rx_data rx;
4256 struct ieee80211_sub_if_data *prev;
4257 struct rhlist_head *tmp;
4260 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4261 memset(&rx, 0, sizeof(rx));
4266 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4267 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4269 if (ieee80211_is_mgmt(fc)) {
4270 /* drop frame if too short for header */
4271 if (skb->len < ieee80211_hdrlen(fc))
4274 err = skb_linearize(skb);
4276 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4284 hdr = (struct ieee80211_hdr *)skb->data;
4285 ieee80211_parse_qos(&rx);
4286 ieee80211_verify_alignment(&rx);
4288 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4289 ieee80211_is_beacon(hdr->frame_control)))
4290 ieee80211_scan_rx(local, skb);
4292 if (ieee80211_is_data(fc)) {
4293 struct sta_info *sta, *prev_sta;
/* driver already identified the station: single-destination shortcut */
4296 rx.sta = container_of(pubsta, struct sta_info, sta);
4297 rx.sdata = rx.sta->sdata;
4298 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
/* otherwise look up all stations matching the TA (addr2) */
4305 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4312 rx.sdata = prev_sta->sdata;
4313 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4320 rx.sdata = prev_sta->sdata;
4322 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
/* non-data frames: offer to every running non-monitor interface */
4330 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4331 if (!ieee80211_sdata_running(sdata))
4334 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4335 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4339 * frame is destined for this interface, but if it's
4340 * not also for the previous one we handle that after
4341 * the loop to avoid copying the SKB once too much
4349 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4351 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4357 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4360 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4369 * This is the receive path handler. It is called by a low level driver when an
4370 * 802.11 MPDU is received from the hardware.
/*
 * Driver entry point for received MPDUs (NAPI variant).  Validates the
 * reported band and rate information, refuses frames while suspending or
 * reconfiguring, passes the frame to monitor interfaces, then hands it
 * to __ieee80211_rx_handle_packet() under rcu_read_lock.
 */
4372 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4373 struct sk_buff *skb, struct napi_struct *napi)
4375 struct ieee80211_local *local = hw_to_local(hw);
4376 struct ieee80211_rate *rate = NULL;
4377 struct ieee80211_supported_band *sband;
4378 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
/* must be called from softirq (BH) context */
4380 WARN_ON_ONCE(softirq_count() == 0);
4382 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4385 sband = local->hw.wiphy->bands[status->band];
4386 if (WARN_ON(!sband))
4390 * If we're suspending, it is possible although not too likely
4391 * that we'd be receiving frames after having already partially
4392 * quiesced the stack. We can't process such frames then since
4393 * that might, for example, cause stations to be added or other
4394 * driver callbacks be invoked.
4396 if (unlikely(local->quiescing || local->suspended))
4399 /* We might be during a HW reconfig, prevent Rx for the same reason */
4400 if (unlikely(local->in_reconfig))
4404 * The same happens when we're not even started,
4405 * but that's worth a warning.
4407 if (WARN_ON(!local->started))
4410 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4412 * Validate the rate, unless a PLCP error means that
4413 * we probably can't have a valid rate here anyway.
4416 switch (status->encoding) {
4419 * rate_idx is MCS index, which can be [0-76]
4422 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
4424 * Anything else would be some sort of driver or
4425 * hardware error. The driver should catch hardware
4428 if (WARN(status->rate_idx > 76,
4429 "Rate marked as an HT rate but passed "
4430 "status->rate_idx is not "
4431 "an MCS index [0-76]: %d (0x%02x)\n",
4437 if (WARN_ONCE(status->rate_idx > 9 ||
4440 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4441 status->rate_idx, status->nss))
4448 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4450 rate = &sband->bitrates[status->rate_idx];
4454 status->rx_flags = 0;
4457 * key references and virtual interfaces are protected using RCU
4458 * and this requires that we are in a read-side RCU section during
4459 * receive processing
4464 * Frames with failed FCS/PLCP checksum are not returned,
4465 * all other frames are returned without radiotap header
4466 * if it was previously present.
4467 * Also, frames with less than 16 bytes are dropped.
4469 skb = ieee80211_rx_monitor(local, skb, rate);
4475 ieee80211_tpt_led_trig_rx(local,
4476 ((struct ieee80211_hdr *)skb->data)->frame_control,
4479 __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
4487 EXPORT_SYMBOL(ieee80211_rx_napi);
4489 /* This is a version of the rx handler that can be called from hard irq
4490 * context. Post the skb on the queue and schedule the tasklet */
/*
 * Defer RX processing to tasklet (softirq) context: tag the skb, queue
 * it, and schedule the local tasklet which will run the normal RX path.
 */
4491 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4493 struct ieee80211_local *local = hw_to_local(hw);
/* RX status lives in skb->cb; make sure it fits at compile time */
4495 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4497 skb->pkt_type = IEEE80211_RX_MSG;
4498 skb_queue_tail(&local->skb_queue, skb);
4499 tasklet_schedule(&local->tasklet);
4501 EXPORT_SYMBOL(ieee80211_rx_irqsafe);