/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
#include "fw-dbg.h"

static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR sent to %pM, tid %d, ssn %d",
				    addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
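
/*
 * OPT_HDR() returns a typed pointer to the option header that sits at byte
 * offset @off from the network header; iwl_mvm_tx_csum() below uses it to
 * walk the chain of IPv6 extension headers.
 */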

static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_tx_info *info,
			    struct iwl_tx_cmd *tx_cmd)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		return;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		return;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				return;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		skb_checksum_help(skb);
		return;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/* mac header len should include IV, size is in words */
	if (info->control.hw_key)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
#endif
}
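
/*
 * Illustration (numbers are ours, not from the original source): for an
 * unencrypted TCP/IPv4 QoS data frame the code above ends up with the L4
 * checksum enabled, the IP-header offset field set to 4 (the 8-byte SNAP
 * header counted in 16-bit words), and MH_SIZE set to the 26-byte MAC
 * header, likewise in 16-bit words, i.e. 13.
 */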

/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			tx_cmd->offload_assist |=
				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
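
	/*
	 * Example (illustrative): a 26-byte QoS data header is not a
	 * multiple of 4, so the PAD bit below tells the transport to insert
	 * 2 bytes of padding after the MAC header.
	 */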
	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));

	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rate_plcp;

	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */
	if (ieee80211_is_data(fc) && sta) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
		return;
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx,
		  le16_to_cpu(fc));

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	mvm->mgmt_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->mgmt_last_antenna_idx);

	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
	else
		rate_flags =
			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}

static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}
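
/*
 * The bytes written above follow the 8-byte CCMP/GCMP header layout:
 * PN0, PN1, a reserved octet, the key-ID octet (Ext IV bit 0x20 plus the
 * key index in the top two bits), then PN2..PN5.
 */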

/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * key.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}

/*
 * Allocates and fills the Tx cmd for the given skb; the cmd pointer is
 * stored in the skb's driver data later, in iwl_mvm_skb_prepare_status()
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = TX_CMD;
	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	return dev_cmd;
}

static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}
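
/*
 * driver_data[1] is the state the reclaim paths rely on:
 * iwl_mvm_rx_tx_cmd_single() and iwl_mvm_tx_reclaim() read it back to free
 * the device command with iwl_trans_free_tx_cmd().
 */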

static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info, __le16 fc)
{
	if (!iwl_mvm_is_dqa_supported(mvm))
		return info->hw_queue;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
		/*
		 * Handle legacy hostapd as well, where station may be added
		 * only after assoc. Take care of the case where we send a
		 * deauth to a station that we don't have.
		 */
		if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
		    ieee80211_is_deauth(fc))
			return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
		return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		if (info->hw_queue == info->control.vif->cab_queue)
			return info->hw_queue;

		WARN_ON_ONCE(1);
		return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}

int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	int queue;

	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
	 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
	 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
	 * and hence needs to be sent on the aux queue
	 */
	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info.control.vif ||
			  info.hw_queue != info.control.vif->cab_queue)))
		return -1;

	queue = info.hw_queue;

	/*
	 * If the interface on which the frame is sent is the P2P_DEVICE
	 * or an AP/GO interface use the broadcast station associated
	 * with it; otherwise if the interface is a managed interface
	 * use the AP station associated with it for multicast traffic
	 * (this is not possible for unicast packets as a TDLS discovery
	 * response is sent without a station entry); otherwise use the
	 * AUX station.
	 * In DQA mode, if vif is of type STATION and frames are not multicast
	 * or offchannel, they should be sent from the BSS queue.
	 * For example, TDLS setup frames should be sent on this queue,
	 * as they go through the AP.
	 */
	sta_id = mvm->aux_sta.sta_id;
	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP) {
			sta_id = mvmvif->bcast_sta.sta_id;
			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
							   hdr->frame_control);
			if (queue < 0)
				return -1;

		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   is_multicast_ether_addr(hdr->addr1)) {
			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

			if (ap_sta_id != IWL_MVM_STATION_COUNT)
				sta_id = ap_sta_id;
		} else if (iwl_mvm_is_dqa_supported(mvm) &&
			   info.control.vif->type == NL80211_IFTYPE_STATION &&
			   queue != mvm->aux_queue) {
			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
		}
	}

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	/*
	 * Increase the pending frames counter, so that later when a reply comes
	 * in and the counter is decreased - we don't start getting negative
	 * values.
	 * Note that we don't need to make sure it isn't agg'd, since we're
	 * TXing non-sta.
	 * For DQA mode - we shouldn't increase it though.
	 */
	if (!iwl_mvm_is_dqa_supported(mvm))
		atomic_inc(&mvm->pending_frames[sta_id]);

	return 0;
}

#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	struct sk_buff *tmp, *next;
	char cb[sizeof(skb->cb)];
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
	u16 snap_ip_tcp, pad, i = 0;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 *qc, tid, txf;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);

	if (!sta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		num_subframes = 1;
		pad = 0;
		netdev_features &= ~NETIF_F_CSUM_MASK;
		goto segment;
	}

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
		num_subframes = 1;
		pad = 0;
		goto segment;
	}

	max_amsdu_len = sta->max_amsdu_len;

	/* the Tx FIFO to which this A-MSDU will be routed */
	txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
			      mvm->shared_mem_cfg.txfifo_size[txf] - 256);

	if (unlikely(dbg_max_amsdu_len))
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
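
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with an MSS of 1460 and plain IPv4/TCP headers,
	 * snap_ip_tcp = 8 + 20 + 20 = 48, so subf_len = 14 + 48 + 1460 =
	 * 1522 and pad = 2. An 8k A-MSDU limit then yields
	 * (8192 + 2) / 1524 = 5 subframes.
	 */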
	if (num_subframes > 1)
		*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	num_subframes =
		min_t(unsigned int, num_subframes,
		      (mvm->trans->max_skb_frags - 1 -
		       skb_shinfo(skb)->nr_frags) / 2);

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
segment:
	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_features);
	skb_shinfo(skb)->gso_size = mss;
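
	/*
	 * skb_gso_segment() cuts the payload into chunks of at most
	 * gso_size bytes, so inflating gso_size before the call makes each
	 * resulting skb carry one A-MSDU's worth of TCP payload; the real
	 * MSS is restored right above for the per-subframe bookkeeping in
	 * the loop below.
	 */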
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			qc = ieee80211_get_qos_ctl((void *)tmp->data);

			if (ipv4)
				ip_send_check(ip_hdr(tmp));
			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif

static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 */
	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
		  skb_queue_len(deferred_tx_frames))) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

/*
 * Prepares the MPDU: builds its Tx command, selects the Tx queue, and
 * hands the frame to the transport
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u8 txq_id = info->hw_queue;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
		return -1;

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;

		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   (ieee80211_is_qos_nullfunc(fc) ||
		    ieee80211_is_nullfunc(fc))) {
		/*
		 * nullfunc frames should go to the MGMT queue regardless of QOS
		 */
		tid = IWL_MAX_TID_COUNT;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		txq_id = mvmsta->tid_data[tid].txq_id;

		if (ieee80211_is_mgmt(fc))
			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
		/* default to TID 0 for non-QoS packets */
		u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;

		txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
	}

	if (is_ampdu) {
		if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;
		txq_id = mvmsta->tid_data[tid].txq_id;
	}

	/* Check if TXQ needs to be allocated or re-activated */
	if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
		     !mvmsta->tid_data[tid].is_tid_active) &&
	    iwl_mvm_is_dqa_supported(mvm)) {
		/* If TXQ needs to be allocated... */
		if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

			/*
			 * The frame is now deferred, and the worker scheduled
			 * will re-allocate it, so we can free it for now.
			 */
			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
			spin_unlock(&mvmsta->lock);
			return 0;
		}

		/* If we are here - TXQ exists and needs to be re-activated */
		spin_lock(&mvm->queue_info_lock);
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		mvmsta->tid_data[tid].is_tid_active = true;
		spin_unlock(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
				    txq_id);
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that the mvm->queue_info_lock isn't being taken here in
		 * order to not serialize the TX flow. This isn't dangerous
		 * because scheduling mvm->add_stream_wk can't ruin the state,
		 * and if we DON'T schedule it due to some race condition then
		 * next TX we get here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;
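
	/*
	 * The SN occupies bits 4-15 of the sequence control field, so the
	 * 0x10 step below advances it by exactly one for the next frame on
	 * this TID.
	 */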
	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	/* Increase pending frames count if this isn't AMPDU or DQA queue */
	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}

int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}

static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
	     iwl_mvm_is_dqa_supported(mvm)) &&
	    iwl_mvm_tid_queued(tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	if (tid_data->ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		if (!iwl_mvm_is_dqa_supported(mvm)) {
			u8 mac80211_ac = tid_to_mac80211_ac[tid];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id,
					    vif->hw_queue[mac80211_ac], tid,
					    CMD_ASYNC);
		}
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}

static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
	status_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
		return;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "Tx status %d was received",
					    status & TX_STATUS_MSK);
		break;
	}
}

static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			/* In DQA, the FW should have stopped the queue and not
			 * return this status
			 */
			WARN_ON(iwl_mvm_is_dqa_supported(mvm));
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		/*
		 * If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* W/A FW bug: seq_ctl is wrong when the status isn't success */
		if (status != TX_STATUS_SUCCESS) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
		}

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		info->status.status_driver_data[0] =
			(void *)(uintptr_t)tx_resp->reduced_tpc;

		ieee80211_tx_status(mvm->hw, skb);
	}

	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
		/* If this is an aggregation queue, we use the ssn since:
		 * ssn = wifi seq_num % 256.
		 * The seq_ctl is the sequence control of the packet to which
		 * this Tx response relates. But if there is a hole in the
		 * bitmap of the BA we received, this Tx response may allow to
		 * reclaim the hole and all the subsequent packets that were
		 * already acked. In that case, seq_ctl != ssn, and the next
		 * packet to be reclaimed will be ssn and not seq_ctl. In that
		 * case, several packets will be reclaimed even if
		 * frame_count = 1.
		 *
		 * The ssn is the index (% 256) of the latest packet that has
		 * been treated (acked / dropped) + 1.
		 */
		next_reclaimed = ssn;
	} else {
		/* The next packet to be reclaimed is the one after this one */
		next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
	}
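
	/*
	 * Example (illustrative): IEEE80211_SEQ_TO_SN() drops the 4-bit
	 * fragment field, so a seq_ctl of 0x01b0 (SN 27) yields
	 * next_reclaimed = 28 in the non-aggregation branch above.
	 */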
	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (tid != IWL_TID_NON_QOS) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	} else {
		mvmsta = NULL;
	}

	/*
	 * If the txq is not an AMPDU queue, there is no chance we freed
	 * several skbs. Check that out...
	 */
	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
		goto out;

	/* We can't free more than one frame at once on a shared queue */
	WARN_ON(skb_freed > 1);

	/* If we still have frames for this STA there is nothing to do here */
	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
		goto out;

	if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
		/*
		 * If there are no pending frames for this STA and
		 * the tx to this station is not disabled, notify
		 * mac80211 that this station can now wake up in its
		 * STA table.
		 * If mvmsta is not NULL, sta is valid.
		 */

		spin_lock_bh(&mvmsta->lock);

		if (!mvmsta->disable_tx)
			ieee80211_sta_block_awake(mvm->hw, sta, false);

		spin_unlock_bh(&mvmsta->lock);
	}

	if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
		/*
		 * We are draining and this was the last packet - pre_rcu_remove
		 * has been called already. We might be after the
		 * synchronize_net already.
		 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
		 */
		set_bit(sta_id, mvm->sta_drained);
		schedule_work(&mvm->sta_drained_wk);
	}

out:
	rcu_read_unlock();
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(LAST_SENT_TTL);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status = &tx_resp->status;
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);

	if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
			 (!iwl_mvm_is_dqa_supported(mvm) ||
			  (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
		return;

	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
	}

	rcu_read_unlock();
}
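
/*
 * The rate and airtime stored above are per-TID scratch data: the BA
 * notification path (iwl_mvm_rx_ba_notif() -> iwl_mvm_tx_reclaim()) reads
 * them back from tid_data when reporting the aggregated frames to mac80211.
 */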

void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}

static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid >= IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted ? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&mvmsta->lock);

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress which is
	 * possible (i.e. first MPDU in the aggregation wasn't acked)
	 * Still it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}

void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		/*
		 * TODO:
		 * When supporting multi TID aggregations - we need to move
		 * next_reclaimed to be per TXQ and not per TID or handle it
		 * in a different way.
		 * This will go together with SN and AddBA offload and cannot
		 * be handled properly for now.
		 */
		WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
		iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
				   (int)ba_res->tfd[0].q_num,
				   le16_to_cpu(ba_res->tfd[0].tfd_index),
				   &ba_info, le32_to_cpu(ba_res->tx_rate));

		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
			   tid_data->rate_n_flags);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}

/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}