/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex; this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC)) {
		lockdep_assert_held(&mvm->mutex);
		if (!(cmd->flags & CMD_SEND_IN_IDLE))
			iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
	}

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
		iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/* Silently ignore failures if RFKILL is asserted */
	if (!ret || ret == -ERFKILL)
		return 0;
	return ret;
}

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
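
/*
 * Illustrative sketch (not part of the driver): a typical fire-and-forget
 * use of iwl_mvm_send_cmd_pdu(). The command ID and payload struct below
 * are hypothetical stand-ins; real callers use the definitions in fw/api/.
 *
 *	struct iwl_some_cmd payload = {};	// hypothetical payload
 *	int ret;
 *
 *	// synchronous send: mvm->mutex must be held (see above)
 *	ret = iwl_mvm_send_cmd_pdu(mvm, SOME_CMD_ID, 0,
 *				   sizeof(payload), &payload);
 *	// without CMD_WANT_SKB, ret is 0 even if RFKILL aborted the command
 */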

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status;
	 * we use CMD_WANT_SKB internally, so the caller may not.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
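
/*
 * Illustrative sketch (not part of the driver) of the status-command
 * contract: the caller pre-loads *status with the firmware's success value,
 * so an RFKILL-aborted command (which returns 0 without touching *status)
 * still reads back as success. ADD_STA/ADD_STA_SUCCESS are used here only
 * as a plausible example pair.
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					      sizeof(add_sta_cmd),
 *					      &add_sta_cmd, &status);
 *	if (ret)
 *		return ret;		// transport-level failure
 *	if (status != ADD_STA_SUCCESS)
 *		...			// firmware rejected the command
 */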

#define IWL_DECLARE_RATE_INFO(r) \
	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP

/*
 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
 */
static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1),
	IWL_DECLARE_RATE_INFO(2),
	IWL_DECLARE_RATE_INFO(5),
	IWL_DECLARE_RATE_INFO(11),
	IWL_DECLARE_RATE_INFO(6),
	IWL_DECLARE_RATE_INFO(9),
	IWL_DECLARE_RATE_INFO(12),
	IWL_DECLARE_RATE_INFO(18),
	IWL_DECLARE_RATE_INFO(24),
	IWL_DECLARE_RATE_INFO(36),
	IWL_DECLARE_RATE_INFO(48),
	IWL_DECLARE_RATE_INFO(54),
};
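
/*
 * For example, IWL_DECLARE_RATE_INFO(11) expands to
 *	[IWL_RATE_11M_INDEX] = IWL_RATE_11M_PLCP
 * so indexing this array with a firmware rate index yields the PLCP value
 * of that legacy rate. The first four entries (1/2/5.5/11 Mbps) are the CCK
 * rates; the remaining eight are the OFDM rates.
 */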

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band == NL80211_BAND_5GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (fw_rate_idx_to_plcp[idx] == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
{
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	return fw_rate_idx_to_plcp[rate_idx];
}

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
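
/*
 * Example: assuming ANT_A == BIT(0) and ANT_B == BIT(1) (per iwl-config.h),
 * first_antenna(ANT_B | ANT_C) == BIT(ffs(0x6) - 1) == BIT(1) == ANT_B,
 * i.e. the lowest antenna bit set in the mask.
 */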

/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % RATE_MCS_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}
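
/*
 * Example: with valid == ANT_A | ANT_B (indices 0 and 1) and last_idx == 1,
 * the loop first tries index 2 (not valid here), then wraps around to
 * index 0 and returns it - a simple round-robin over the valid indices.
 */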

static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}
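
/*
 * Example: desc_lookup(0x38) returns "BAD_COMMAND", while an id that matches
 * no table entry (say 0x99) falls through the loop and returns the final
 * catch-all entry, "ADVANCED_SYSASSERT".
 */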

/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;

struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 trm_hw_status0;	/* TRM HW status */
	u32 trm_hw_status1;	/* TRM HW status */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 fw_rev_type;	/* firmware revision type */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 last_cmd_id;	/* last HCMD id handled by the firmware */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;		/* UMAC version major */
	u32 umac_minor;		/* UMAC version minor */
	u32 frame_pointer;	/* core register 27 */
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;
	u32 base;

	base = mvm->umac_error_event_table;

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
			? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 val;

	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x400000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
			? "Init" : "RT");
		return;
	}

	/* check if there is a HW error */
	val = iwl_trans_read_mem32(trans, base);
	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
		int err;

		IWL_ERR(trans, "HW error, resetting before reading\n");

		/* reset the device */
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);

		/* set INIT_DONE flag */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

		/* and wait for clock stabilization */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			udelay(2);

		err = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (err < 0) {
			IWL_DEBUG_INFO(trans,
				       "Failed to reset the card for the dump\n");
			return;
		}
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.fw_rev_type, table.major,
				      table.minor, table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}

void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
		IWL_ERR(mvm,
			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
		return;
	}

	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);

	if (mvm->error_event_table[1])
		iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}

int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	/*
	 * If no free queue was found, settle for an inactive one to
	 * reconfigure. Make sure that the inactive queue either already
	 * belongs to this STA, or, if it belongs to another one, that it
	 * isn't that STA's reserved queue.
	 */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
		    (sta_id == mvm->queue_info[i].ra_sta_id ||
		     !mvm->queue_info[i].reserved))
			return i;

	return -ENOSPC;
}

int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
			 int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
		 "Trying to reconfig unallocated queue %d\n", queue)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return -ENXIO;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].hw_queue_refcount > 0)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].hw_queue_refcount++;
	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].hw_queue_refcount,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
		.sta_id = sta_id,
		.tid = tid,
	};
	int queue;

	if (cmd.tid == IWL_MAX_TID_COUNT)
		cmd.tid = IWL_MGMT_TID;
	queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
				    SCD_QUEUE_CFG, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC
	 * queue from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount--;

	cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].hw_queue_refcount,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].hw_queue_refcount ||
	     mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
	     queue, mvm->queue_info[queue].hw_queue_refcount,
	     mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);

	return ret;
}

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: the mvm op-mode data
 * @lq: the link quality command to send
 * @init: set when this command is sent as part of station initialization,
 *	right after the station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set; we then call a callback
 * to clear the state indicating that station creation is still in progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = init ? 0 : CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}
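
/*
 * Illustrative call pattern (not part of the driver): station setup sends
 * the command synchronously (init == true, under mvm->mutex), while later
 * rate-scaling updates send it with CMD_ASYNC. The lq pointer below is a
 * hypothetical stand-in for wherever the caller keeps its struct iwl_lq_cmd.
 *
 *	ret = iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, init);
 */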

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @mvm: the mvm op-mode data
 * @vif: the interface the request applies to
 * @req_type: the part of the driver that requested the change
 * @smps_request: the requested SMPS mode
 *
 * Record one requester's SMPS request, then set the effective mode
 * according to all of the requests currently tracked in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	ieee80211_request_smps(vif, smps_mode);
}
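
/*
 * Example of the resulting precedence: if one requester asks for
 * IEEE80211_SMPS_DYNAMIC and another for IEEE80211_SMPS_STATIC, the loop
 * settles on STATIC (and breaks out immediately); DYNAMIC only wins over
 * the AUTOMATIC/OFF default when no STATIC request exists.
 */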

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
{
	bool result = true;

	lockdep_assert_held(&mvm->mutex);

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		return false;

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);

	return result;
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool prev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_vif_low_latency(mvmvif) == prev)
		return 0;

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	bool *result = _data;

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
		*result = true;
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	bool result = false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &result);

	return result;
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->bss_conf.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		goto out;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(vif), trig))
		goto out;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue's TIDs are inactive - unmap them from the queue.
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - mark queue as inactive. */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
			mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
				    queue);
		return;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].hw_queue_refcount--;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
		mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
	}
}

void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
	unsigned long timeout_queues_map = 0;
	unsigned long now = jiffies;
	int i;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
		if (mvm->queue_info[i].hw_queue_refcount > 0)
			timeout_queues_map |= BIT(i);
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	/*
	 * If a queue times out - mark it as INACTIVE (don't remove right away
	 * if we don't have to.) This is an optimization in case traffic comes
	 * later, and we don't HAVE to use a currently-inactive queue.
	 */
	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		spin_lock_bh(&mvm->queue_info_lock);
		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
			spin_unlock_bh(&mvm->queue_info_lock);
			continue;
		}

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}
		spin_unlock_bh(&mvm->queue_info_lock);

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
					     inactive_tid_bitmap);
		spin_unlock(&mvm->queue_info_lock);
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
	*boottime = ktime_get_boot_ns();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}
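
/*
 * Illustrative use (not part of the driver): callers that need to correlate
 * firmware time with host time sample both in one call, e.g.
 *
 *	u32 gp2;
 *	u64 boottime_ns;
 *
 *	iwl_mvm_get_sync_time(mvm, &gp2, &boottime_ns);
 *	// gp2 is the device's GP2 timer, boottime_ns is ktime_get_boot_ns()
 *	// captured at (approximately) the same instant
 */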

int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
			 enum iwl_lqm_cmd_operatrions operation, /* sic */
			 u32 duration, u32 timeout)
{
	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_link_qual_msrmnt_cmd cmd = {
		.cmd_operation = cpu_to_le32(operation),
		.mac_id = cpu_to_le32(mvm_vif->id),
		.measurement_time = cpu_to_le32(duration),
		.timeout = cpu_to_le32(timeout),
	};
	u32 cmdid =
		iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
	int ret;

	if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
		return -EOPNOTSUPP;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return -EINVAL;

	switch (operation) {
	case LQM_CMD_OPERATION_START_MEASUREMENT:
		if (iwl_mvm_lqm_active(mvm_vif->mvm))
			return -EBUSY;
		if (!vif->bss_conf.assoc)
			return -EINVAL;
		mvm_vif->lqm_active = true;
		break;
	case LQM_CMD_OPERATION_STOP_MEASUREMENT:
		if (!iwl_mvm_lqm_active(mvm_vif->mvm))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
				   &cmd);

	/* command failed - roll back lqm_active state */
	if (ret)
		mvm_vif->lqm_active =
			operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;

	return ret;
}

static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
	bool *lqm_active = _data;

	*lqm_active = *lqm_active || mvm_vif->lqm_active;
}

bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
{
	bool ret = false;

	lockdep_assert_held(&mvm->mutex);
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_lqm_active_iterator, &ret);

	return ret;
}