/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)
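
/*
 * Results extracted from the ALIVE notification by iwl_alive_fn() and
 * consumed by iwl_mvm_load_ucode_wait_alive() once the wait completes.
 */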
struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
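
/*
 * Configure RSS: TCP, UDP and other IP payloads, for both IPv4 and IPv6,
 * are hashed and spread round-robin over RX queues 1..N-1; queue 0 is kept
 * out of the indirection table and serves as the fallback queue.
 */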
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
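
/*
 * Tell the firmware which queue the driver uses as its command queue when
 * working in DQA (dynamic queue allocation) mode.
 */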
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

		if (!paging->fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}
		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
			       paging->fw_paging_size, DMA_BIDIRECTIONAL);

		__free_pages(paging->fw_paging_block,
			     get_order(paging->fw_paging_size));
		paging->fw_paging_block = NULL;
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * find where the paging image starts:
	 * if CPU2 exists and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
	 * non-paged code from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for the CSS and one for the paging data)
	 */
	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);
	dma_sync_single_for_device(mvm->trans->dev,
				   mvm->fw_paging_db[0].fw_paging_phys,
				   mvm->fw_paging_db[0].fw_paging_size,
				   DMA_BIDIRECTIONAL);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * the loop index starts from 1 since the CSS block was already copied
	 * to dram and the CSS index is 0.
	 * the loop stops at num_of_paging_blk since the last block is not
	 * full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       block->fw_paging_size);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
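
	/*
	 * Illustrative example: a paging_mem_size of 328KB gives 82 4KB
	 * pages; with 8 pages per group that is ((82 - 1) / 8) + 1 = 11
	 * blocks, of which the last holds 82 - 8 * 10 = 2 pages.
	 */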
	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * allocate blocks in dram.
	 * since the CSS is allocated in fw_paging_db[0], the loop starts
	 * from index 1
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}
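
/*
 * Allocate the host-side paging block pool (only once) and fill it with
 * the paging sections of the given firmware image.
 */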
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};
	int blk_idx, size = sizeof(paging_cmd);

	/* A bit hard coded - but this is the old API and will be deprecated */
	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;

		addr = addr >> PAGE_2_EXP_SIZE;
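
		/*
		 * The firmware takes block addresses in units of
		 * FW_PAGING_SIZE (4KB) pages, so the page-offset bits are
		 * shifted out above; the old API carries the result as a
		 * 32-bit value, the new one as a 64-bit value.
		 */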
		if (iwl_mvm_has_new_tx_api(mvm)) {
			__le64 phy_addr = cpu_to_le64(addr);

			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
		} else {
			__le32 phy_addr = cpu_to_le32(addr);

			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
		}
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, size, &paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
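
/*
 * The ALIVE notification has grown over firmware generations; the three
 * layouts are told apart purely by the payload length, and each carries
 * the error/log event table pointers plus the scheduler base address.
 */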
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}
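
/*
 * Load the requested firmware image and block until the ALIVE notification
 * arrives (or times out), then finish bringing the device up: update the
 * SDIO allocation, configure firmware paging if the image needs it, and
 * reset the driver's queue bookkeeping.
 */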
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * the driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty and will be deleted as well, so no harm done.
	 */
	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	if (iwl_mvm_is_dqa_supported(mvm))
		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
	else
		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
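
/*
 * Run the INIT firmware image: start the device, read the NVM (on first
 * load only), send the initial configuration and wait until the image
 * reports that its internal calibrations are complete.
 */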
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
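
/*
 * Two layouts of the shared-memory configuration response exist: the a000
 * one below and the older v1 one after it; iwl_mvm_get_shared_mem_conf()
 * picks the parser based on iwl_mvm_has_new_tx_api().
 */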
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
		     sizeof(mem_cfg->internal_txfifo_size));

	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
	     i++)
		mvm->shared_mem_cfg.internal_txfifo_size[i] =
			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
	int i;

	mvm->shared_mem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);

	/* new API has more data, from rxfifo_addr field and on */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
			     sizeof(mem_cfg->internal_txfifo_size));

		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++)
			mvm->shared_mem_cfg.internal_txfifo_size[i] =
				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
	}
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
	else
		cmd.id = SHARED_MEM_CFG;

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
	else
		iwl_mvm_parse_shared_mem(mvm, pkt);

	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
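
/*
 * SAR (specific absorption rate) TX power limits are read from the WRDS
 * method in the platform's ACPI tables: a revision integer followed by one
 * package per domain, each holding a domain type, an enabled flag and
 * ACPI_WRDS_TABLE_SIZE per-chain/sub-band values.
 */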
#define ACPI_WRDS_METHOD	"WRDS"
#define ACPI_WRDS_WIFI		(0x07)
#define ACPI_WRDS_TABLE_SIZE	10

struct iwl_mvm_sar_table {
	bool enabled;
	u8 values[ACPI_WRDS_TABLE_SIZE];
};

#ifdef CONFIG_ACPI
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
				struct iwl_mvm_sar_table *sar_table)
{
	union acpi_object *data_pkg;
	u32 i;

	/* We need at least two packages, one for the revision and one
	 * for the data itself.  Also check that the revision is valid
	 * (i.e. it is an integer set to 0).
	 */
	if (wrds->type != ACPI_TYPE_PACKAGE ||
	    wrds->package.count < 2 ||
	    wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrds->package.elements[0].integer.value != 0) {
		IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
		return -EINVAL;
	}

	/* loop through all the packages to find the one for WiFi */
	for (i = 1; i < wrds->package.count; i++) {
		union acpi_object *domain;

		data_pkg = &wrds->package.elements[i];

		/* Skip anything that is not a package with the right
		 * amount of elements (i.e. domain_type,
		 * enabled/disabled plus the sar table size).
		 */
		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
		    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
			continue;

		domain = &data_pkg->package.elements[0];
		if (domain->type == ACPI_TYPE_INTEGER &&
		    domain->integer.value == ACPI_WRDS_WIFI)
			break;

		data_pkg = NULL;
	}

	if (!data_pkg)
		return -ENOENT;

	if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);

	for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
		union acpi_object *entry;

		entry = &data_pkg->package.elements[i + 2];
		if ((entry->type != ACPI_TYPE_INTEGER) ||
		    (entry->integer.value > U8_MAX))
			return -EINVAL;

		sar_table->values[i] = entry->integer.value;
	}

	return 0;
}

static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	int ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
		return -ENOENT;
	}

	/* Call WRDS with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
	kfree(wrds.pointer);

	return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
				 struct iwl_mvm_sar_table *sar_table)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	struct iwl_mvm_sar_table sar_table;
	struct iwl_dev_tx_power_cmd cmd = {
		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	int ret, i, j, idx;
	int len = sizeof(cmd);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v3);

	ret = iwl_mvm_sar_get_table(mvm, &sar_table);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	if (!sar_table.enabled)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
		     ACPI_WRDS_TABLE_SIZE);
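
	/*
	 * The WRDS values are a flattened chains x sub-bands matrix, so
	 * entry idx = i * IWL_NUM_SUB_BANDS + j is the limit for chain i,
	 * sub-band j, in units of 1/8 dBm.
	 */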
	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
			idx = (i * IWL_NUM_SUB_BANDS) + j;
			cmd.v3.per_chain_restriction[i][j] =
				cpu_to_le16(sar_table.values[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, sar_table.values[idx]);
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
	if (ret)
		IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);

	return ret;
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret) {
		if (iwlmvm_mod_params.init_dbg)
			return 0;

		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		goto error;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send an empty temperature
		 * reporting cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
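
/*
 * Log the rfkill state reported by the firmware: the HW and SW kill
 * switches and whether the CT-kill (critical temperature) threshold has
 * been reached.
 */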
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}
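
/*
 * MFUART is a companion firmware image brought up by the INIT firmware
 * (see the comment in iwl_mvm_up()); this notification reports its
 * versions and load status.
 */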
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}