/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched to SRAM.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
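
/*
 * Illustrative example of the byte-count entry encoding above: bits 0..11
 * hold the frame length in dwords, bits 12..15 the number of extra 64-byte
 * fetch chunks.  For a 200-byte frame in a TFD that uses 3 TBs:
 * len = DIV_ROUND_UP(200, 4) = 50 dwords, the filled TFD size is 32 bytes
 * so num_fetch_chunks is 0, and the entry is simply cpu_to_le16(50).
 */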

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error: cannot send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}
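
/*
 * Note on iwl_pcie_gen2_set_tb(): on success it returns the (0-based) index
 * of the TB it just filled, on failure a negative errno.  Callers that map
 * skb fragments with dma_map_page() record that index in out_meta->tbs
 * (see iwl_pcie_gen2_build_tfd()) so that iwl_pcie_gen2_tfd_unmap() later
 * knows to use dma_unmap_page() rather than dma_unmap_single() for it.
 */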

static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
	return -EINVAL;
}
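
/*
 * Layout of each A-MSDU subframe header assembled above in the TSO header
 * page (the payload chunks are mapped separately as their own TBs straight
 * from the skb):
 *
 *   | pad (0-3) | DA (6) | SA (6) | length (2) | SNAP + IP + TCP headers |
 *
 * The pad aligns the start of every subframe after the first to a 4-byte
 * boundary, and "length" covers the SNAP/IP/TCP headers plus the subframe
 * payload.
 */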

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	bool amsdu;
	int i, len, tb1_len, tb2_len, hdr_len;
	void *tb1_addr;

	memset(tfd, 0, sizeof(*tfd));

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
	/* The first TB points to bi-directional DMA data */
	if (!amsdu)
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */
	if (amsdu)
		tb1_len = len;
	else
		tb1_len = ALIGN(len, 4);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (amsdu) {
		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
					      tb1_len + IWL_FIRST_TB_SIZE,
					      hdr_len, dev_cmd))
			goto out_err;

		/*
		 * building the A-MSDU might have changed this data, so memcpy
		 * it now
		 */
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);
		return tfd;
	}

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
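
/*
 * Summary of the TFD laid out by iwl_pcie_gen2_build_tfd() for a normal
 * (non-A-MSDU) frame:
 *
 *   TB0  - first IWL_FIRST_TB_SIZE bytes of the TX command, copied into the
 *          queue's bi-directional DMA buffer
 *   TB1  - remainder of the TX command plus the 802.11 header
 *   TB2  - rest of the skb's linear data after the 802.11 header (if any)
 *   TB3+ - one TB per non-empty skb fragment
 */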

int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	struct iwl_tfh_tfd *tfd;
	int idx;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	unsigned long flags2;

	memset(tfd, 0, sizeof(*tfd));

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags2);

	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags2);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_irqrestore(&txq->lock, flags2);
free_dup_buf:
	if (idx < 0 && dup_buf)
		kfree(dup_buf);
	return idx;
}
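
/*
 * TB layout produced by iwl_pcie_gen2_enqueue_hcmd():
 *
 *   TB0  - first min(copy_size, IWL_FIRST_TB_SIZE) bytes of the assembled
 *          command, copied into the queue's bi-directional DMA buffer
 *   TB1  - remainder of the copied portion of the command, mapped directly
 *          from out_cmd when copy_size exceeds TB0
 *   then one TB per NOCOPY/DUP chunk, mapped from the caller's (or the
 *   duplicated) buffer
 */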

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command cannot expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
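
/*
 * Hypothetical caller sketch (the group/opcode, payload layout and flags
 * are illustrative only; real users live in the op_mode):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(group_id, cmd_opcode),
 *		.len = { sizeof(fixed_part), tbl_len },
 *		.data = { &fixed_part, tbl },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 *
 *	ret = iwl_trans_pcie_gen2_send_hcmd(trans, &hcmd);
 */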

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
					  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}

/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}

int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;
	u32 wr_ptr;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return ret;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}
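
/*
 * Hypothetical caller sketch for the allocation above (field values and the
 * command ID are illustrative; the real configuration comes from the
 * op_mode and the firmware API headers).  On success the function returns
 * the firmware-assigned queue id, on failure a negative errno:
 *
 *	struct iwl_tx_queue_cfg_cmd cfg = {
 *		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
 *		.sta_id = sta_id,
 *		.tid = tid,
 *	};
 *	int queue = iwl_trans_pcie_dyn_txq_alloc(trans, &cfg, cmd_id,
 *						 wdg_timeout);
 *	if (queue < 0)
 *		// handle allocation failure
 */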

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
	trans_pcie->txq[queue] = NULL;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;

	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue) {
			IWL_ERR(trans, "Not enough memory for command queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}