/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>

#include "wcn36xx.h"
#include "txrx.h"

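/* Return the CPU address of the buffer descriptor (BD) attached to the
 * head control block of the selected TX channel (low priority for data,
 * high priority for management frames).
 */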
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

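/* Thin MMIO accessors for the CCU and DXE register blocks: trace the
 * access when DXE debugging is enabled, then do the actual readl()/writel().
 */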
static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

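/* Allocate the control blocks for one channel and link them into a
 * circular list: the last block points back to the head.
 */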
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		spin_lock_init(&cur_ctl->skb_lock);
		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			/* Last block: close the ring back to the head */
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

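/* Set the static parameters of all four DXE channels (type, descriptor
 * count, work queue, control words) and allocate their control blocks.
 */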
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX ENABLE, set TX RINGS EMPTY */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

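/* Allocate the channel's descriptor ring as one coherent DMA block and
 * chain the descriptors together, closing the ring on the last one.
 */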
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			/* Last descriptor: link back to the ring head */
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

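/* Attach BD headers from the preallocated pool to the control blocks of
 * a TX channel.
 */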
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second DXE descriptor needs a BD pointer;
		 * the others point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

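/* Unmask the interrupt of the given channel in the DXE interrupt mask
 * register (read-modify-write).
 */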
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

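/* Allocate an RX skb and map it for DMA; the descriptor's destination
 * address is pointed at the skb data area.
 */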
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

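/* Report a firmware TX status indication to mac80211: detach the pending
 * ack skb under the DXE lock, set the ACK flag if the frame was acked and
 * restart the queues.
 */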
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

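/* Reclaim all descriptors the hardware has completed on a TX channel,
 * starting at the tail of the ring: unmap each frame, free it unless a TX
 * status was requested, and wake the queues if we stopped them earlier.
 */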
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Run the loop at least once (do-while): when the ring is completely
	 * full, head and tail point at the same element, so a plain while
	 * loop would never execute.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
			break;
		if (ctl->skb) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status requested: free the frame now.
				 * Otherwise keep it until the status comes.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock(&ctl->skb_lock);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock(&ctl->skb_lock);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

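/* TX interrupt handler: clear the per-channel interrupts and reap
 * completed descriptors on the high and low priority TX channels.
 */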
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);

	return IRQ_HANDLED;
}

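/* Request the TX and RX interrupt lines and make the RX interrupt a
 * wakeup source.
 */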
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

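/* Drain one RX channel: for each descriptor the hardware has filled, try
 * to attach a fresh skb and pass the old one up the stack; if the
 * allocation fails, the old skb is recycled for DMA and the frame is lost.
 */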
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int ret = 0, int_mask;
	u32 value;

	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
		value = WCN36XX_DXE_CTRL_RX_L;
		int_mask = WCN36XX_DXE_INT_CH1_MASK;
	} else {
		value = WCN36XX_DXE_CTRL_RX_H;
		int_mask = WCN36XX_DXE_INT_CH3_MASK;
	}

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
		if (ret == 0) {
			/* New skb allocation ok. Use the new one and queue
			 * the old one to the network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep the old skb unsubmitted and reuse it for RX DMA */

		dxe->ctrl = value;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);

	ch->head_blk_ctl = ctl;
	return 0;
}

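/* Top half of RX processing: look at the raw interrupt sources and
 * service whichever RX channel (low or high priority) raised an interrupt.
 */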
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch);
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch);
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

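/* Allocate the coherent pools of BD headers used by the TX channels: one
 * pool for management frames, one for data frames.
 */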
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Chunk size inherited from the vendor driver; ask QC where it comes from */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Chunk size inherited from the vendor driver; ask QC where it comes from */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}

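/* Queue one frame for transmission. A frame occupies two ring slots: the
 * first descriptor carries the BD header, the second the skb payload.
 */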
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->head_blk_ctl;

	spin_lock(&ctl->next->skb_lock);

	/*
	 * If the next control block still holds an skb, the head has caught
	 * up with the tail and the ring is full. Stop the queues to let
	 * mac80211 back off until the ring has an empty slot again.
	 */
	if (ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock(&ctl->next->skb_lock);
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}
	spin_unlock(&ctl->next->skb_lock);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for an skb descriptor\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc->src_addr_l = dma_map_single(wcn->dev,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* Set DXE descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected, the chip may be in sleep mode while we try to send
	 * a data frame; writing to the register will not wake it up. Notify
	 * the chip about the new frame through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

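/* Bring up the DXE engine: reset it, route the channel interrupts,
 * program the descriptor rings and DMA addresses of all four channels,
 * enable the channel interrupts and request the host IRQs.
 */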
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
				   wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_L,
				   WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
				   wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_H,
				   WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_L,
				   WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_L,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_H,
				   WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_H,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

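/* Tear down the DXE engine: release the IRQs, flush a pending TX ack skb
 * and free the preallocated RX skbs.
 */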
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}