// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/remoteproc.h>
20 static const struct of_device_id ath11k_ahb_of_match[] = {
21 /* TODO: Should we change the compatible string to something similar
22 * to one that ath10k uses?
24 { .compatible = "qcom,ipq8074-wifi",
25 .data = (void *)ATH11K_HW_IPQ8074,
27 { .compatible = "qcom,ipq6018-wifi",
28 .data = (void *)ATH11K_HW_IPQ6018_HW10,
30 { .compatible = "qcom,wcn6750-wifi",
31 .data = (void *)ATH11K_HW_WCN6750_HW10,
36 MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
38 #define ATH11K_IRQ_CE0_OFFSET 4
40 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
58 "host2reo-re-injection",
60 "host2rxdma-monitor-ring3",
61 "host2rxdma-monitor-ring2",
62 "host2rxdma-monitor-ring1",
64 "wbm2host-rx-release",
66 "reo2host-destination-ring4",
67 "reo2host-destination-ring3",
68 "reo2host-destination-ring2",
69 "reo2host-destination-ring1",
70 "rxdma2host-monitor-destination-mac3",
71 "rxdma2host-monitor-destination-mac2",
72 "rxdma2host-monitor-destination-mac1",
73 "ppdu-end-interrupts-mac3",
74 "ppdu-end-interrupts-mac2",
75 "ppdu-end-interrupts-mac1",
76 "rxdma2host-monitor-status-ring-mac3",
77 "rxdma2host-monitor-status-ring-mac2",
78 "rxdma2host-monitor-status-ring-mac1",
79 "host2rxdma-host-buf-ring-mac3",
80 "host2rxdma-host-buf-ring-mac2",
81 "host2rxdma-host-buf-ring-mac1",
82 "rxdma2host-destination-ring-mac3",
83 "rxdma2host-destination-ring-mac2",
84 "rxdma2host-destination-ring-mac1",
85 "host2tcl-input-ring4",
86 "host2tcl-input-ring3",
87 "host2tcl-input-ring2",
88 "host2tcl-input-ring1",
89 "wbm2host-tx-completions-ring3",
90 "wbm2host-tx-completions-ring2",
91 "wbm2host-tx-completions-ring1",
92 "tcl2host-status-ring",
/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath. Values index into irq_name[] above; host2wbm_desc_feed
 * starts at 16 because indices 0-15 are misc/watchdog/CE interrupts.
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2rxdma_host_buf_ring,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
138 ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
140 return ab->pci.msi.irqs[vector];
143 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
144 .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
147 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
149 return ioread32(ab->mem + offset);
152 static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
154 iowrite32(value, ab->mem + offset);
157 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
161 for (i = 0; i < ab->hw_params.ce_count; i++) {
162 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
164 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
167 tasklet_kill(&ce_pipe->intr_tq);
171 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
175 for (i = 0; i < irq_grp->num_irq; i++)
176 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
179 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
183 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
184 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
186 ath11k_ahb_ext_grp_disable(irq_grp);
188 if (irq_grp->napi_enabled) {
189 napi_synchronize(&irq_grp->napi);
190 napi_disable(&irq_grp->napi);
191 irq_grp->napi_enabled = false;
196 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
200 for (i = 0; i < irq_grp->num_irq; i++)
201 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
204 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
208 val = ath11k_ahb_read32(ab, offset);
209 ath11k_ahb_write32(ab, offset, val | BIT(bit));
212 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
216 val = ath11k_ahb_read32(ab, offset);
217 ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
220 static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
222 const struct ce_attr *ce_attr;
224 ce_attr = &ab->hw_params.host_ce_config[ce_id];
225 if (ce_attr->src_nentries)
226 ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
228 if (ce_attr->dest_nentries) {
229 ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
230 ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
231 CE_HOST_IE_3_ADDRESS);
235 static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
237 const struct ce_attr *ce_attr;
239 ce_attr = &ab->hw_params.host_ce_config[ce_id];
240 if (ce_attr->src_nentries)
241 ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
243 if (ce_attr->dest_nentries) {
244 ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
245 ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
246 CE_HOST_IE_3_ADDRESS);
250 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
255 for (i = 0; i < ab->hw_params.ce_count; i++) {
256 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
259 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
260 synchronize_irq(ab->irq_num[irq_idx]);
264 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
269 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
270 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
272 for (j = 0; j < irq_grp->num_irq; j++) {
273 irq_idx = irq_grp->irqs[j];
274 synchronize_irq(ab->irq_num[irq_idx]);
279 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
283 for (i = 0; i < ab->hw_params.ce_count; i++) {
284 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
286 ath11k_ahb_ce_irq_enable(ab, i);
290 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
294 for (i = 0; i < ab->hw_params.ce_count; i++) {
295 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
297 ath11k_ahb_ce_irq_disable(ab, i);
/* hif op: bring up CE interrupts and post initial RX buffers. */
static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
309 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
313 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
314 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
316 if (!irq_grp->napi_enabled) {
317 napi_enable(&irq_grp->napi);
318 irq_grp->napi_enabled = true;
320 ath11k_ahb_ext_grp_enable(irq_grp);
/* hif op: disable external IRQs and wait for running handlers to drain. */
static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}
330 static void ath11k_ahb_stop(struct ath11k_base *ab)
332 if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
333 ath11k_ahb_ce_irqs_disable(ab);
334 ath11k_ahb_sync_ce_irqs(ab);
335 ath11k_ahb_kill_tasklets(ab);
336 del_timer_sync(&ab->rx_replenish_retry);
337 ath11k_ce_cleanup_pipes(ab);
340 static int ath11k_ahb_power_up(struct ath11k_base *ab)
342 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
345 ret = rproc_boot(ab_ahb->tgt_rproc);
347 ath11k_err(ab, "failed to boot the remote processor Q6\n");
352 static void ath11k_ahb_power_down(struct ath11k_base *ab)
354 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
356 rproc_shutdown(ab_ahb->tgt_rproc);
359 static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
363 if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
364 ab->hw_params.cold_boot_calib == 0)
367 ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
368 timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
369 (ab->qmi.cal_done == 1),
370 ATH11K_COLD_BOOT_FW_RESET_DELAY);
372 ath11k_cold_boot_cal = 0;
373 ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
376 /* reset the firmware */
377 ath11k_ahb_power_down(ab);
378 ath11k_ahb_power_up(ab);
380 ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
384 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
386 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
388 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
389 cfg->tgt_ce = ab->hw_params.target_ce_config;
390 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
391 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
392 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
395 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
399 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
400 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
402 for (j = 0; j < irq_grp->num_irq; j++)
403 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
405 netif_napi_del(&irq_grp->napi);
409 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
414 if (ab->hw_params.hybrid_bus_type)
415 return ath11k_pcic_free_irq(ab);
417 for (i = 0; i < ab->hw_params.ce_count; i++) {
418 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
420 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
421 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
424 ath11k_ahb_free_ext_irq(ab);
427 static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
429 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
431 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
433 ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
436 static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
438 struct ath11k_ce_pipe *ce_pipe = arg;
440 /* last interrupt received for this CE */
441 ce_pipe->timestamp = jiffies;
443 ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
445 tasklet_schedule(&ce_pipe->intr_tq);
450 static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
452 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
453 struct ath11k_ext_irq_grp,
455 struct ath11k_base *ab = irq_grp->ab;
458 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
459 if (work_done < budget) {
460 napi_complete_done(napi, work_done);
461 ath11k_ahb_ext_grp_enable(irq_grp);
464 if (work_done > budget)
470 static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
472 struct ath11k_ext_irq_grp *irq_grp = arg;
474 /* last interrupt received for this group */
475 irq_grp->timestamp = jiffies;
477 ath11k_ahb_ext_grp_disable(irq_grp);
479 napi_schedule(&irq_grp->napi);
484 static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
486 struct ath11k_hw_params *hw = &ab->hw_params;
491 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
492 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
497 init_dummy_netdev(&irq_grp->napi_ndev);
498 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
499 ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
501 for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
502 if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
503 irq_grp->irqs[num_irq++] =
504 wbm2host_tx_completions_ring1 - j;
507 if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
508 irq_grp->irqs[num_irq++] =
509 reo2host_destination_ring1 - j;
512 if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
513 irq_grp->irqs[num_irq++] = reo2host_exception;
515 if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
516 irq_grp->irqs[num_irq++] = wbm2host_rx_release;
518 if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
519 irq_grp->irqs[num_irq++] = reo2host_status;
521 if (j < ab->hw_params.max_radios) {
522 if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
523 irq_grp->irqs[num_irq++] =
524 rxdma2host_destination_ring_mac1 -
525 ath11k_hw_get_mac_from_pdev_id(hw, j);
528 if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
529 irq_grp->irqs[num_irq++] =
530 host2rxdma_host_buf_ring_mac1 -
531 ath11k_hw_get_mac_from_pdev_id(hw, j);
534 if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
535 irq_grp->irqs[num_irq++] =
536 ppdu_end_interrupts_mac1 -
537 ath11k_hw_get_mac_from_pdev_id(hw, j);
538 irq_grp->irqs[num_irq++] =
539 rxdma2host_monitor_status_ring_mac1 -
540 ath11k_hw_get_mac_from_pdev_id(hw, j);
544 irq_grp->num_irq = num_irq;
546 for (j = 0; j < irq_grp->num_irq; j++) {
547 int irq_idx = irq_grp->irqs[j];
549 irq = platform_get_irq_byname(ab->pdev,
551 ab->irq_num[irq_idx] = irq;
552 irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
553 ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
555 irq_name[irq_idx], irq_grp);
557 ath11k_err(ab, "failed request_irq for %d\n",
566 static int ath11k_ahb_config_irq(struct ath11k_base *ab)
571 if (ab->hw_params.hybrid_bus_type)
572 return ath11k_pcic_config_irq(ab);
574 /* Configure CE irqs */
575 for (i = 0; i < ab->hw_params.ce_count; i++) {
576 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
578 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
581 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
583 tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
584 irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
585 ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
586 IRQF_TRIGGER_RISING, irq_name[irq_idx],
591 ab->irq_num[irq_idx] = irq;
594 /* Configure external interrupts */
595 ret = ath11k_ahb_config_ext_irq(ab);
600 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
601 u8 *ul_pipe, u8 *dl_pipe)
603 const struct service_to_pipe *entry;
604 bool ul_set = false, dl_set = false;
607 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
608 entry = &ab->hw_params.svc_to_ce_map[i];
610 if (__le32_to_cpu(entry->service_id) != service_id)
613 switch (__le32_to_cpu(entry->pipedir)) {
618 *dl_pipe = __le32_to_cpu(entry->pipenum);
623 *ul_pipe = __le32_to_cpu(entry->pipenum);
629 *dl_pipe = __le32_to_cpu(entry->pipenum);
630 *ul_pipe = __le32_to_cpu(entry->pipenum);
637 if (WARN_ON(!ul_set || !dl_set))
643 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
644 .start = ath11k_ahb_start,
645 .stop = ath11k_ahb_stop,
646 .read32 = ath11k_ahb_read32,
647 .write32 = ath11k_ahb_write32,
648 .irq_enable = ath11k_ahb_ext_irq_enable,
649 .irq_disable = ath11k_ahb_ext_irq_disable,
650 .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
651 .power_down = ath11k_ahb_power_down,
652 .power_up = ath11k_ahb_power_up,
655 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
656 .start = ath11k_pcic_start,
657 .stop = ath11k_pcic_stop,
658 .read32 = ath11k_pcic_read32,
659 .write32 = ath11k_pcic_write32,
660 .irq_enable = ath11k_pcic_ext_irq_enable,
661 .irq_disable = ath11k_pcic_ext_irq_disable,
662 .get_msi_address = ath11k_pcic_get_msi_address,
663 .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
664 .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
665 .power_down = ath11k_ahb_power_down,
666 .power_up = ath11k_ahb_power_up,
669 static int ath11k_core_get_rproc(struct ath11k_base *ab)
671 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
672 struct device *dev = ab->dev;
673 struct rproc *prproc;
674 phandle rproc_phandle;
676 if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
677 ath11k_err(ab, "failed to get q6_rproc handle\n");
681 prproc = rproc_get_by_phandle(rproc_phandle);
683 ath11k_err(ab, "failed to get rproc\n");
686 ab_ahb->tgt_rproc = prproc;
691 static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
693 struct platform_device *pdev = ab->pdev;
694 phys_addr_t msi_addr_pa;
695 dma_addr_t msi_addr_iova;
696 struct resource *res;
701 ret = ath11k_pcic_init_msi_config(ab);
703 ath11k_err(ab, "failed to init msi config: %d\n", ret);
707 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
709 ath11k_err(ab, "failed to fetch msi_addr\n");
713 msi_addr_pa = res->start;
714 msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
716 if (dma_mapping_error(ab->dev, msi_addr_iova))
719 ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
720 ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
722 ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
726 ab->pci.msi.ep_base_data = int_prop + 32;
728 for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
729 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
733 ab->pci.msi.irqs[i] = res->start;
736 set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
741 static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
743 struct platform_device *pdev = ab->pdev;
744 struct resource *mem_res;
747 if (ab->hw_params.hybrid_bus_type)
748 return ath11k_ahb_setup_msi_resources(ab);
750 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
752 dev_err(&pdev->dev, "ioremap error\n");
757 ab->mem_len = resource_size(mem_res);
762 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
764 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
765 struct device *dev = ab->dev;
766 struct device_node *node;
770 node = of_parse_phandle(dev->of_node, "memory-region", 0);
774 ret = of_address_to_resource(node, 0, &r);
777 dev_err(dev, "failed to resolve msa fixed region\n");
781 ab_ahb->fw.msa_paddr = r.start;
782 ab_ahb->fw.msa_size = resource_size(&r);
784 node = of_parse_phandle(dev->of_node, "memory-region", 1);
788 ret = of_address_to_resource(node, 0, &r);
791 dev_err(dev, "failed to resolve ce fixed region\n");
795 ab_ahb->fw.ce_paddr = r.start;
796 ab_ahb->fw.ce_size = resource_size(&r);
801 static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
803 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
804 struct device *host_dev = ab->dev;
805 struct platform_device_info info = {0};
806 struct iommu_domain *iommu_dom;
807 struct platform_device *pdev;
808 struct device_node *node;
811 /* Chipsets not requiring MSA need not initialize
812 * MSA resources, return success in such cases.
814 if (!ab->hw_params.fixed_fw_mem)
817 ret = ath11k_ahb_setup_msa_resources(ab);
819 ath11k_err(ab, "failed to setup msa resources\n");
823 node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
825 ab_ahb->fw.use_tz = true;
829 info.fwnode = &node->fwnode;
830 info.parent = host_dev;
831 info.name = node->name;
832 info.dma_mask = DMA_BIT_MASK(32);
834 pdev = platform_device_register_full(&info);
837 return PTR_ERR(pdev);
840 ret = of_dma_configure(&pdev->dev, node, true);
842 ath11k_err(ab, "dma configure fail: %d\n", ret);
846 ab_ahb->fw.dev = &pdev->dev;
848 iommu_dom = iommu_domain_alloc(&platform_bus_type);
850 ath11k_err(ab, "failed to allocate iommu domain\n");
855 ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
857 ath11k_err(ab, "could not attach device: %d\n", ret);
861 ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
862 ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
863 IOMMU_READ | IOMMU_WRITE);
865 ath11k_err(ab, "failed to map firmware region: %d\n", ret);
866 goto err_iommu_detach;
869 ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
870 ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
871 IOMMU_READ | IOMMU_WRITE);
873 ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
874 goto err_iommu_unmap;
877 ab_ahb->fw.use_tz = false;
878 ab_ahb->fw.iommu_domain = iommu_dom;
884 iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
887 iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
890 iommu_domain_free(iommu_dom);
893 platform_device_unregister(pdev);
899 static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
901 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
902 struct iommu_domain *iommu;
903 size_t unmapped_size;
905 if (ab_ahb->fw.use_tz)
908 iommu = ab_ahb->fw.iommu_domain;
910 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
911 if (unmapped_size != ab_ahb->fw.msa_size)
912 ath11k_err(ab, "failed to unmap firmware: %zu\n",
915 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
916 if (unmapped_size != ab_ahb->fw.ce_size)
917 ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
920 iommu_detach_device(iommu, ab_ahb->fw.dev);
921 iommu_domain_free(iommu);
923 platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
928 static int ath11k_ahb_probe(struct platform_device *pdev)
930 struct ath11k_base *ab;
931 const struct of_device_id *of_id;
932 const struct ath11k_hif_ops *hif_ops;
933 const struct ath11k_pci_ops *pci_ops;
934 enum ath11k_hw_rev hw_rev;
937 of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
939 dev_err(&pdev->dev, "failed to find matching device tree id\n");
943 hw_rev = (enum ath11k_hw_rev)of_id->data;
946 case ATH11K_HW_IPQ8074:
947 case ATH11K_HW_IPQ6018_HW10:
948 hif_ops = &ath11k_ahb_hif_ops_ipq8074;
951 case ATH11K_HW_WCN6750_HW10:
952 hif_ops = &ath11k_ahb_hif_ops_wcn6750;
953 pci_ops = &ath11k_ahb_pci_ops_wcn6750;
956 dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
960 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
962 dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
966 ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
969 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
973 ab->hif.ops = hif_ops;
974 ab->pci.ops = pci_ops;
977 platform_set_drvdata(pdev, ab);
979 ret = ath11k_ahb_setup_resources(ab);
983 ret = ath11k_core_pre_init(ab);
987 ret = ath11k_ahb_fw_resources_init(ab);
991 ret = ath11k_hal_srng_init(ab);
995 ret = ath11k_ce_alloc_pipes(ab);
997 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
998 goto err_hal_srng_deinit;
1001 ath11k_ahb_init_qmi_ce_config(ab);
1003 ret = ath11k_core_get_rproc(ab);
1005 ath11k_err(ab, "failed to get rproc: %d\n", ret);
1009 ret = ath11k_core_init(ab);
1011 ath11k_err(ab, "failed to init core: %d\n", ret);
1015 ret = ath11k_ahb_config_irq(ab);
1017 ath11k_err(ab, "failed to configure irq: %d\n", ret);
1021 ath11k_ahb_fwreset_from_cold_boot(ab);
1026 ath11k_ce_free_pipes(ab);
1028 err_hal_srng_deinit:
1029 ath11k_hal_srng_deinit(ab);
1032 ath11k_ahb_fw_resource_deinit(ab);
1035 ath11k_core_free(ab);
1036 platform_set_drvdata(pdev, NULL);
1041 static int ath11k_ahb_remove(struct platform_device *pdev)
1043 struct ath11k_base *ab = platform_get_drvdata(pdev);
1046 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1047 ath11k_ahb_power_down(ab);
1048 ath11k_debugfs_soc_destroy(ab);
1049 ath11k_qmi_deinit_service(ab);
1053 reinit_completion(&ab->driver_recovery);
1055 if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1056 left = wait_for_completion_timeout(&ab->driver_recovery,
1057 ATH11K_AHB_RECOVERY_TIMEOUT);
1059 ath11k_warn(ab, "failed to receive recovery response completion\n");
1062 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1063 cancel_work_sync(&ab->restart_work);
1065 ath11k_core_deinit(ab);
1067 ath11k_ahb_free_irq(ab);
1068 ath11k_hal_srng_deinit(ab);
1069 ath11k_ahb_fw_resource_deinit(ab);
1070 ath11k_ce_free_pipes(ab);
1071 ath11k_core_free(ab);
1072 platform_set_drvdata(pdev, NULL);
1077 static struct platform_driver ath11k_ahb_driver = {
1080 .of_match_table = ath11k_ahb_of_match,
1082 .probe = ath11k_ahb_probe,
1083 .remove = ath11k_ahb_remove,
1086 static int ath11k_ahb_init(void)
1088 return platform_driver_register(&ath11k_ahb_driver);
1090 module_init(ath11k_ahb_init);
1092 static void ath11k_ahb_exit(void)
1094 platform_driver_unregister(&ath11k_ahb_driver);
1096 module_exit(ath11k_ahb_exit);
1098 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1099 MODULE_LICENSE("Dual BSD/GPL");