// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

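/*
 * Driver for the eMMC Command Queue Host Controller Interface (CQHCI).
 * CQHCI offloads eMMC command queueing (CMDQ) to hardware: the driver
 * builds task and transfer descriptors in memory, rings a per-tag
 * doorbell, and the controller executes the queued tasks and reports
 * completion and errors through interrupts.
 *
 * Typical wiring from an SDHCI-style host driver (a sketch only; the
 * ops table name and the dma64 decision are per-driver):
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	cq_host->ops = &my_cqhci_host_ops;
 *	err = cqhci_init(cq_host, mmc, dma64);
 *
 * and the host's interrupt handler forwards CQE interrupts to
 * cqhci_irq().
 */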
#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

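/*
 * Descriptor addressing helpers. Each of the NUM_SLOTS slots owns one
 * task descriptor immediately followed by one link descriptor (slot_sz
 * bytes in total), plus a private region of max_segs transfer
 * descriptors in the transfer descriptor table.
 */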
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

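/*
 * Initialize a slot's link descriptor. For the DCMD slot (when DCMD is
 * in use) the link descriptor is a plain end marker, since a direct
 * command carries no data; for all other slots it is a link (ACT 0x6)
 * to that slot's chain of transfer descriptors.
 */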
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

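/*
 * Interrupt status enable (ISTE) and interrupt signal enable (ISGE) are
 * always programmed with the same mask, so any enabled event both
 * latches in CQHCI_IS and signals the interrupt.
 */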
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|
 *      .
 *      .
 *  no. of slots      max-segs
 *      .
 * |----------|
 *
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */

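/*
 * Illustrative sizing (actual values depend on caps and quirks): with
 * 128-bit task descriptors and 64-bit DMA, task_desc_len = 16 and
 * link_desc_len = 16, so slot_sz = 32 and the descriptor table is
 * 32 * num_slots = 1024 bytes for 32 slots; the transfer descriptor
 * table is trans_desc_len * max_segs * cqe_qdepth bytes.
 */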
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < cq_host->num_slots; i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

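/*
 * Program the controller from scratch: configuration can only be
 * changed while CQHCI_ENABLE is clear, so disable first, then set the
 * DCMD and task descriptor size bits, point the controller at the task
 * descriptor list, re-enable, and finally un-halt before unmasking
 * interrupts.
 */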
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

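/*
 * ->cqe_enable callback. Called once the card's EXT_CSD reports command
 * queueing enabled; allocates the descriptor tables on first use and
 * activates the controller.
 */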
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

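/*
 * ->cqe_off callback: halt the controller so that non-queued commands
 * can be sent safely. Re-enabling is deferred to the next cqe_request.
 */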
static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

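/*
 * Build a 64-bit task descriptor for a data transfer: attribute bits
 * (valid/end/interrupt, ACT 0x5 for a data task), per-request flag
 * bits, the block count, and the block address, laid out as defined by
 * the CQHCI task descriptor format.
 */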
static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

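/*
 * Build the descriptor for a direct command (DCMD), which occupies the
 * dedicated DCMD slot. resp_type/timing encode the expected response
 * category (no response, R1/R4/R5, or R1b with busy signalling),
 * following the DCMD task descriptor fields in the CQHCI specification.
 */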
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

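/*
 * ->cqe_request callback: re-enables the controller if needed (e.g. on
 * the first request after resume), writes the task and transfer
 * descriptors for the tag, and rings the doorbell (TDBR). No doorbell
 * is rung while error recovery is in progress.
 */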
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

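/*
 * Request the controller to clear all pending tasks and wait for it to
 * complete, using the TCL interrupt to wake the waiter. The controller
 * is expected to be halted before tasks are cleared.
 */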
static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

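/*
 * The CQHCI_VER register encodes the version as BCD digits: a major
 * digit and two minor digits, e.g. major 5 with minor digits 1 and 0
 * reads back as version 5.10.
 */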
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");