// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/module.h>
#include <linux/sched.h>

#include "mt76.h"
#include "sdio.h"
23 static u32 mt76s_read_whisr(struct mt76_dev *dev)
25 return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
28 u32 mt76s_read_pcr(struct mt76_dev *dev)
30 struct mt76_sdio *sdio = &dev->sdio;
32 return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
34 EXPORT_SYMBOL_GPL(mt76s_read_pcr);
36 static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
38 struct sdio_func *func = dev->sdio.func;
42 sdio_claim_host(func);
44 sdio_writel(func, offset, MCR_H2DSM0R, &err);
46 dev_err(dev->dev, "failed setting address [err=%d]\n", err);
50 sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
52 dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
56 err = readx_poll_timeout(mt76s_read_whisr, dev, status,
57 status & H2D_SW_INT_READ, 0, 1000000);
59 dev_err(dev->dev, "query whisr timeout\n");
63 sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
65 dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
69 val = sdio_readl(func, MCR_H2DSM0R, &err);
71 dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
76 dev_err(dev->dev, "register mismatch\n");
81 val = sdio_readl(func, MCR_D2HRM1R, &err);
83 dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
86 sdio_release_host(func);
91 static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
93 struct sdio_func *func = dev->sdio.func;
97 sdio_claim_host(func);
99 sdio_writel(func, offset, MCR_H2DSM0R, &err);
101 dev_err(dev->dev, "failed setting address [err=%d]\n", err);
105 sdio_writel(func, val, MCR_H2DSM1R, &err);
108 "failed setting write value [err=%d]\n", err);
112 sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
114 dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
118 err = readx_poll_timeout(mt76s_read_whisr, dev, status,
119 status & H2D_SW_INT_WRITE, 0, 1000000);
121 dev_err(dev->dev, "query whisr timeout\n");
125 sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
127 dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
131 val = sdio_readl(func, MCR_H2DSM0R, &err);
133 dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
138 dev_err(dev->dev, "register mismatch\n");
141 sdio_release_host(func);
144 u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
146 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
147 return dev->mcu_ops->mcu_rr(dev, offset);
149 return mt76s_read_mailbox(dev, offset);
151 EXPORT_SYMBOL_GPL(mt76s_rr);
153 void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
155 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
156 dev->mcu_ops->mcu_wr(dev, offset, val);
158 mt76s_write_mailbox(dev, offset, val);
160 EXPORT_SYMBOL_GPL(mt76s_wr);
162 u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
164 val |= mt76s_rr(dev, offset) & ~mask;
165 mt76s_wr(dev, offset, val);
169 EXPORT_SYMBOL_GPL(mt76s_rmw);
171 void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
172 const void *data, int len)
174 const u32 *val = data;
177 for (i = 0; i < len / sizeof(u32); i++) {
178 mt76s_wr(dev, offset, val[i]);
179 offset += sizeof(u32);
182 EXPORT_SYMBOL_GPL(mt76s_write_copy);
184 void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
190 for (i = 0; i < len / sizeof(u32); i++) {
191 val[i] = mt76s_rr(dev, offset);
192 offset += sizeof(u32);
195 EXPORT_SYMBOL_GPL(mt76s_read_copy);
197 int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
198 const struct mt76_reg_pair *data,
203 for (i = 0; i < len; i++) {
204 mt76s_wr(dev, data->reg, data->value);
210 EXPORT_SYMBOL_GPL(mt76s_wr_rp);
212 int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
213 struct mt76_reg_pair *data, int len)
217 for (i = 0; i < len; i++) {
218 data->value = mt76s_rr(dev, data->reg);
224 EXPORT_SYMBOL_GPL(mt76s_rd_rp);
226 int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
231 dev->sdio.hw_ver = hw_ver;
233 sdio_claim_host(func);
235 ret = sdio_enable_func(func);
239 /* Get ownership from the device */
240 sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
245 ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
246 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
248 dev_err(dev->dev, "Cannot get ownership from device");
252 ret = sdio_set_block_size(func, 512);
256 /* Enable interrupt */
257 sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
261 ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
262 if (hw_ver == MT76_CONNAC2_SDIO)
263 ctrl |= WHIER_RX1_DONE_INT_EN;
264 sdio_writel(func, ctrl, MCR_WHIER, &ret);
269 case MT76_CONNAC_SDIO:
270 /* set WHISR as read clear and Rx aggregation number as 16 */
271 ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
274 ctrl = sdio_readl(func, MCR_WHCR, &ret);
277 ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
278 ctrl &= ~W_INT_CLR_CTRL; /* read clear */
279 ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
283 sdio_writel(func, ctrl, MCR_WHCR, &ret);
287 ret = sdio_claim_irq(func, mt76s_sdio_irq);
291 sdio_release_host(func);
296 sdio_disable_func(func);
298 sdio_release_host(func);
302 EXPORT_SYMBOL_GPL(mt76s_hw_init);
304 int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
306 struct mt76_queue *q = &dev->q_rx[qid];
308 spin_lock_init(&q->lock);
309 q->entry = devm_kcalloc(dev->dev,
310 MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
315 q->ndesc = MT76S_NUM_RX_ENTRIES;
316 q->head = q->tail = 0;
321 EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);
323 static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
325 struct mt76_queue *q;
327 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
329 return ERR_PTR(-ENOMEM);
331 spin_lock_init(&q->lock);
332 q->entry = devm_kcalloc(dev->dev,
333 MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
336 return ERR_PTR(-ENOMEM);
338 q->ndesc = MT76S_NUM_TX_ENTRIES;
343 int mt76s_alloc_tx(struct mt76_dev *dev)
345 struct mt76_queue *q;
348 for (i = 0; i <= MT_TXQ_PSD; i++) {
349 q = mt76s_alloc_tx_queue(dev);
354 dev->phy.q_tx[i] = q;
357 q = mt76s_alloc_tx_queue(dev);
362 dev->q_mcu[MT_MCUQ_WM] = q;
366 EXPORT_SYMBOL_GPL(mt76s_alloc_tx);
368 static struct mt76_queue_entry *
369 mt76s_get_next_rx_entry(struct mt76_queue *q)
371 struct mt76_queue_entry *e = NULL;
373 spin_lock_bh(&q->lock);
375 e = &q->entry[q->tail];
376 q->tail = (q->tail + 1) % q->ndesc;
379 spin_unlock_bh(&q->lock);
385 mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
387 int qid = q - &dev->q_rx[MT_RXQ_MAIN];
391 struct mt76_queue_entry *e;
393 if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
396 e = mt76s_get_next_rx_entry(q);
400 dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
404 if (qid == MT_RXQ_MAIN)
405 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
410 static void mt76s_net_worker(struct mt76_worker *w)
412 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
414 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
423 mt76_for_each_q_rx(dev, i)
424 nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
428 } while (nframes > 0);
431 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
433 struct mt76_queue_entry entry;
440 mcu = q == dev->q_mcu[MT_MCUQ_WM];
441 while (q->queued > 0) {
442 if (!q->entry[q->tail].done)
445 entry = q->entry[q->tail];
446 q->entry[q->tail].done = false;
449 dev_kfree_skb(entry.skb);
453 mt76_queue_tx_complete(dev, q, &entry);
458 wake_up(&dev->tx_wait);
463 static void mt76s_status_worker(struct mt76_worker *w)
465 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
467 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
468 bool resched = false;
472 int ndata_frames = 0;
474 nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
476 for (i = 0; i <= MT_TXQ_PSD; i++)
477 ndata_frames += mt76s_process_tx_queue(dev,
479 nframes += ndata_frames;
480 if (ndata_frames > 0)
483 if (dev->drv->tx_status_data &&
484 !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
485 !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
486 queue_work(dev->wq, &dev->sdio.stat_work);
487 } while (nframes > 0);
490 mt76_worker_schedule(&dev->sdio.txrx_worker);
493 static void mt76s_tx_status_data(struct work_struct *work)
495 struct mt76_sdio *sdio;
496 struct mt76_dev *dev;
500 sdio = container_of(work, struct mt76_sdio, stat_work);
501 dev = container_of(sdio, struct mt76_dev, sdio);
504 if (test_bit(MT76_REMOVED, &dev->phy.state))
507 if (!dev->drv->tx_status_data(dev, &update))
512 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
513 queue_work(dev->wq, &sdio->stat_work);
515 clear_bit(MT76_READING_STATS, &dev->phy.state);
519 mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
520 struct sk_buff *skb, struct mt76_wcid *wcid,
521 struct ieee80211_sta *sta)
523 struct mt76_tx_info tx_info = {
526 int err, len = skb->len;
529 if (q->queued == q->ndesc)
532 skb->prev = skb->next = NULL;
533 err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
537 q->entry[q->head].skb = tx_info.skb;
538 q->entry[q->head].buf_sz = len;
539 q->entry[q->head].wcid = 0xffff;
543 q->head = (q->head + 1) % q->ndesc;
550 mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
551 struct sk_buff *skb, u32 tx_info)
553 int ret = -ENOSPC, len = skb->len, pad;
555 if (q->queued == q->ndesc)
558 pad = round_up(skb->len, 4) - skb->len;
559 ret = mt76_skb_adjust_pad(skb, pad);
563 spin_lock_bh(&q->lock);
565 q->entry[q->head].buf_sz = len;
566 q->entry[q->head].skb = skb;
567 q->head = (q->head + 1) % q->ndesc;
570 spin_unlock_bh(&q->lock);
580 static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
582 struct mt76_sdio *sdio = &dev->sdio;
584 mt76_worker_schedule(&sdio->txrx_worker);
587 static const struct mt76_queue_ops sdio_queue_ops = {
588 .tx_queue_skb = mt76s_tx_queue_skb,
589 .kick = mt76s_tx_kick,
590 .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
593 void mt76s_deinit(struct mt76_dev *dev)
595 struct mt76_sdio *sdio = &dev->sdio;
598 mt76_worker_teardown(&sdio->txrx_worker);
599 mt76_worker_teardown(&sdio->status_worker);
600 mt76_worker_teardown(&sdio->net_worker);
602 cancel_work_sync(&sdio->stat_work);
603 clear_bit(MT76_READING_STATS, &dev->phy.state);
605 mt76_tx_status_check(dev, true);
607 sdio_claim_host(sdio->func);
608 sdio_release_irq(sdio->func);
609 sdio_release_host(sdio->func);
611 mt76_for_each_q_rx(dev, i) {
612 struct mt76_queue *q = &dev->q_rx[i];
615 for (j = 0; j < q->ndesc; j++) {
616 struct mt76_queue_entry *e = &q->entry[j];
621 dev_kfree_skb(e->skb);
626 EXPORT_SYMBOL_GPL(mt76s_deinit);
628 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
629 const struct mt76_bus_ops *bus_ops)
631 struct mt76_sdio *sdio = &dev->sdio;
635 err = mt76_worker_setup(dev->hw, &sdio->status_worker,
636 mt76s_status_worker, "sdio-status");
640 err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
645 sched_set_fifo_low(sdio->status_worker.task);
646 sched_set_fifo_low(sdio->net_worker.task);
648 INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
650 dev->queue_ops = &sdio_queue_ops;
652 dev->sdio.func = func;
654 host_max_cap = min_t(u32, func->card->host->max_req_size,
656 func->card->host->max_blk_count);
657 dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
658 dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
660 if (!dev->sdio.xmit_buf)
665 EXPORT_SYMBOL_GPL(mt76s_init);
/* Module metadata */
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");