/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"
#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
#define BROKEN_FRAME_INFO
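/*
 * Build-time switches: TDM_PPPOHT_SLIC_MAXIN selects the larger SLIC
 * rx/tx frame-sync delays in utdm_primary_info below, and
 * BROKEN_FRAME_INFO makes hdlc_rx_done() log every frame that is
 * dropped for a CRC error.
 */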
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};
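/*
 * Per-UCC working copies of the template above: ucc_hdlc_probe() copies
 * utdm_primary_info into the slot for its UCC and then fills in the UCC
 * number and the rx/tx clock sources from the device tree.
 */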
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
	}
	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);
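	/*
	 * The HDLC event bits (RXB/RXF/TXB) live in the upper half of the
	 * 32-bit UCCE/UCCM registers, hence the << 16 above; the interrupt
	 * handler below tests the same events with (ucce >> 16).
	 */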
	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);
	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);
	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}
	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}
	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;
	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
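	/*
	 * QE_ASSIGN_PAGE_TO_DEVICE points the QE at the MURAM page just
	 * allocated for this channel's parameter RAM; the host-side view
	 * of the same memory is obtained through qe_muram_addr() above.
	 */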
	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
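	/*
	 * c_mask/c_pres select the CCITT CRC-16 frame checksum, and
	 * hmask/haddr1-4 program the HDLC address-recognition registers;
	 * the DEFAULT_* values come from fsl_ucc_hdlc.h.
	 */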
	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
			* MAX_RX_BUF_LENGTH);

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;
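	/*
	 * Error unwinding below runs in reverse order of the allocations
	 * above; the goto targets fall through, so each failure point
	 * frees everything that was set up before it.
	 */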
free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u8 *send_buf;
	int i;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	pr_info("Tx data skb->len:%d ", skb->len);
	send_buf = (u8 *)skb->data;
	pr_info("\nTransmitted data:\n");
	for (i = 0; i < 16; i++) {
		if (i == skb->len)
			pr_info("++++");
		else
			pr_info("%02x\n", send_buf[i]);
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
		(priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(bd_status, &bd->status);
	iowrite16be(skb->len, &bd->length);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
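/*
 * ucc_hdlc_tx() is the producer (curtx_bd/skb_curtx) and hdlc_tx_done(),
 * called from NAPI context, is the consumer (dirty_tx/skb_dirtytx); both
 * sides serialize on priv->lock.
 */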
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		pr_info("TxBD: %x\n", bd_status);
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
			(priv->skb_dirtytx +
			 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;
	int i;
	static int entry;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
#ifdef BROKEN_FRAME_INFO
			pr_info("Broken Frame with RxBD: %x\n", bd_status);
#endif
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		pr_info("Received data length:%d", length);
		pr_info("while entry times:%d", entry++);

		pr_info("\nReceived data:\n");
		for (i = 0; (i < 16); i++) {
			if (i == length)
				pr_info("++++");
			else
				pr_info("%02x\n", bdbuffer[i]);
		}
		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		pr_info("skb->protocol:%x\n", skb->protocol);
		netif_receive_skb(skb);
recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}
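/*
 * NAPI poll: Tx completions are reaped first, then Rx is processed up to
 * the budget; only when the budget is not exhausted does the poll complete
 * and re-enable the UCC interrupts that were masked off in the IRQ handler.
 */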
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany = 0;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete(napi);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	pr_info("irq ucce:%x\n", ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
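/*
 * Only SIOCWANDEV/IF_GET_IFACE is handled locally: the interface reports
 * itself as E1 and fills in just the clock type, following the usual
 * sync-serial size probe (a too-small buffer gets the wanted size and
 * -ENOBUFS back). Everything else is delegated to the generic hdlc_ioctl().
 */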
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}
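/*
 * priv->hdlc_busy guards against double initialization: it is set once
 * uhdlc_open() has enabled the channel, cleared in uhdlc_close(), and
 * checked by uhdlc_resume() to decide whether to re-enable TX/RX.
 */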
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	if (priv->ucc_pram) {
		qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
		qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
	}

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}
static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
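/*
 * The QE MUX clock routing (cmxsi1cr_h/l, cmxsi1syr and the cmxucr array)
 * is captured by store_clk_config() on suspend and replayed here on
 * resume, on the assumption that these registers lose state while the
 * SoC is in deep sleep.
 */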
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	dev_dbg(dev, "ucc hdlc suspend\n");
	return 0;
}
static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;
	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);
	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);
	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}
	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM
static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif
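/*
 * HDLC_PM_OPS feeds the .pm field of the platform driver below; without
 * CONFIG_PM it is NULL and the driver registers with no PM callbacks.
 */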
static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;
	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	/* use the same clock when working in loopback */
	if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}
	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);

	return ret;
}
static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);