1 /* Freescale QUICC Engine HDLC Device Driver
3 * Copyright 2016 Freescale Semiconductor Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/hdlc.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/netdevice.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/sched.h>
26 #include <linux/skbuff.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/stddef.h>
30 #include <soc/fsl/qe/qe_tdm.h>
31 #include <uapi/linux/if_arp.h>
33 #include "fsl_ucc_hdlc.h"
35 #define DRV_DESC "Freescale QE UCC HDLC Driver"
36 #define DRV_NAME "ucc_hdlc"
38 #define TDM_PPPOHT_SLIC_MAXIN
/*
 * Template TDM/UCC-fast configuration for one HDLC channel.
 * Copied into utdm_info[ucc_num] at probe time and then specialized
 * (ucc_num, clocks) per device.  Visible defaults: HDLC protocol mode,
 * normal transparent TTX/TRX, NRZ encoding both directions, 16-bit CRC,
 * sync length unused.
 * NOTE(review): listing is elided here — additional initializer fields
 * exist between the visible lines; confirm against the full file.
 */
40 static struct ucc_tdm_info utdm_primary_info = {
55 .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
56 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
57 .tenc = UCC_FAST_TX_ENCODING_NRZ,
58 .renc = UCC_FAST_RX_ENCODING_NRZ,
59 .tcrc = UCC_FAST_16_BIT_CRC,
60 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
/* SLIC maxin tuning is compiled in when TDM_PPPOHT_SLIC_MAXIN is
 * defined (it is defined unconditionally above at the #define).
 */
64 #ifdef TDM_PPPOHT_SLIC_MAXIN
/* One mutable copy of the template per possible UCC instance. */
79 static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
/*
 * uhdlc_init - one-time hardware/memory setup for a UCC HDLC channel.
 *
 * Initializes the UCC fast controller, optionally configures internal
 * loopback or HDLC-bus mode, allocates DMA-coherent Rx/Tx buffer
 * descriptor rings and data buffers, allocates MURAM for the HDLC
 * parameter RAM and the riptr/tiptr internal data pointers, programs
 * the parameter RAM (CRC, frame length, address filter), and builds the
 * initial BD rings.  Returns 0 on success or a negative errno; the tail
 * labels unwind allocations in reverse order.
 * NOTE(review): this listing is elided — error-path gotos, returns and
 * several statements between the visible lines are not shown.
 */
81 static int uhdlc_init(struct ucc_hdlc_private *priv)
83 struct ucc_tdm_info *ut_info;
84 struct ucc_fast_info *uf_info;
89 dma_addr_t bd_dma_addr;
94 ut_info = priv->ut_info;
95 uf_info = &ut_info->uf_info;
102 /* This sets HPM register in CMXUCR register which configures a
103 * open drain connected HDLC bus
106 uf_info->brkpt_support = 1;
/* Unmask Rx-buffer, Rx-frame and Tx-buffer events (upper 16 bits). */
108 uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
109 UCC_HDLC_UCCE_TXB) << 16);
111 ret = ucc_fast_init(uf_info, &priv->uccf);
113 dev_err(priv->dev, "Failed to init uccf.");
117 priv->uf_regs = priv->uccf->uf_regs;
/* Keep the controller quiescent until uhdlc_open() enables it. */
118 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
121 if (priv->loopback) {
122 dev_info(priv->dev, "Loopback Mode\n");
123 /* use the same clock when work in loopback */
124 qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
126 gumr = ioread32be(&priv->uf_regs->gumr);
127 gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
129 gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
130 iowrite32be(gumr, &priv->uf_regs->gumr);
/* presumably gated on priv->tsa in the elided lines — TODO confirm */
135 ucc_tdm_init(priv->utdm, priv->ut_info);
137 /* Write to QE CECR, UCCx channel to Stop Transmission */
138 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
139 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
140 QE_CR_PROTOCOL_UNSPECIFIED, 0);
142 /* Set UPSMR normal mode (need fixed)*/
143 iowrite32be(0, &priv->uf_regs->upsmr);
146 if (priv->hdlc_bus) {
149 dev_info(priv->dev, "HDLC bus Mode\n");
150 upsmr = ioread32be(&priv->uf_regs->upsmr);
152 /* bus mode and retransmit enable, with collision window
155 upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
157 iowrite32be(upsmr, &priv->uf_regs->upsmr);
159 /* explicitly disable CDS & CTSP */
160 gumr = ioread32be(&priv->uf_regs->gumr);
161 gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
162 /* set automatic sync to explicitly ignore CD signal */
163 gumr |= UCC_FAST_GUMR_SYNL_AUTO;
164 iowrite32be(gumr, &priv->uf_regs->gumr);
167 priv->rx_ring_size = RX_BD_RING_LEN;
168 priv->tx_ring_size = TX_BD_RING_LEN;
/* Rx buffer descriptor ring - DMA-coherent so QE and CPU share it. */
170 priv->rx_bd_base = dma_alloc_coherent(priv->dev,
171 RX_BD_RING_LEN * sizeof(struct qe_bd),
172 &priv->dma_rx_bd, GFP_KERNEL);
174 if (!priv->rx_bd_base) {
175 dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
181 priv->tx_bd_base = dma_alloc_coherent(priv->dev,
182 TX_BD_RING_LEN * sizeof(struct qe_bd),
183 &priv->dma_tx_bd, GFP_KERNEL);
185 if (!priv->tx_bd_base) {
186 dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
191 /* Alloc parameter ram for ucc hdlc */
192 priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
193 ALIGNMENT_OF_UCC_HDLC_PRAM);
195 if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
196 dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
/* skb pointer shadow arrays, one slot per BD. */
201 priv->rx_skbuff = kcalloc(priv->rx_ring_size,
202 sizeof(*priv->rx_skbuff),
204 if (!priv->rx_skbuff) {
209 priv->tx_skbuff = kcalloc(priv->tx_ring_size,
210 sizeof(*priv->tx_skbuff),
212 if (!priv->tx_skbuff) {
/* Reset ring cursors to the start of both rings. */
218 priv->skb_dirtytx = 0;
219 priv->curtx_bd = priv->tx_bd_base;
220 priv->dirty_tx = priv->tx_bd_base;
221 priv->currx_bd = priv->rx_bd_base;
222 priv->currx_bdnum = 0;
224 /* init parameter base */
225 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
226 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
227 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
229 priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
230 qe_muram_addr(priv->ucc_pram_offset);
232 /* Zero out parameter ram */
233 memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
235 /* Alloc riptr, tiptr */
236 riptr = qe_muram_alloc(32, 32);
237 if (IS_ERR_VALUE(riptr)) {
238 dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
243 tiptr = qe_muram_alloc(32, 32);
244 if (IS_ERR_VALUE(tiptr)) {
245 dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
/* riptr/tiptr are stored in 16-bit parameter-RAM fields; reject
 * MURAM offsets that do not fit in 16 bits.
 */
249 if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
250 dev_err(priv->dev, "MURAM allocation out of addressable range\n");
255 /* Set RIPTR, TIPTR */
256 iowrite16be(riptr, &priv->ucc_pram->riptr);
257 iowrite16be(tiptr, &priv->ucc_pram->tiptr);
/* Maximum receive buffer length per BD. */
260 iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
262 /* Set RBASE, TBASE */
263 iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
264 iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
266 /* Set RSTATE, TSTATE */
267 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
268 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
270 /* Set C_MASK, C_PRES for 16bit CRC */
271 iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
272 iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
/* Max frame length, Rx FIFO thresholds, HDLC address filter. */
274 iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
275 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
276 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
277 iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
278 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
280 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
281 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
/* One contiguous zeroed DMA area holding all Rx then all Tx data
 * buffers; split below by RX_BD_RING_LEN * MAX_RX_BUF_LENGTH.
 * NOTE(review): dma_zalloc_coherent() was removed from modern kernels
 * (dma_alloc_coherent zeroes) — verify against the target kernel.
 */
284 bd_buffer = dma_zalloc_coherent(priv->dev,
285 (RX_BD_RING_LEN + TX_BD_RING_LEN) *
287 &bd_dma_addr, GFP_KERNEL);
290 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
295 priv->rx_buffer = bd_buffer;
296 priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
298 priv->dma_rx_addr = bd_dma_addr;
299 priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
/* Rx BDs: empty + interrupt; last BD also gets the wrap bit. */
301 for (i = 0; i < RX_BD_RING_LEN; i++) {
302 if (i < (RX_BD_RING_LEN - 1))
303 bd_status = R_E_S | R_I_S;
305 bd_status = R_E_S | R_I_S | R_W_S;
307 iowrite16be(bd_status, &priv->rx_bd_base[i].status);
308 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
309 &priv->rx_bd_base[i].buf);
/* Tx BDs: interrupt + CRC-append; last BD also gets the wrap bit. */
312 for (i = 0; i < TX_BD_RING_LEN; i++) {
313 if (i < (TX_BD_RING_LEN - 1))
314 bd_status = T_I_S | T_TC_S;
316 bd_status = T_I_S | T_TC_S | T_W_S;
318 iowrite16be(bd_status, &priv->tx_bd_base[i].status);
319 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
320 &priv->tx_bd_base[i].buf);
/* Error unwind (labels elided in this listing): free in reverse
 * order of allocation.
 */
326 qe_muram_free(tiptr);
328 qe_muram_free(riptr);
330 kfree(priv->tx_skbuff);
332 kfree(priv->rx_skbuff);
334 qe_muram_free(priv->ucc_pram_offset);
336 dma_free_coherent(priv->dev,
337 TX_BD_RING_LEN * sizeof(struct qe_bd),
338 priv->tx_bd_base, priv->dma_tx_bd);
340 dma_free_coherent(priv->dev,
341 RX_BD_RING_LEN * sizeof(struct qe_bd),
342 priv->rx_bd_base, priv->dma_rx_bd);
344 ucc_fast_free(priv->uccf);
/*
 * ucc_hdlc_tx - hdlc->xmit hook: queue one skb on the Tx BD ring.
 *
 * For raw HDLC devices a DEFAULT_HDLC_HEAD header is pushed in front of
 * the payload; for PPP the existing header is validated instead (the
 * dev->type switch is elided in this listing).  The skb data is copied
 * into the channel's coherent Tx buffer, the BD is handed to the QE
 * (T_R_S set), and the queue is stopped when the ring catches up with
 * dirty_tx.  Serialized against the completion path by priv->lock.
 */
349 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
351 hdlc_device *hdlc = dev_to_hdlc(dev);
352 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
353 struct qe_bd __iomem *bd;
/* Raw HDLC: need headroom to prepend the 2-byte HDLC header. */
360 if (skb_headroom(skb) < HDLC_HEAD_LEN) {
361 dev->stats.tx_dropped++;
363 netdev_err(dev, "No enough space for hdlc head\n");
367 skb_push(skb, HDLC_HEAD_LEN);
369 proto_head = (u16 *)skb->data;
370 *proto_head = htons(DEFAULT_HDLC_HEAD);
372 dev->stats.tx_bytes += skb->len;
/* PPP: header must already be present and correct. */
376 proto_head = (u16 *)skb->data;
377 if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
378 dev->stats.tx_dropped++;
380 netdev_err(dev, "Wrong ppp header\n");
384 dev->stats.tx_bytes += skb->len;
/* default case of the elided dev->type switch: unsupported type */
388 dev->stats.tx_dropped++;
392 spin_lock_irqsave(&priv->lock, flags);
394 /* Start from the next BD that should be filled */
396 bd_status = ioread16be(&bd->status);
397 /* Save the skb pointer so we can free it later */
398 priv->tx_skbuff[priv->skb_curtx] = skb;
400 /* Update the current skb pointer (wrapping if this was the last) */
402 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
404 /* copy skb data to tx buffer for sdma processing */
405 memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
406 skb->data, skb->len);
408 /* set bd status and length */
409 bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
/* Length must be written before status hands the BD to the QE. */
411 iowrite16be(skb->len, &bd->length);
412 iowrite16be(bd_status, &bd->status);
414 /* Move to next BD in the ring */
415 if (!(bd_status & T_W_S))
418 bd = priv->tx_bd_base;
/* Ring full: next free BD is the oldest un-reclaimed one. */
420 if (bd == priv->dirty_tx) {
421 if (!netif_queue_stopped(dev))
422 netif_stop_queue(dev);
427 spin_unlock_irqrestore(&priv->lock, flags);
/*
 * hdlc_tx_done - reclaim completed Tx BDs (called from NAPI poll under
 * priv->lock).
 *
 * Walks the ring from dirty_tx while the QE has cleared T_R_S, frees
 * the corresponding skb, scrubs the DMA buffer, advances skb_dirtytx,
 * and wakes the queue if it was stopped for lack of BDs.
 */
432 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
434 /* Start from the next BD that should be filled */
435 struct net_device *dev = priv->ndev;
436 struct qe_bd *bd; /* BD pointer */
440 bd_status = ioread16be(&bd->status);
442 /* Normal processing. */
443 while ((bd_status & T_R_S) == 0) {
446 /* BD contains already transmitted buffer. */
447 /* Handle the transmitted buffer and release */
448 /* the BD to be used with the current frame */
450 skb = priv->tx_skbuff[priv->skb_dirtytx];
453 dev->stats.tx_packets++;
/* Clear the coherent Tx buffer slot the QE just finished with. */
454 memset(priv->tx_buffer +
455 (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
457 dev_kfree_skb_irq(skb);
459 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
462 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
464 /* We freed a buffer, so now we can restart transmission */
465 if (netif_queue_stopped(dev))
466 netif_wake_queue(dev);
468 /* Advance the confirmation BD pointer */
469 if (!(bd_status & T_W_S))
472 bd = priv->tx_bd_base;
473 bd_status = ioread16be(&bd->status);
/*
 * hdlc_rx_done - NAPI receive: drain filled Rx BDs up to
 * @rx_work_limit.
 *
 * For each complete BD (R_E_S clear) the payload is copied out of the
 * coherent Rx buffer into a freshly allocated skb, stripped of the
 * HDLC header/CRC as appropriate for dev->type (the switch itself is
 * elided in this listing), and handed to the stack via
 * netif_receive_skb().  The BD is then re-armed (R_E_S | R_I_S) and the
 * cursor advances, wrapping on R_W_S.  Returns the number of packets
 * processed (howmany).
 */
480 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
482 struct net_device *dev = priv->ndev;
483 struct sk_buff *skb = NULL;
484 hdlc_device *hdlc = dev_to_hdlc(dev);
487 u16 length, howmany = 0;
491 bd_status = ioread16be(&bd->status);
493 /* while there are received buffers and BD is full (~R_E) */
494 while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
/* Per-BD error accounting before touching the payload. */
495 if (bd_status & R_OV_S)
496 dev->stats.rx_over_errors++;
497 if (bd_status & R_CR_S) {
498 dev->stats.rx_crc_errors++;
499 dev->stats.rx_dropped++;
502 bdbuffer = priv->rx_buffer +
503 (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
504 length = ioread16be(&bd->length);
/* Raw HDLC: drop the 2-byte header and trailing CRC. */
508 bdbuffer += HDLC_HEAD_LEN;
509 length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
511 skb = dev_alloc_skb(length);
513 dev->stats.rx_dropped++;
517 skb_put(skb, length);
520 memcpy(skb->data, bdbuffer, length);
/* PPP (presumably — switch cases elided): strip only the CRC. */
524 length -= HDLC_CRC_SIZE;
526 skb = dev_alloc_skb(length);
528 dev->stats.rx_dropped++;
532 skb_put(skb, length);
535 memcpy(skb->data, bdbuffer, length);
539 dev->stats.rx_packets++;
540 dev->stats.rx_bytes += skb->len;
543 skb->protocol = hdlc_type_trans(skb, dev);
544 netif_receive_skb(skb);
/* Re-arm this BD for the QE. */
547 iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
549 /* update to point at the next bd */
550 if (bd_status & R_W_S) {
551 priv->currx_bdnum = 0;
552 bd = priv->rx_bd_base;
554 if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
555 priv->currx_bdnum += 1;
557 priv->currx_bdnum = RX_BD_RING_LEN - 1;
562 bd_status = ioread16be(&bd->status);
/*
 * ucc_hdlc_poll - NAPI poll callback.
 *
 * Reclaims Tx completions under priv->lock, then processes up to
 * @budget received frames.  When less than the budget was consumed,
 * NAPI is completed and the Rx/Tx event interrupts are re-enabled in
 * the UCC mask register.  Returns the number of Rx packets handled.
 */
569 static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
571 struct ucc_hdlc_private *priv = container_of(napi,
572 struct ucc_hdlc_private,
576 /* Tx event processing */
577 spin_lock(&priv->lock);
579 spin_unlock(&priv->lock);
582 howmany += hdlc_rx_done(priv, budget - howmany);
584 if (howmany < budget) {
585 napi_complete_done(napi, howmany);
/* Re-enable the interrupts masked off in the IRQ handler. */
586 qe_setbits32(priv->uccf->p_uccm,
587 (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
/*
 * ucc_hdlc_irq_handler - UCC interrupt: ack events, kick NAPI.
 *
 * Reads and clears the event register (write-1-to-clear via writing
 * back ucce).  Rx/Tx events are masked off and deferred to NAPI;
 * busy (BSY) and Tx-error (TXE) events are counted in netdev stats.
 */
593 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
595 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
596 struct net_device *dev = priv->ndev;
597 struct ucc_fast_private *uccf;
598 struct ucc_tdm_info *ut_info;
602 ut_info = priv->ut_info;
605 ucce = ioread32be(uccf->p_ucce);
606 uccm = ioread32be(uccf->p_uccm);
/* Ack all pending events by writing them back. */
608 iowrite32be(ucce, uccf->p_ucce);
612 if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
613 if (napi_schedule_prep(&priv->napi)) {
/* Mask Rx/Tx events until the poll routine re-enables them. */
614 uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
616 iowrite32be(uccm, uccf->p_uccm);
617 __napi_schedule(&priv->napi);
621 /* Errors and other events */
622 if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
623 dev->stats.rx_errors++;
624 if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
625 dev->stats.tx_errors++;
/*
 * uhdlc_ioctl - SIOCWANDEV handler.
 *
 * Reports the interface as E1 with the stored clocking type via a
 * te1_settings structure copied to user space; everything else is
 * delegated to the generic hdlc_ioctl().
 */
630 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
632 const size_t size = sizeof(te1_settings);
634 struct ucc_hdlc_private *priv = netdev_priv(dev);
636 if (cmd != SIOCWANDEV)
637 return hdlc_ioctl(dev, ifr, cmd);
639 switch (ifr->ifr_settings.type) {
641 ifr->ifr_settings.type = IF_IFACE_E1;
/* Caller's buffer too small: report the required size. */
642 if (ifr->ifr_settings.size < size) {
643 ifr->ifr_settings.size = size; /* data size wanted */
646 memset(&line, 0, sizeof(line));
647 line.clock_type = priv->clocking;
649 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
654 return hdlc_ioctl(dev, ifr, cmd);
/*
 * uhdlc_open - ndo_open: bring the channel up.
 *
 * Guarded by priv->hdlc_busy so the hardware is only initialized once:
 * requests the UCC IRQ, issues QE_INIT_TX_RX, enables the fast
 * controller both directions, enables the TDM port (when in TSA mode —
 * the condition line is elided here), then attaches the device and
 * starts NAPI and the Tx queue.
 */
658 static int uhdlc_open(struct net_device *dev)
661 hdlc_device *hdlc = dev_to_hdlc(dev);
662 struct ucc_hdlc_private *priv = hdlc->priv;
663 struct ucc_tdm *utdm = priv->utdm;
665 if (priv->hdlc_busy != 1) {
666 if (request_irq(priv->ut_info->uf_info.irq,
667 ucc_hdlc_irq_handler, 0, "hdlc", priv))
670 cecr_subblock = ucc_fast_get_qe_cr_subblock(
671 priv->ut_info->uf_info.ucc_num);
673 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
674 QE_CR_PROTOCOL_UNSPECIFIED, 0);
676 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
678 /* Enable the TDM port */
680 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
683 netif_device_attach(priv->ndev);
684 napi_enable(&priv->napi);
685 netif_start_queue(dev);
/*
 * uhdlc_memclean - release everything uhdlc_init() allocated.
 *
 * Frees the riptr/tiptr MURAM areas, both BD rings, the parameter RAM,
 * the skb shadow arrays, the mapped register window, the ucc_fast
 * context, and the coherent Rx/Tx data buffers; pointers are NULLed so
 * the function is safe against partial initialization.
 * NOTE(review): qe_muram_free(priv->ucc_pram->riptr) dereferences an
 * __iomem parameter-RAM field directly instead of ioread16be() — looks
 * wrong on big-endian iomem; upstream later wrapped these reads.
 * Confirm against the target kernel before relying on it.
 */
692 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
694 qe_muram_free(priv->ucc_pram->riptr);
695 qe_muram_free(priv->ucc_pram->tiptr);
697 if (priv->rx_bd_base) {
698 dma_free_coherent(priv->dev,
699 RX_BD_RING_LEN * sizeof(struct qe_bd),
700 priv->rx_bd_base, priv->dma_rx_bd);
702 priv->rx_bd_base = NULL;
706 if (priv->tx_bd_base) {
707 dma_free_coherent(priv->dev,
708 TX_BD_RING_LEN * sizeof(struct qe_bd),
709 priv->tx_bd_base, priv->dma_tx_bd);
711 priv->tx_bd_base = NULL;
715 if (priv->ucc_pram) {
716 qe_muram_free(priv->ucc_pram_offset);
717 priv->ucc_pram = NULL;
718 priv->ucc_pram_offset = 0;
/* kfree(NULL) is a no-op, so the shadow arrays need no guard. */
721 kfree(priv->rx_skbuff);
722 priv->rx_skbuff = NULL;
724 kfree(priv->tx_skbuff);
725 priv->tx_skbuff = NULL;
728 iounmap(priv->uf_regs);
729 priv->uf_regs = NULL;
733 ucc_fast_free(priv->uccf);
/* Rx then Tx coherent data buffers (both carved from one allocation
 * in uhdlc_init; the elided lines presumably keep the split freeing
 * correct — verify against the full file).
 */
737 if (priv->rx_buffer) {
738 dma_free_coherent(priv->dev,
739 RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
740 priv->rx_buffer, priv->dma_rx_addr);
741 priv->rx_buffer = NULL;
742 priv->dma_rx_addr = 0;
745 if (priv->tx_buffer) {
746 dma_free_coherent(priv->dev,
747 TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
748 priv->tx_buffer, priv->dma_tx_addr);
749 priv->tx_buffer = NULL;
750 priv->dma_tx_addr = 0;
/*
 * uhdlc_close - ndo_stop: quiesce the channel.
 *
 * Disables NAPI, gracefully stops QE transmission and closes the Rx
 * BDs, disables the TDM port (TSA condition elided in this listing),
 * disables the fast controller, frees the IRQ and stops the queue.
 */
754 static int uhdlc_close(struct net_device *dev)
756 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
757 struct ucc_tdm *utdm = priv->utdm;
760 napi_disable(&priv->napi);
761 cecr_subblock = ucc_fast_get_qe_cr_subblock(
762 priv->ut_info->uf_info.ucc_num);
764 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
765 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
766 qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
767 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
770 utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
772 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
774 free_irq(priv->ut_info->uf_info.irq, priv);
775 netif_stop_queue(dev);
/*
 * ucc_hdlc_attach - hdlc->attach hook: validate and store line
 * parameters.
 *
 * Accepts NRZ/NRZI encoding and no-parity or CRC16/CRC32 CCITT parity;
 * anything else is rejected (the -EINVAL returns are elided in this
 * listing).  Accepted values are cached in priv for later use.
 */
781 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
782 unsigned short parity)
784 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
786 if (encoding != ENCODING_NRZ &&
787 encoding != ENCODING_NRZI)
790 if (parity != PARITY_NONE &&
791 parity != PARITY_CRC32_PR1_CCITT &&
792 parity != PARITY_CRC16_PR1_CCITT)
795 priv->encoding = encoding;
796 priv->parity = parity;
/*
 * store_clk_config - snapshot QE mux clock routing before suspend.
 *
 * Saves the SI1 clock-route registers, the SI1 sync register and all
 * four CMX UCC clock-route registers into priv for resume_clk_config().
 */
802 static void store_clk_config(struct ucc_hdlc_private *priv)
804 struct qe_mux *qe_mux_reg = &qe_immr->qmx;
807 priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
808 priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
811 priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
814 memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
/*
 * resume_clk_config - restore the QE mux clock routing captured by
 * store_clk_config() (inverse writes, same registers).
 */
817 static void resume_clk_config(struct ucc_hdlc_private *priv)
819 struct qe_mux *qe_mux_reg = &qe_immr->qmx;
821 memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
823 iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
824 iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
826 iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
/*
 * uhdlc_suspend - dev_pm_ops suspend/freeze callback.
 *
 * If the interface is running: detach it, stop NAPI, back up the UCC
 * mode registers (gumr/guemr), copy the HDLC parameter RAM into a
 * kmalloc'd backup (restored and freed in uhdlc_resume), save the
 * clock mux configuration, and disable the controller.  Returns 0, or
 * -ENOMEM if the parameter-RAM backup cannot be allocated.
 */
829 static int uhdlc_suspend(struct device *dev)
831 struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
832 struct ucc_tdm_info *ut_info;
833 struct ucc_fast __iomem *uf_regs;
/* Nothing to do for an interface that is not up. */
838 if (!netif_running(priv->ndev))
841 netif_device_detach(priv->ndev);
842 napi_disable(&priv->napi);
844 ut_info = priv->ut_info;
845 uf_regs = priv->uf_regs;
847 /* backup gumr guemr*/
848 priv->gumr = ioread32be(&uf_regs->gumr);
849 priv->guemr = ioread8(&uf_regs->guemr);
851 priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
853 if (!priv->ucc_pram_bak)
856 /* backup HDLC parameter */
857 memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
858 sizeof(struct ucc_hdlc_param));
860 /* store the clk configuration */
861 store_clk_config(priv);
864 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/*
 * uhdlc_resume - dev_pm_ops resume/thaw callback.
 *
 * Mirrors uhdlc_suspend(): restores mode and virtual-FIFO registers,
 * clock routing, parameter RAM (from the backup taken at suspend, then
 * freed), rebuilds both BD rings in their pristine state, and — when
 * the channel was active (hdlc_busy) — re-issues QE_INIT_TX_RX,
 * re-enables the controller and TDM port, and reattaches the netdev.
 * NOTE(review): rebuilding the BD rings from scratch discards any
 * in-flight frames from before suspend; listing is elided, so some
 * recovery steps may not be shown.
 */
869 static int uhdlc_resume(struct device *dev)
871 struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
872 struct ucc_tdm *utdm;
873 struct ucc_tdm_info *ut_info;
874 struct ucc_fast __iomem *uf_regs;
875 struct ucc_fast_private *uccf;
876 struct ucc_fast_info *uf_info;
884 if (!netif_running(priv->ndev))
888 ut_info = priv->ut_info;
889 uf_info = &ut_info->uf_info;
890 uf_regs = priv->uf_regs;
893 /* restore gumr guemr */
894 iowrite8(priv->guemr, &uf_regs->guemr);
895 iowrite32be(priv->gumr, &uf_regs->gumr);
897 /* Set Virtual Fifo registers */
898 iowrite16be(uf_info->urfs, &uf_regs->urfs);
899 iowrite16be(uf_info->urfet, &uf_regs->urfet);
900 iowrite16be(uf_info->urfset, &uf_regs->urfset);
901 iowrite16be(uf_info->utfs, &uf_regs->utfs);
902 iowrite16be(uf_info->utfet, &uf_regs->utfet);
903 iowrite16be(uf_info->utftt, &uf_regs->utftt);
904 /* utfb, urfb are offsets from MURAM base */
905 iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
906 iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
908 /* Rx Tx and sync clock routing */
909 resume_clk_config(priv);
911 iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
/* Clear any stale events before re-enabling anything. */
912 iowrite32be(0xffffffff, &uf_regs->ucce);
914 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/* presumably gated on priv->tsa in the elided lines — TODO confirm */
918 ucc_tdm_init(priv->utdm, priv->ut_info);
920 /* Write to QE CECR, UCCx channel to Stop Transmission */
921 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
922 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
923 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
925 /* Set UPSMR normal mode */
926 iowrite32be(0, &uf_regs->upsmr);
928 /* init parameter base */
929 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
930 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
931 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
933 priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
934 qe_muram_addr(priv->ucc_pram_offset);
936 /* restore ucc parameter */
937 memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
938 sizeof(struct ucc_hdlc_param));
939 kfree(priv->ucc_pram_bak);
941 /* rebuild BD entry */
942 for (i = 0; i < RX_BD_RING_LEN; i++) {
943 if (i < (RX_BD_RING_LEN - 1))
944 bd_status = R_E_S | R_I_S;
946 bd_status = R_E_S | R_I_S | R_W_S;
948 iowrite16be(bd_status, &priv->rx_bd_base[i].status);
949 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
950 &priv->rx_bd_base[i].buf);
953 for (i = 0; i < TX_BD_RING_LEN; i++) {
954 if (i < (TX_BD_RING_LEN - 1))
955 bd_status = T_I_S | T_TC_S;
957 bd_status = T_I_S | T_TC_S | T_W_S;
959 iowrite16be(bd_status, &priv->tx_bd_base[i].status);
960 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
961 &priv->tx_bd_base[i].buf);
964 /* if hdlc is busy enable TX and RX */
965 if (priv->hdlc_busy == 1) {
966 cecr_subblock = ucc_fast_get_qe_cr_subblock(
967 priv->ut_info->uf_info.ucc_num);
969 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
970 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
972 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
974 /* Enable the TDM port */
976 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
979 napi_enable(&priv->napi);
980 netif_device_attach(priv->ndev);
/*
 * Power-management callbacks: suspend/resume double as freeze/thaw for
 * hibernation.  HDLC_PM_OPS is NULL when PM support is compiled out
 * (the #ifdef lines are elided in this listing).
 */
985 static const struct dev_pm_ops uhdlc_pm_ops = {
986 .suspend = uhdlc_suspend,
987 .resume = uhdlc_resume,
988 .freeze = uhdlc_suspend,
989 .thaw = uhdlc_resume,
992 #define HDLC_PM_OPS (&uhdlc_pm_ops)
996 #define HDLC_PM_OPS NULL
/* netdev ops: xmit goes through the generic hdlc_start_xmit, which
 * dispatches to ucc_hdlc_tx via hdlc->xmit set in probe.
 */
999 static const struct net_device_ops uhdlc_ops = {
1000 .ndo_open = uhdlc_open,
1001 .ndo_stop = uhdlc_close,
1002 .ndo_start_xmit = hdlc_start_xmit,
1003 .ndo_do_ioctl = uhdlc_ioctl,
/*
 * ucc_hdlc_probe - platform driver probe.
 *
 * Reads the UCC index and clock names from the device tree, fills
 * utdm_info[ucc_num] from the template, maps registers/IRQ, allocates
 * the private structure, parses optional DT flags (fsl,tdm-interface,
 * fsl,ucc-internal-loopback, fsl,hdlc-bus), optionally parses the TDM
 * description, runs uhdlc_init(), then allocates and registers the
 * generic HDLC net device.  Error paths (elided labels) unwind in
 * reverse order.
 */
1006 static int ucc_hdlc_probe(struct platform_device *pdev)
1008 struct device_node *np = pdev->dev.of_node;
1009 struct ucc_hdlc_private *uhdlc_priv = NULL;
1010 struct ucc_tdm_info *ut_info;
1011 struct ucc_tdm *utdm = NULL;
1012 struct resource res;
1013 struct net_device *dev;
1020 ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1022 dev_err(&pdev->dev, "Invalid ucc property\n");
/* Only UCC1..UCC4 can drive an HDLC channel here (index 0-3). */
1027 if ((ucc_num > 3) || (ucc_num < 0)) {
1028 dev_err(&pdev->dev, ": Invalid UCC num\n");
1032 memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1033 sizeof(utdm_primary_info));
1035 ut_info = &utdm_info[ucc_num];
1036 ut_info->uf_info.ucc_num = ucc_num;
1038 sprop = of_get_property(np, "rx-clock-name", NULL);
1040 ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1041 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1042 (ut_info->uf_info.rx_clock > QE_CLK24)) {
1043 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
/* else branch: property missing entirely. */
1047 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1051 sprop = of_get_property(np, "tx-clock-name", NULL);
1053 ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1054 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1055 (ut_info->uf_info.tx_clock > QE_CLK24)) {
1056 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1060 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1064 ret = of_address_to_resource(np, 0, &res);
1068 ut_info->uf_info.regs = res.start;
1069 ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1071 uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1076 dev_set_drvdata(&pdev->dev, uhdlc_priv);
1077 uhdlc_priv->dev = &pdev->dev;
1078 uhdlc_priv->ut_info = ut_info;
/* Optional DT feature flags. */
1080 if (of_get_property(np, "fsl,tdm-interface", NULL))
1081 uhdlc_priv->tsa = 1;
1083 if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1084 uhdlc_priv->loopback = 1;
1086 if (of_get_property(np, "fsl,hdlc-bus", NULL))
1087 uhdlc_priv->hdlc_bus = 1;
1089 if (uhdlc_priv->tsa == 1) {
1090 utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1093 dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1094 goto free_uhdlc_priv;
1096 uhdlc_priv->utdm = utdm;
1097 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1102 ret = uhdlc_init(uhdlc_priv);
1104 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1108 dev = alloc_hdlcdev(uhdlc_priv);
1111 pr_err("ucc_hdlc: unable to allocate memory\n");
1112 goto undo_uhdlc_init;
1115 uhdlc_priv->ndev = dev;
1116 hdlc = dev_to_hdlc(dev);
1117 dev->tx_queue_len = 16;
1118 dev->netdev_ops = &uhdlc_ops;
1119 hdlc->attach = ucc_hdlc_attach;
1120 hdlc->xmit = ucc_hdlc_tx;
/* NAPI weight 32: matches the Rx budget used in ucc_hdlc_poll. */
1121 netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1122 if (register_hdlc_device(dev)) {
1124 pr_err("ucc_hdlc: unable to register hdlc device\n");
/* error unwind (labels elided in this listing) */
1134 if (uhdlc_priv->tsa)
/*
 * ucc_hdlc_remove - platform driver remove: free driver memory and
 * unmap the TDM SI registers and SI RAM if present.
 * NOTE(review): priv->utdm is only allocated in TSA mode; elided lines
 * presumably guard the utdm dereferences — verify against the full
 * file.
 */
1141 static int ucc_hdlc_remove(struct platform_device *pdev)
1143 struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1145 uhdlc_memclean(priv);
1147 if (priv->utdm->si_regs) {
1148 iounmap(priv->utdm->si_regs);
1149 priv->utdm->si_regs = NULL;
1152 if (priv->utdm->siram) {
1153 iounmap(priv->utdm->siram);
1154 priv->utdm->siram = NULL;
1158 dev_info(&pdev->dev, "UCC based hdlc module removed\n");
/* Device-tree match table and platform driver registration. */
1163 static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1165 .compatible = "fsl,ucc-hdlc",
1170 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1172 static struct platform_driver ucc_hdlc_driver = {
1173 .probe = ucc_hdlc_probe,
1174 .remove = ucc_hdlc_remove,
1178 .of_match_table = fsl_ucc_hdlc_of_match,
1182 module_platform_driver(ucc_hdlc_driver);
1183 MODULE_LICENSE("GPL");