/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
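
/* TDM_PPPOHT_SLIC_MAXIN selects the larger SI frame-sync delays
 * (simr_rfsd/simr_tfsd) in the si_info initializer below, as used for
 * PPPoHT over a SLIC framer.
 */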
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
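
/* uhdlc_init() brings one UCC up in HDLC mode: it initializes the fast
 * controller, allocates the RX/TX buffer-descriptor rings and data buffers
 * in DMA-coherent memory, and programs the channel's parameter RAM in
 * MURAM.
 */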
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* This sets the HPM bit in the CMXUCR register, which configures an
	 * open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
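
	/* The controller stays disabled while the BD rings and parameter
	 * RAM are programmed below; it is re-enabled with ucc_fast_enable()
	 * from uhdlc_open().
	 */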

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (this still needs a proper fix) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}
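
	/* The BD rings live in DMA-coherent memory; the data buffers come
	 * from one shared coherent block that is carved into
	 * MAX_RX_BUF_LENGTH slices further below.
	 */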
	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;

	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
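
	/* hmask/haddr1-4 program the CP's HDLC address-recognition filter;
	 * the defaults load the same address into all four compare slots
	 * (what this matches depends on the DEFAULT_* values in
	 * fsl_ucc_hdlc.h).
	 */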

	/* Alloc BD Buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
			* MAX_RX_BUF_LENGTH);

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
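
	/* Each descriptor owns one MAX_RX_BUF_LENGTH slice of the shared
	 * buffer. The last BD in each ring carries the wrap bit
	 * (R_W_S/T_W_S) so the CP cycles back to the ring base after it.
	 */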
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
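
/* ndo_start_xmit hook: for raw-HDLC devices a 2-byte pseudo header is
 * prepended; for PPP the header pushed by the protocol layer is verified.
 * The frame is then copied into the BD's buffer and the descriptor is
 * handed to the CP by setting T_R_S.
 */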
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			/* drop the frame; ndo_start_xmit must not return
			 * an error code
			 */
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	/* write the length before the status: setting T_R_S hands the BD
	 * to the CP
	 */
	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
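
/* Reclaim TX descriptors the CP has released (T_R_S cleared): free the
 * skbs, scrub the buffer slices and wake the queue. Runs from the NAPI
 * poll loop with priv->lock held.
 */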
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx = (priv->skb_dirtytx +
				     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
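
/* Receive path: walk the RX ring until an empty BD (R_E_S set) or the
 * budget runs out, copy each frame out of its BD buffer into a fresh skb
 * and feed it to the stack, then recycle the descriptor.
 */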
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}
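
/* NAPI poll: reclaim TX under the lock, then process up to 'budget' RX
 * frames; when the budget is not exhausted, complete NAPI and unmask the
 * RX/TX event interrupts again.
 */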
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
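
/* Interrupt top half: acknowledge pending events, mask further RX/TX
 * event interrupts and defer the work to NAPI; error events are only
 * counted here.
 */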
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}
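
/* SIOCWANDEV handler: reports an E1 interface with the configured clock
 * type; everything else is delegated to hdlc_ioctl().
 */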
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
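
/* Open is guarded by hdlc_busy so a second open does not request the IRQ
 * or issue QE_INIT_TX_RX again while the channel is already running.
 */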
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}
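
/* Undo everything uhdlc_init() set up. Each block checks its pointer (or
 * offset) first, so this is safe to call on a partially initialized
 * private structure.
 */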
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}
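
/* Power management: the QE loses UCC state in deep sleep, so suspend
 * snapshots GUMR/GUEMR, the HDLC parameter RAM and the clock-mux routing;
 * resume restores them and rebuilds the BD rings before re-enabling the
 * channel.
 */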
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};
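
/* Probe: "cell-index" in the device tree is 1-based, so it is mapped to a
 * 0-based UCC number before the clock names, register window and optional
 * TDM interface are parsed and the HDLC device is registered.
 */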
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No memory to allocate ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);

	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);