/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"
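
/* When defined, the si_info template below uses receive/transmit frame-sync
 * delays suited to PPPoHT over a SLIC device.
 */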
#define TDM_PPPOHT_SLIC_MAXIN

static int uhdlc_close(struct net_device *dev);

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#endif
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
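
/* One working copy per UCC: ucc_hdlc_probe() seeds each utdm_info[] slot
 * from utdm_primary_info, then applies the device-tree properties on top.
 */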

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* This sets the HPM field in the CMXUCR register, which configures an
	 * open-drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);
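	/* RX/TX event bits live in the upper half of the 32-bit UCCE/UCCM
	 * registers, hence the << 16 here and in the IRQ handler below.
	 */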

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate DMA memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate DMA memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);
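	/* riptr/tiptr point at two 32-byte MURAM scratch areas the microcode
	 * uses as internal temporary data buffers; only 16-bit offsets fit in
	 * the parameter RAM, hence the range check above.
	 */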

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
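	/* hmask/haddr1-4 drive the controller's HDLC address recognition; the
	 * driver-wide defaults program one station address into all four slots.
	 */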

	/* Alloc BD Buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
			* MAX_RX_BUF_LENGTH);

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}
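	/* The last BD in each ring carries the wrap bit (R_W_S/T_W_S), so the
	 * SDMA engine loops back to the ring base after consuming it.
	 */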

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough headroom for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}
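
/* ucc_hdlc_tx() produces frames at curtx_bd; hdlc_tx_done() below is its
 * consumer, reclaiming completed BDs from dirty_tx under the same lock.
 */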
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the oldest BD awaiting completion */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains an already-transmitted buffer: reclaim it and
		 * release the BD to be used with the next frame.
		 */
		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
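
/* NAPI receive path: drain completed RxBDs up to the given work limit, push
 * frames to the stack, and recycle each BD by re-arming its empty (R_E) bit.
 */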
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
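
/* The IRQ handler acks pending events, masks further RX/TX interrupts, and
 * defers the work to NAPI; ucc_hdlc_poll() re-enables the events once the
 * budget allows.
 */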
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
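
/* Suspend backs up the state that may be lost across deep sleep: GUMR/GUEMR,
 * the HDLC parameter RAM, and the clock-mux routing; resume replays them.
 */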
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}
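	/* The rings are re-armed exactly as in uhdlc_init(); any frames that
	 * were in flight at suspend time are discarded.
	 */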

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open       = uhdlc_open,
	.ndo_stop       = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = uhdlc_ioctl,
};

static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;
1047 sprop = of_get_property(np, "rx-clock-name", NULL);
1049 ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1050 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1051 (ut_info->uf_info.rx_clock > QE_CLK24)) {
1052 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1056 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1060 sprop = of_get_property(np, "tx-clock-name", NULL);
1062 ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1063 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1064 (ut_info->uf_info.tx_clock > QE_CLK24)) {
1065 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1069 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;
1089 if (of_get_property(np, "fsl,tdm-interface", NULL))
1090 uhdlc_priv->tsa = 1;
1092 if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1093 uhdlc_priv->loopback = 1;
1095 if (of_get_property(np, "fsl,hdlc-bus", NULL))
1096 uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
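
/* For reference, a sketch of a device-tree node this driver would bind to;
 * the node name and clock names are illustrative, not taken from a real board
 * file. ucc_hdlc_probe() reads cell-index, rx/tx-clock-name, and the optional
 * fsl,tdm-interface / fsl,ucc-internal-loopback / fsl,hdlc-bus flags:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *		fsl,tdm-interface;
 *	};
 */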

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);