/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2
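
/* Seed each 16-byte descriptor in the buffer pool: the slot index is
 * stored in m0 (read back as USERINFO on receive to find the owning
 * skb), and FPQNUM names the free-pool queue the hardware recycles
 * buffers through.
 */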
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}
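
/* Allocate @nbuf fresh receive skbs, DMA-map them, publish the matching
 * descriptors to the hardware free pool, and advance the tail with a
 * single ring command.
 */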
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}
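
/* Reclaim one transmitted skb: unmap the head and every fragment, drop
 * the TSO MSS-slot reference if one was taken, report LERR status, and
 * free the skb recorded via USERINFO in the completion descriptor.
 */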
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}
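
/* The hardware exposes only NUM_MSS_REG MSS registers for TSO. Reuse a
 * slot whose stored MSS already matches, otherwise claim one whose
 * refcount dropped to zero. Returns the slot index, or -EBUSY when
 * every slot is in use.
 */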
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	bool mss_index_found = false;
	int mss_index;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
			mss_index_found = true;
		}
	}

	spin_unlock(&pdata->mss_lock);

	/* No slots with ref_count = 0 available, return busy */
	if (!mss_index_found)
		return -EBUSY;

	return mss_index;
}
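
/* Build the classification bits ("work message") of the tx descriptor:
 * ethernet/IP/TCP header lengths, checksum-offload enables and, for
 * TSO, the MSS register index obtained from xgene_enet_setup_mss().
 */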
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to fit within the
				 * first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
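
/* Fill one (or, for scatter-gather, two) tx descriptors for @skb. Up to
 * four buffers fit in the primary and extension descriptors; further
 * fragments, and fragments larger than 16K, spill into a separate
 * "expanded buffer" list referenced from the extension descriptor.
 * Returns the number of descriptor slots consumed, or a negative errno.
 */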
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes = len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}
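
/* Queue one frame for transmission. tx_level/txc_level are free-running
 * per-queue counters of descriptors submitted and completed; their
 * difference bounds ring occupancy and drives subqueue flow control.
 */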
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == NETDEV_TX_BUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}
static void xgene_enet_rx_csum(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);

	if (!(ndev->features & NETIF_F_RXCSUM))
		return;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (ip_is_fragment(iph))
		return;

	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
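
/* Hand one received frame to the stack: unmap and detach the skb named
 * by USERINFO, drop errored frames, trim the 4-byte CRC the hardware
 * leaves in place, and refill the buffer pool every NUM_BUFPOOL frames.
 */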
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	xgene_enet_rx_csum(skb);

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}
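
/* Common NAPI worker for rx and tx-completion rings: drain up to
 * @budget descriptors (a descriptor with FPQNUM set is an rx frame,
 * otherwise a tx completion), then hand the consumed slots back to the
 * hardware with one negative-count ring command.
 */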
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];
			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}
static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	return 0;
}
static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}
static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}
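
/* Create the per-queue rx, buffer-pool, tx and tx-completion rings.
 * Ring IDs encode an owner (CPU or MAC) plus a buffer number; when no
 * dedicated completion queues exist, tx completions share the rx ring.
 */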
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	__le64 *exp_bufs;
	u16 ring_id;
	int i, ret, size;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

	return storage;
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};
#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
		return;
	}

	pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}
static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}
static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		return;

	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}
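
/* Gather everything probe needs from DT/ACPI: the three CSR/ring MMIO
 * regions, port id, MAC address, PHY mode, rx/tx delays, IRQs and the
 * clock, then derive the per-block register bases for this port.
 */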
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}
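
/* One-time hardware bring-up: reset the port, create and seed the
 * descriptor rings, then either program the classifier pre-tree (XGMII)
 * or bypass classification straight to the default rx ring.
 */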
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err2;
	}

	return 0;

err2:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
err1:
	xgene_enet_delete_desc_rings(pdata);
err:
	free_netdev(ndev);
	return ret;
}
static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}
static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");