/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of several values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
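
/* ring->start and ring->end are free-running counters: a slot index is
 * always taken modulo BGMAC_TX_RING_SLOTS, so (ring->end - ring->start)
 * gives the number of in-flight slots even when the counters wrap.
 */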
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points at the first empty slot; we tell hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The first slot that hardware hasn't consumed yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);
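
	/* BGMAC_DMA_TX_STATDPTR is a byte offset into the descriptor ring
	 * (biased by index_base), so the arithmetic above converts it into
	 * the slot index the hardware will process next.
	 */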

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}
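
/* Each RX buffer begins, at BGMAC_RX_BUF_OFFSET, with a struct
 * bgmac_rx_header that the hardware fills with the frame length and flags.
 * Poisoning it with 0xdead/0xbeef lets bgmac_dma_rx_read() detect slots the
 * hardware never actually wrote.
 */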
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc a page-fragment buffer */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	/* Descriptor writes must be visible before poking the hw index */
	wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare a new buffer as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
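
			/* Include the hardware RX header in the skb first,
			 * then pull it off so skb->data points at the
			 * Ethernet frame itself.
			 */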
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
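/* Probe: write a value with only low address bits set to the RINGLO
 * register; if it reads back non-zero, the core accepts descriptor rings
 * at unaligned base addresses.
 */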
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int i;
	int err;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
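
		/* If the core supports unaligned ring addresses, the index
		 * registers are offsets from the ring base rather than from
		 * zero, so keep the base's low 32 bits around to bias every
		 * index read and write.
		 */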
		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return err;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
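
		/* Rings at an unaligned base must have the base programmed
		 * before the ring is enabled; aligned rings can be enabled
		 * first.
		 */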
		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points at the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
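
/* Rewrite BGMAC_CMDCFG with CMDCFG_SR (software reset) held: the bit has to
 * stay set while the register is modified, and its position depends on the
 * core revision, hence the BGMAC_FEAT_CMDCFG_SR_REV4 feature flag.
 */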
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
	udelay(2);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? How to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

876 static void bgmac_chip_reset(struct bgmac *bgmac)
882 if (bgmac_clk_enabled(bgmac)) {
883 if (!bgmac->stats_grabbed) {
884 /* bgmac_chip_stats_update(bgmac); */
885 bgmac->stats_grabbed = true;
888 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
889 bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
891 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
894 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
895 bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
897 /* TODO: Clear software multicast filter list */
	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 4;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept set until taking the MAC out of reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack once */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete(napi);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs talk about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}

	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QoS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	/* addr is a struct sockaddr; the MAC itself lives in sa_data */
	bgmac_write_mac_address(bgmac, (u8 *)sa->sa_data);
	eth_commit_mac_addr_change(net_dev, addr);

	return ret;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
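
/* MIB counters are 32-bit registers; for 64-bit stats the hardware exposes
 * the high word at offset + 4, so read that first and OR in the low word.
 */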
static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}

static int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}

static int bgmac_phy_connect(struct bgmac *bgmac)
{
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "PHY connection failed\n");
		return PTR_ERR(phy_dev);
	}

	return 0;
}

int bgmac_enet_probe(struct bgmac *info)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	int err;

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	memcpy(bgmac, info, sizeof(*bgmac));
	bgmac->net_dev = net_dev;
	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);

	if (!is_valid_ether_addr(bgmac->mac_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			bgmac->mac_addr);
		eth_random_addr(bgmac->mac_addr);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 bgmac->mac_addr);
	}
	ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);

	/* This (reset &) enable is not present in specs or reference driver
	 * but Broadcom does it in arch PCI code when enabling fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to fix the IRQ by assigning OOB #6 to the core */
	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
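
	/* Even with TX interrupts masked, transmitted buffers still get
	 * reclaimed: bgmac_poll() calls bgmac_dma_tx_free() whenever an RX
	 * interrupt schedules NAPI.
	 */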
	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	if (!bgmac->mii_bus)
		err = bgmac_phy_connect_direct(bgmac);
	else
		err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_netdev_free:
	free_netdev(net_dev);

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");