/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */
11 #include <linux/platform_device.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/phy.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_net.h>
21 #include <linux/if_ether.h>
22 #include <linux/if_vlan.h>
26 #include <asm/octeon/octeon.h>
28 #include "ethernet-defines.h"
29 #include "octeon-ethernet.h"
30 #include "ethernet-mem.h"
31 #include "ethernet-rx.h"
32 #include "ethernet-tx.h"
33 #include "ethernet-mdio.h"
34 #include "ethernet-util.h"
36 #include <asm/octeon/cvmx-pip.h>
37 #include <asm/octeon/cvmx-pko.h>
38 #include <asm/octeon/cvmx-fau.h>
39 #include <asm/octeon/cvmx-ipd.h>
40 #include <asm/octeon/cvmx-helper.h>
41 #include <asm/octeon/cvmx-asxx-defs.h>
42 #include <asm/octeon/cvmx-gmxx-defs.h>
43 #include <asm/octeon/cvmx-smix-defs.h>
/* Largest MTU the OCTEON hardware can be programmed for (jumbo frames). */
#define OCTEON_MAX_MTU 65392

/* Number of packet buffers seeded into the FPA pools at probe time. */
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
"\tNumber of packet buffers to allocate and store in the\n"
"\tFPA. By default, 1024 packet buffers are used.\n");

/* Single SSO/POW group used for receive when receive_group_order == 0. */
static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
"\tPOW group to receive packets from. All ethernet hardware\n"
"\twill be configured to send incoming packets to this POW\n"
"\tgroup. Also any other software can submit packets to this\n"
"\tgroup for the kernel to process.");

/*
 * Order (0..4) of receive groups; when non-zero, 2^order groups starting
 * from 0 are used and pow_receive_group is ignored (see probe).
 */
static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
"\twill be configured to send incoming packets to multiple POW\n"
"\tgroups. pow_receive_group parameter is ignored when multiple\n"
"\tgroups are taken into use and groups are allocated starting\n"
"\tfrom 0. By default, a single group is used.\n");

/* POW group the virtual "pow0" device transmits to; -1 disables pow0. */
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
"\tPOW group to send packets to other software on. This\n"
"\tcontrols the creation of the virtual device pow0.\n"
"\talways_use_pow also depends on this value.");

/*
 * NOTE(review): the `int always_use_pow;` definition is not visible in
 * this chunk — presumably declared immediately above; confirm against
 * the full file.
 */
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
"\tWhen set, always send to the pow group. This will cause\n"
"\tpackets sent to real ethernet devices to be sent to the\n"
"\tPOW group instead of the hardware. Unless some other\n"
"\tapplication changes the config, packets will still be\n"
"\treceived from the low level hardware. Use this option\n"
"\tto allow a CVMX app to intercept all packets from the\n"
"\tlinux kernel. You must specify pow_send_group along with\n"

/* Comma separated list of netdev names forced to transmit via the POW. */
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
"\tComma separated list of ethernet devices that should use the\n"
"\tPOW for transmit instead of the actual ethernet hardware. This\n"
"\tis a per port version of always_use_pow. always_use_pow takes\n"
"\tprecedence over this list. For example, setting this to\n"
"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
"\tusing the pow_send_group.");

/* NAPI budget used when registering the receive NAPI context. */
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* TX cleanup polling interval in core-clock cycles (computed in probe). */
u64 cvm_oct_tx_poll_interval;

/* Forward declaration so the delayed work can reference its handler. */
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
124 static void cvm_oct_rx_refill_worker(struct work_struct *work)
127 * FPA 0 may have been drained, try to refill it if we need
128 * more than num_packet_buffers / 2, otherwise normal receive
129 * processing will refill it. If it were drained, no packets
130 * could be received so cvm_oct_napi_poll would never be
131 * invoked to do the refill.
133 cvm_oct_rx_refill_pool(num_packet_buffers / 2);
135 if (!atomic_read(&cvm_oct_poll_queue_stopping))
136 schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
139 static void cvm_oct_periodic_worker(struct work_struct *work)
141 struct octeon_ethernet *priv = container_of(work,
142 struct octeon_ethernet,
143 port_periodic_work.work);
146 priv->poll(cvm_oct_device[priv->port]);
148 cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
149 cvm_oct_device[priv->port]);
151 if (!atomic_read(&cvm_oct_poll_queue_stopping))
152 schedule_delayed_work(&priv->port_periodic_work, HZ);
155 static void cvm_oct_configure_common_hw(void)
159 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
161 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
163 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
164 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
165 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
167 #ifdef __LITTLE_ENDIAN
169 union cvmx_ipd_ctl_status ipd_ctl_status;
171 ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
172 ipd_ctl_status.s.pkt_lend = 1;
173 ipd_ctl_status.s.wqe_lend = 1;
174 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
178 cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
182 * cvm_oct_free_work- Free a work queue entry
184 * @work_queue_entry: Work queue entry to free
186 * Returns Zero on success, Negative on failure.
188 int cvm_oct_free_work(void *work_queue_entry)
190 cvmx_wqe_t *work = work_queue_entry;
192 int segments = work->word2.s.bufs;
193 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
196 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
197 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
198 if (unlikely(!segment_ptr.s.i))
199 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
201 CVMX_FPA_PACKET_POOL_SIZE / 128);
202 segment_ptr = next_ptr;
204 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
208 EXPORT_SYMBOL(cvm_oct_free_work);
211 * cvm_oct_common_get_stats - get the low level ethernet statistics
212 * @dev: Device to get the statistics from
214 * Returns Pointer to the statistics
216 static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
218 cvmx_pip_port_status_t rx_status;
219 cvmx_pko_port_status_t tx_status;
220 struct octeon_ethernet *priv = netdev_priv(dev);
222 if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
223 if (octeon_is_simulation()) {
224 /* The simulator doesn't support statistics */
225 memset(&rx_status, 0, sizeof(rx_status));
226 memset(&tx_status, 0, sizeof(tx_status));
228 cvmx_pip_get_port_status(priv->port, 1, &rx_status);
229 cvmx_pko_get_port_status(priv->port, 1, &tx_status);
232 dev->stats.rx_packets += rx_status.inb_packets;
233 dev->stats.tx_packets += tx_status.packets;
234 dev->stats.rx_bytes += rx_status.inb_octets;
235 dev->stats.tx_bytes += tx_status.octets;
236 dev->stats.multicast += rx_status.multicast_packets;
237 dev->stats.rx_crc_errors += rx_status.inb_errors;
238 dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
239 dev->stats.rx_dropped += rx_status.dropped_packets;
246 * cvm_oct_common_change_mtu - change the link MTU
247 * @dev: Device to change
248 * @new_mtu: The new MTU
250 * Returns Zero on success
252 static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
254 struct octeon_ethernet *priv = netdev_priv(dev);
255 int interface = INTERFACE(priv->port);
256 #if IS_ENABLED(CONFIG_VLAN_8021Q)
257 int vlan_bytes = VLAN_HLEN;
261 int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
265 if ((interface < 2) &&
266 (cvmx_helper_interface_get_mode(interface) !=
267 CVMX_HELPER_INTERFACE_MODE_SPI)) {
268 int index = INDEX(priv->port);
269 /* Add ethernet header and FCS, and VLAN if configured. */
270 int max_packet = new_mtu + mtu_overhead;
272 if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
273 OCTEON_IS_MODEL(OCTEON_CN58XX)) {
274 /* Signal errors on packets larger than the MTU */
275 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
279 * Set the hardware to truncate packets larger
280 * than the MTU and smaller the 64 bytes.
282 union cvmx_pip_frm_len_chkx frm_len_chk;
285 frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
286 frm_len_chk.s.maxlen = max_packet;
287 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
291 * Set the hardware to truncate packets larger than
292 * the MTU. The jabber register must be set to a
293 * multiple of 8 bytes, so round up.
295 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
296 (max_packet + 7) & ~7u);
302 * cvm_oct_common_set_multicast_list - set the multicast list
303 * @dev: Device to work on
305 static void cvm_oct_common_set_multicast_list(struct net_device *dev)
307 union cvmx_gmxx_prtx_cfg gmx_cfg;
308 struct octeon_ethernet *priv = netdev_priv(dev);
309 int interface = INTERFACE(priv->port);
311 if ((interface < 2) &&
312 (cvmx_helper_interface_get_mode(interface) !=
313 CVMX_HELPER_INTERFACE_MODE_SPI)) {
314 union cvmx_gmxx_rxx_adr_ctl control;
315 int index = INDEX(priv->port);
318 control.s.bcst = 1; /* Allow broadcast MAC addresses */
320 if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
321 (dev->flags & IFF_PROMISC))
322 /* Force accept multicast packets */
325 /* Force reject multicast packets */
328 if (dev->flags & IFF_PROMISC)
330 * Reject matches if promisc. Since CAM is
331 * shut off, should accept everything.
333 control.s.cam_mode = 0;
335 /* Filter packets based on the CAM */
336 control.s.cam_mode = 1;
339 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
340 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
341 gmx_cfg.u64 & ~1ull);
343 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
345 if (dev->flags & IFF_PROMISC)
346 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
347 (index, interface), 0);
349 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
350 (index, interface), 1);
352 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
357 static int cvm_oct_set_mac_filter(struct net_device *dev)
359 struct octeon_ethernet *priv = netdev_priv(dev);
360 union cvmx_gmxx_prtx_cfg gmx_cfg;
361 int interface = INTERFACE(priv->port);
363 if ((interface < 2) &&
364 (cvmx_helper_interface_get_mode(interface) !=
365 CVMX_HELPER_INTERFACE_MODE_SPI)) {
367 u8 *ptr = dev->dev_addr;
369 int index = INDEX(priv->port);
371 for (i = 0; i < 6; i++)
372 mac = (mac << 8) | (u64)ptr[i];
375 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
376 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
377 gmx_cfg.u64 & ~1ull);
379 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
380 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
382 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
384 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
386 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
388 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
390 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
392 cvm_oct_common_set_multicast_list(dev);
393 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int rc;

	/* Validate and store the address in dev->dev_addr first. */
	rc = eth_mac_addr(dev, addr);
	if (rc)
		return rc;

	/* Then push it down into the GMX filter registers. */
	return cvm_oct_set_mac_filter(dev);
}
416 * cvm_oct_common_init - per network device initialization
417 * @dev: Device to initialize
419 * Returns Zero on success
421 int cvm_oct_common_init(struct net_device *dev)
423 struct octeon_ethernet *priv = netdev_priv(dev);
424 const u8 *mac = NULL;
427 mac = of_get_mac_address(priv->of_node);
430 ether_addr_copy(dev->dev_addr, mac);
432 eth_hw_addr_random(dev);
435 * Force the interface to use the POW send if always_use_pow
436 * was specified or it is in the pow send list.
438 if ((pow_send_group != -1) &&
439 (always_use_pow || strstr(pow_send_list, dev->name)))
442 if (priv->queue != -1)
443 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
445 /* We do our own locking, Linux doesn't need to */
446 dev->features |= NETIF_F_LLTX;
447 dev->ethtool_ops = &cvm_oct_ethtool_ops;
449 cvm_oct_set_mac_filter(dev);
450 dev_set_mtu(dev, dev->mtu);
453 * Zero out stats for port so we won't mistakenly show
454 * counters from the bootloader.
456 memset(dev->netdev_ops->ndo_get_stats(dev), 0,
457 sizeof(struct net_device_stats));
459 if (dev->netdev_ops->ndo_stop)
460 dev->netdev_ops->ndo_stop(dev);
465 void cvm_oct_common_uninit(struct net_device *dev)
468 phy_disconnect(dev->phydev);
471 int cvm_oct_common_open(struct net_device *dev,
472 void (*link_poll)(struct net_device *))
474 union cvmx_gmxx_prtx_cfg gmx_cfg;
475 struct octeon_ethernet *priv = netdev_priv(dev);
476 int interface = INTERFACE(priv->port);
477 int index = INDEX(priv->port);
478 cvmx_helper_link_info_t link_info;
481 rv = cvm_oct_phy_setup_device(dev);
485 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
487 if (octeon_has_feature(OCTEON_FEATURE_PKND))
488 gmx_cfg.s.pknd = priv->port;
489 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
491 if (octeon_is_simulation())
495 int r = phy_read_status(dev->phydev);
497 if (r == 0 && dev->phydev->link == 0)
498 netif_carrier_off(dev);
499 cvm_oct_adjust_link(dev);
501 link_info = cvmx_helper_link_get(priv->port);
502 if (!link_info.s.link_up)
503 netif_carrier_off(dev);
504 priv->poll = link_poll;
511 void cvm_oct_link_poll(struct net_device *dev)
513 struct octeon_ethernet *priv = netdev_priv(dev);
514 cvmx_helper_link_info_t link_info;
516 link_info = cvmx_helper_link_get(priv->port);
517 if (link_info.u64 == priv->link_info)
520 if (cvmx_helper_link_set(priv->port, link_info))
521 link_info.u64 = priv->link_info;
523 priv->link_info = link_info.u64;
525 if (link_info.s.link_up) {
526 if (!netif_carrier_ok(dev))
527 netif_carrier_on(dev);
528 } else if (netif_carrier_ok(dev)) {
529 netif_carrier_off(dev);
531 cvm_oct_note_carrier(priv, link_info);
534 static int cvm_oct_xaui_open(struct net_device *dev)
536 return cvm_oct_common_open(dev, cvm_oct_link_poll);
/* NPI ports: no open/stop — there is no link state to manage. */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,

/* XAUI ports: link state handled by cvm_oct_xaui_open's poller. */
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,

/* SGMII ports: use the SGMII-specific init/open from ethernet-sgmii.c. */
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,

/* SPI ports: SPI-specific init/uninit, no open/stop. */
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,

/* RGMII/GMII ports: RGMII-specific open from ethernet-rgmii.c. */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,

/* Virtual pow0 device: transmits via the POW instead of hardware. */
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
628 static struct device_node *cvm_oct_of_get_child(
629 const struct device_node *parent, int reg_val)
631 struct device_node *node = NULL;
636 node = of_get_next_child(parent, node);
639 addr = of_get_property(node, "reg", &size);
640 if (addr && (be32_to_cpu(*addr) == reg_val))
646 static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
647 int interface, int port)
649 struct device_node *ni, *np;
651 ni = cvm_oct_of_get_child(pip, interface);
655 np = cvm_oct_of_get_child(ni, port);
661 static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
665 if (!of_property_read_u32(np, "rx-delay", &delay_value))
666 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
667 if (!of_property_read_u32(np, "tx-delay", &delay_value))
668 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
/*
 * cvm_oct_probe - platform driver probe: bring up every ethernet port.
 * @pdev: Platform device matched against the PIP device tree node.
 *
 * NOTE(review): several structural lines of this function (braces,
 * else-branches, some declarations and closing statements) are not
 * visible in this chunk; the comments below describe only the visible
 * statements — confirm structure against the full file.
 */
static int cvm_oct_probe(struct platform_device *pdev)
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	struct device_node *pip;
	/* L2 overhead: header + FCS, plus VLAN when 802.1Q is enabled. */
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
	/* Ensure the OCTEON MDIO bus module is loaded before we init. */
	octeon_mdiobus_force_mod_depencency();
	pip = pdev->dev.of_node;
	pr_err("Error: No 'pip' in /aliases\n");
	/* Global packet I/O setup: FPA pools, IPD/PIP/PKO helpers. */
	cvm_oct_configure_common_hw();
	cvmx_helper_initialize_packet_io_global();
	/* Compute the mask of POW groups used for receive. */
	if (receive_group_order) {
		/* Clamp to the supported maximum of 16 (2^4) groups. */
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
		pow_receive_groups = BIT(pow_receive_group);
	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
			union cvmx_pip_prt_tagx pip_prt_tagx;
			cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			if (receive_group_order) {
				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;
				tag_mask = ~((1 << receive_group_order) - 1);
				/* Spread flows across groups via tag hashing
				 * over L3/L4 addresses and ports.
				 */
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
				/* Single-group mode: all packets to one group. */
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
	/* Groups are configured — packet input may now be enabled. */
	cvmx_helper_ipd_and_packet_input_enable();
	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
	/* Optionally create the virtual "pow0" device. */
	if ((pow_send_group != -1)) {
		struct net_device *dev;
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		/* Initialize the device private structure. */
		struct octeon_ethernet *priv = netdev_priv(dev);
		SET_NETDEV_DEV(dev, &pdev->dev);
		dev->netdev_ops = &cvm_oct_pow_netdev_ops;
		priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
		/* pow0 sits one past the last real IPD port. */
		priv->port = CVMX_PIP_NUM_INPUT_PORTS;
		strcpy(dev->name, "pow%d");
		for (qos = 0; qos < 16; qos++)
			skb_queue_head_init(&priv->tx_free_list[qos]);
		dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
		dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
		if (register_netdev(dev) < 0) {
			pr_err("Failed to register ethernet device for POW\n");
		cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
		pr_info("%s: POW send group %d, receive group %d\n",
			dev->name, pow_send_group,
		pr_err("Failed to allocate ethernet device for POW\n");
	/* Create one netdev per physical port on every interface. */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
				pr_err("Failed to allocate ethernet device for port %d\n",
			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->of_node = cvm_oct_node_for_port(pip, interface,
			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* Each queue gets its own 4-byte FAU counter. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
			/* Pick ops table and name prefix by interface mode. */
			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
				if (of_phy_register_fixed_link(priv->of_node)) {
					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
						   interface, priv->port);
					/* NULL ops marks the port unusable. */
					dev->netdev_ops = NULL;
			if (!dev->netdev_ops) {
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				cvm_oct_device[priv->port] = dev;
				cvmx_pko_get_num_queues(priv->port) *
				schedule_delayed_work(&priv->port_periodic_work,
	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();
	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
/*
 * cvm_oct_remove - platform driver remove: tear down all ports.
 * @pdev: Platform device being removed.
 *
 * NOTE(review): braces and some statements of this function are not
 * visible in this chunk; comments describe only the visible lines.
 */
static int cvm_oct_remove(struct platform_device *pdev)
	/* Stop all periodic/refill work before tearing anything down. */
	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();
	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);
			/* Per-port worker must be idle before unregister. */
			cancel_delayed_work_sync(&priv->port_periodic_work);
			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			cvm_oct_device[port] = NULL;
	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
/* Match the OCTEON PIP node in the device tree. */
static const struct of_device_id cvm_oct_match[] = {
		.compatible = "cavium,octeon-3860-pip",
MODULE_DEVICE_TABLE(of, cvm_oct_match);

/* Platform driver glue: probe/remove plus the OF match table above. */
static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");