staging/octeon/ethernet.c
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

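/*
 * Largest MTU the driver advertises.  Adding worst-case Ethernet, FCS
 * and VLAN overhead back on still keeps the frame length within the
 * hardware's 16-bit frame length checkers (see
 * cvm_oct_common_change_mtu()).
 */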
#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Other software can also submit packets to this\n"
        "\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
        "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
        "\twill be configured to send incoming packets to multiple POW\n"
        "\tgroups. pow_receive_group parameter is ignored when multiple\n"
        "\tgroups are taken into use and groups are allocated starting\n"
        "\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tlinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI poll weight (maximum packets processed per poll).");
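/*
 * Example invocation (assuming the driver is built as the
 * octeon-ethernet module):
 *   modprobe octeon-ethernet num_packet_buffers=2048 receive_group_order=2
 */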

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

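/* TX cleanup poll interval in core clock cycles; set in cvm_oct_probe(). */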
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it.  If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif

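        /*
         * Enable RED based on how many FPA buffers remain free:
         * roughly, all packets pass while more than
         * num_packet_buffers / 4 buffers remain and all are dropped
         * below num_packet_buffers / 8.
         */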
        cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
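                /*
                 * The hardware stores the next-buffer pointer in the
                 * 8 bytes just before the packet data; only free a
                 * segment whose I ("don't free") bit is clear.
                 */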
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      CVMX_FPA_PACKET_POOL_SIZE / 128);
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

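                /*
                 * The "1" above asks the hardware to clear its
                 * counters on read, so accumulate into the kernel's
                 * running totals rather than overwriting them.
                 */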
                dev->stats.rx_packets += rx_status.inb_packets;
                dev->stats.tx_packets += tx_status.packets;
                dev->stats.rx_bytes += rx_status.inb_octets;
                dev->stats.tx_bytes += tx_status.octets;
                dev->stats.multicast += rx_status.multicast_packets;
                dev->stats.rx_crc_errors += rx_status.inb_errors;
                dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
                dev->stats.rx_dropped += rx_status.dropped_packets;
        }

        return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
        int vlan_bytes = VLAN_HLEN;
#else
        int vlan_bytes = 0;
#endif
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

        dev->mtu = new_mtu;

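        /*
         * Only interfaces 0 and 1 have GMX frame length registers to
         * program; SPI interfaces are left alone.
         */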
        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int index = INDEX(priv->port);
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + mtu_overhead;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
                    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to flag packets larger
                         * than the MTU or smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;
                int index = INDEX(priv->port);

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

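                /*
                 * Disable the port (bit 0 of GMX_PRT_CFG) while the
                 * address filter is updated, then restore it.
                 */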
                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                u8 *ptr = dev->dev_addr;
                u64 mac = 0;
                int index = INDEX(priv->port);

                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (u64)ptr[i];

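                /*
                 * With the port disabled, program SMAC (the source
                 * MAC the hardware uses, e.g. for pause frames) and
                 * one byte of the filter address into each of the
                 * six ADR_CAM registers.
                 */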
                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1) &&
            (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

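        /*
         * Scatter-gather and hardware checksum offload are only
         * enabled when transmitting through a real PKO queue.
         */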
        if (priv->queue != -1)
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev_set_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        if (dev->netdev_ops->ndo_stop)
                dev->netdev_ops->ndo_stop(dev);

        return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
        if (dev->phydev)
                phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
                        void (*link_poll)(struct net_device *))
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
        cvmx_helper_link_info_t link_info;
        int rv;

        rv = cvm_oct_phy_setup_device(dev);
        if (rv)
                return rv;

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmx_cfg.s.en = 1;
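        /* Newer SoCs (e.g. CN68XX) identify ports by "port kind" (pknd). */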
        if (octeon_has_feature(OCTEON_FEATURE_PKND))
                gmx_cfg.s.pknd = priv->port;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

        if (octeon_is_simulation())
                return 0;

        if (dev->phydev) {
                int r = phy_read_status(dev->phydev);

                if (r == 0 && dev->phydev->link == 0)
                        netif_carrier_off(dev);
                cvm_oct_adjust_link(dev);
        } else {
                link_info = cvmx_helper_link_get(priv->port);
                if (!link_info.s.link_up)
                        netif_carrier_off(dev);
                priv->poll = link_poll;
                link_poll(dev);
        }

        return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        cvmx_helper_link_info_t link_info;

        link_info = cvmx_helper_link_get(priv->port);
        if (link_info.u64 == priv->link_info)
                return;

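        /* If programming the new link state fails, keep reporting the old one. */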
        if (cvmx_helper_link_set(priv->port, link_info))
                link_info.u64 = priv->link_info;
        else
                priv->link_info = link_info.u64;

        if (link_info.s.link_up) {
                if (!netif_carrier_ok(dev))
                        netif_carrier_on(dev);
        } else if (netif_carrier_ok(dev)) {
                netif_carrier_off(dev);
        }
        cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
        return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_xaui_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init               = cvm_oct_sgmii_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_sgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init               = cvm_oct_spi_init,
        .ndo_uninit             = cvm_oct_spi_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_rgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_start_xmit         = cvm_oct_xmit_pow,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
                                const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
        u32 delay_value;

        if (!of_property_read_u32(np, "rx-delay", &delay_value))
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
        if (!of_property_read_u32(np, "tx-delay", &delay_value))
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}

static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
        mtu_overhead += VLAN_HLEN;
#endif

        octeon_mdiobus_force_mod_depencency();

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

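        /*
         * With receive_group_order set, groups 0 .. 2^order - 1 are
         * used; pow_receive_groups is a bit mask of the groups in use.
         */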
        if (receive_group_order) {
                if (receive_group_order > 4)
                        receive_group_order = 4;
                pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
        } else {
                pow_receive_groups = BIT(pow_receive_group);
        }

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                            cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

                        if (receive_group_order) {
                                int tag_mask;

                                /* We support only 16 groups at the moment, so
                                 * always disable the two additional "hidden"
                                 * tag_mask bits on CN68XX.
                                 */
                                if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                                        pip_prt_tagx.u64 |= 0x3ull << 44;

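                                /*
                                 * Hash ports and IP addresses into
                                 * the group tag so flows spread
                                 * across all enabled receive groups.
                                 */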
                                tag_mask = ~((1 << receive_group_order) - 1);
                                pip_prt_tagx.s.grptagbase       = 0;
                                pip_prt_tagx.s.grptagmask       = tag_mask;
                                pip_prt_tagx.s.grptag           = 1;
                                pip_prt_tagx.s.tag_mode         = 0;
                                pip_prt_tagx.s.inc_prt_flag     = 1;
                                pip_prt_tagx.s.ip6_dprt_flag    = 1;
                                pip_prt_tagx.s.ip4_dprt_flag    = 1;
                                pip_prt_tagx.s.ip6_sprt_flag    = 1;
                                pip_prt_tagx.s.ip4_sprt_flag    = 1;
                                pip_prt_tagx.s.ip6_dst_flag     = 1;
                                pip_prt_tagx.s.ip4_dst_flag     = 1;
                                pip_prt_tagx.s.ip6_src_flag     = 1;
                                pip_prt_tagx.s.ip4_src_flag     = 1;
                                pip_prt_tagx.s.grp              = 0;
                        } else {
                                pip_prt_tagx.s.grptag   = 0;
                                pip_prt_tagx.s.grp      = pow_receive_group;
                        }

                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        SET_NETDEV_DEV(dev, &pdev->dev);
                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                    cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                            alloc_etherdev(sizeof(struct octeon_ethernet));
                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        SET_NETDEV_DEV(dev, &pdev->dev);
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                                port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
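                        /*
                         * Each PKO queue gets its own 4-byte FAU
                         * counter, allocated downward from
                         * FAU_NUM_PACKET_BUFFERS_TO_FREE.
                         */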
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                cvm_set_rgmii_delay(priv->of_node, interface,
                                                    port_index);
                                break;
                        }

                        if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
                                if (of_phy_register_fixed_link(priv->of_node)) {
                                        netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
                                                   interface, priv->port);
                                        dev->netdev_ops = NULL;
                                }
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                    cvmx_pko_get_num_queues(priv->port) *
                                    sizeof(u32);
                                schedule_delayed_work(&priv->port_periodic_work,
                                                      HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 us: about 10 1500-byte packets at 1GE.
         */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

        return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

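        /* Disable packet input first so no new work arrives during teardown. */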
        cvmx_ipd_disable();

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe          = cvm_oct_probe,
        .remove         = cvm_oct_remove,
        .driver         = {
                .name   = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");