GNU Linux-libre 5.10.215-gnu1
drivers/net/ethernet/ti/netcp_ethss.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Keystone GBE and XGBE subsystem code
4  *
5  * Copyright (C) 2014 Texas Instruments Incorporated
6  * Authors:     Sandeep Nair <sandeep_n@ti.com>
7  *              Sandeep Paulraj <s-paulraj@ti.com>
8  *              Cyril Chemparathy <cyril@ti.com>
9  *              Santosh Shilimkar <santosh.shilimkar@ti.com>
10  *              Wingman Kwok <w-kwok2@ti.com>
11  */
12
13 #include <linux/io.h>
14 #include <linux/module.h>
15 #include <linux/of_mdio.h>
16 #include <linux/of_net.h>
17 #include <linux/of_address.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/ethtool.h>
22
23 #include "cpsw.h"
24 #include "cpsw_ale.h"
25 #include "netcp.h"
26 #include "cpts.h"
27
28 #define NETCP_DRIVER_NAME               "TI KeyStone Ethernet Driver"
29 #define NETCP_DRIVER_VERSION            "v1.0"
30
31 #define GBE_IDENT(reg)                  ((reg >> 16) & 0xffff)
32 #define GBE_MAJOR_VERSION(reg)          (reg >> 8 & 0x7)
33 #define GBE_MINOR_VERSION(reg)          (reg & 0xff)
34 #define GBE_RTL_VERSION(reg)            ((reg >> 11) & 0x1f)
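/* For illustration only, with a hypothetical id_ver value reg = 0x4ed20102:
 * GBE_IDENT(reg) == 0x4ed2, GBE_MAJOR_VERSION(reg) == 1,
 * GBE_RTL_VERSION(reg) == 0 and GBE_MINOR_VERSION(reg) == 0x02.
 */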
35
36 /* 1G Ethernet SS defines */
37 #define GBE_MODULE_NAME                 "netcp-gbe"
38 #define GBE_SS_VERSION_14               0x4ed2
39
40 #define GBE_SS_REG_INDEX                0
41 #define GBE_SGMII34_REG_INDEX           1
42 #define GBE_SM_REG_INDEX                2
43 /* offset relative to base of GBE_SS_REG_INDEX */
44 #define GBE13_SGMII_MODULE_OFFSET       0x100
45 /* offset relative to base of GBE_SM_REG_INDEX */
46 #define GBE13_HOST_PORT_OFFSET          0x34
47 #define GBE13_SLAVE_PORT_OFFSET         0x60
48 #define GBE13_EMAC_OFFSET               0x100
49 #define GBE13_SLAVE_PORT2_OFFSET        0x200
50 #define GBE13_HW_STATS_OFFSET           0x300
51 #define GBE13_CPTS_OFFSET               0x500
52 #define GBE13_ALE_OFFSET                0x600
53 #define GBE13_HOST_PORT_NUM             0
54
55 /* 1G Ethernet NU SS defines */
56 #define GBENU_MODULE_NAME               "netcp-gbenu"
57 #define GBE_SS_ID_NU                    0x4ee6
58 #define GBE_SS_ID_2U                    0x4ee8
59
60 #define IS_SS_ID_MU(d) \
61         ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
62          (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
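/* Note: "MU" here covers both the NU and 2U flavours of the subsystem. */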
63
64 #define IS_SS_ID_NU(d) \
65         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
66
67 #define IS_SS_ID_VER_14(d) \
68         (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
69 #define IS_SS_ID_2U(d) \
70         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
71
72 #define GBENU_SS_REG_INDEX              0
73 #define GBENU_SM_REG_INDEX              1
74 #define GBENU_SGMII_MODULE_OFFSET       0x100
75 #define GBENU_HOST_PORT_OFFSET          0x1000
76 #define GBENU_SLAVE_PORT_OFFSET         0x2000
77 #define GBENU_EMAC_OFFSET               0x2330
78 #define GBENU_HW_STATS_OFFSET           0x1a000
79 #define GBENU_CPTS_OFFSET               0x1d000
80 #define GBENU_ALE_OFFSET                0x1e000
81 #define GBENU_HOST_PORT_NUM             0
82 #define GBENU_SGMII_MODULE_SIZE         0x100
83
84 /* 10G Ethernet SS defines */
85 #define XGBE_MODULE_NAME                "netcp-xgbe"
86 #define XGBE_SS_VERSION_10              0x4ee4
87
88 #define XGBE_SS_REG_INDEX               0
89 #define XGBE_SM_REG_INDEX               1
90 #define XGBE_SERDES_REG_INDEX           2
91
92 /* offset relative to base of XGBE_SS_REG_INDEX */
93 #define XGBE10_SGMII_MODULE_OFFSET      0x100
94 #define IS_SS_ID_XGBE(d)                ((d)->ss_version == XGBE_SS_VERSION_10)
95 /* offset relative to base of XGBE_SM_REG_INDEX */
96 #define XGBE10_HOST_PORT_OFFSET         0x34
97 #define XGBE10_SLAVE_PORT_OFFSET        0x64
98 #define XGBE10_EMAC_OFFSET              0x400
99 #define XGBE10_CPTS_OFFSET              0x600
100 #define XGBE10_ALE_OFFSET               0x700
101 #define XGBE10_HW_STATS_OFFSET          0x800
102 #define XGBE10_HOST_PORT_NUM            0
103
104 #define GBE_TIMER_INTERVAL                      (HZ / 2)
105
106 /* Soft reset register values */
107 #define SOFT_RESET_MASK                         BIT(0)
108 #define SOFT_RESET                              BIT(0)
109 #define DEVICE_EMACSL_RESET_POLL_COUNT          100
110 #define GMACSL_RET_WARN_RESET_INCOMPLETE        -2
111
112 #define MACSL_RX_ENABLE_CSF                     BIT(23)
113 #define MACSL_ENABLE_EXT_CTL                    BIT(18)
114 #define MACSL_XGMII_ENABLE                      BIT(13)
115 #define MACSL_XGIG_MODE                         BIT(8)
116 #define MACSL_GIG_MODE                          BIT(7)
117 #define MACSL_GMII_ENABLE                       BIT(5)
118 #define MACSL_FULLDUPLEX                        BIT(0)
119
120 #define GBE_CTL_P0_ENABLE                       BIT(2)
121 #define ETH_SW_CTL_P0_TX_CRC_REMOVE             BIT(13)
122 #define GBE13_REG_VAL_STAT_ENABLE_ALL           0xff
123 #define XGBE_REG_VAL_STAT_ENABLE_ALL            0xf
124 #define GBE_STATS_CD_SEL                        BIT(28)
125
126 #define GBE_PORT_MASK(x)                        (BIT(x) - 1)
127 #define GBE_MASK_NO_PORTS                       0
128
129 #define GBE_DEF_1G_MAC_CONTROL                                  \
130                 (MACSL_GIG_MODE | MACSL_GMII_ENABLE |           \
131                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
132
133 #define GBE_DEF_10G_MAC_CONTROL                         \
134                 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |         \
135                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
136
137 #define GBE_STATSA_MODULE                       0
138 #define GBE_STATSB_MODULE                       1
139 #define GBE_STATSC_MODULE                       2
140 #define GBE_STATSD_MODULE                       3
141
142 #define GBENU_STATS0_MODULE                     0
143 #define GBENU_STATS1_MODULE                     1
144 #define GBENU_STATS2_MODULE                     2
145 #define GBENU_STATS3_MODULE                     3
146 #define GBENU_STATS4_MODULE                     4
147 #define GBENU_STATS5_MODULE                     5
148 #define GBENU_STATS6_MODULE                     6
149 #define GBENU_STATS7_MODULE                     7
150 #define GBENU_STATS8_MODULE                     8
151
152 #define XGBE_STATS0_MODULE                      0
153 #define XGBE_STATS1_MODULE                      1
154 #define XGBE_STATS2_MODULE                      2
155
156 /* s: 0-based slave_port */
157 #define SGMII_BASE(d, s) \
158         (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
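/* i.e. slaves 0 and 1 are reached through sgmii_port_regs, while slaves 2
 * and 3 use the separate sgmii_port34_regs block.
 */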
159
160 #define GBE_TX_QUEUE                            648
161 #define GBE_TXHOOK_ORDER                        0
162 #define GBE_RXHOOK_ORDER                        0
163 #define GBE_DEFAULT_ALE_AGEOUT                  30
164 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
165 #define SLAVE_LINK_IS_RGMII(s) \
166         (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
167          ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
168 #define SLAVE_LINK_IS_SGMII(s) \
169         ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
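/* The three tests above rely on the numeric ordering of the link_interface
 * values: the SGMII modes sort below the RGMII modes, which in turn sort
 * below the XGMII modes.
 */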
170 #define NETCP_LINK_STATE_INVALID                -1
171
172 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
173                 offsetof(struct gbe##_##rb, rn)
174 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
175                 offsetof(struct gbenu##_##rb, rn)
176 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
177                 offsetof(struct xgbe##_##rb, rn)
178 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
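/* For illustration, given a struct gbe_slave *slave,
 * GBE_SET_REG_OFS(slave, emac_regs, soft_reset) expands to
 *      slave->emac_regs_ofs.soft_reset = offsetof(struct gbe_emac_regs, soft_reset);
 * and GBE_REG_ADDR(slave, emac_regs, soft_reset) then evaluates to
 *      slave->emac_regs + slave->emac_regs_ofs.soft_reset
 * so the same accessor code can serve the GBE, GBENU and XGBE register layouts.
 */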
179
180 #define HOST_TX_PRI_MAP_DEFAULT                 0x00000000
181
182 #if IS_ENABLED(CONFIG_TI_CPTS)
183 /* Px_TS_CTL register fields */
184 #define TS_RX_ANX_F_EN                          BIT(0)
185 #define TS_RX_VLAN_LT1_EN                       BIT(1)
186 #define TS_RX_VLAN_LT2_EN                       BIT(2)
187 #define TS_RX_ANX_D_EN                          BIT(3)
188 #define TS_TX_ANX_F_EN                          BIT(4)
189 #define TS_TX_VLAN_LT1_EN                       BIT(5)
190 #define TS_TX_VLAN_LT2_EN                       BIT(6)
191 #define TS_TX_ANX_D_EN                          BIT(7)
192 #define TS_LT2_EN                               BIT(8)
193 #define TS_RX_ANX_E_EN                          BIT(9)
194 #define TS_TX_ANX_E_EN                          BIT(10)
195 #define TS_MSG_TYPE_EN_SHIFT                    16
196 #define TS_MSG_TYPE_EN_MASK                     0xffff
197
198 /* Px_TS_SEQ_LTYPE register fields */
199 #define TS_SEQ_ID_OFS_SHIFT                     16
200 #define TS_SEQ_ID_OFS_MASK                      0x3f
201
202 /* Px_TS_CTL_LTYPE2 register fields */
203 #define TS_107                                  BIT(16)
204 #define TS_129                                  BIT(17)
205 #define TS_130                                  BIT(18)
206 #define TS_131                                  BIT(19)
207 #define TS_132                                  BIT(20)
208 #define TS_319                                  BIT(21)
209 #define TS_320                                  BIT(22)
210 #define TS_TTL_NONZERO                          BIT(23)
211 #define TS_UNI_EN                               BIT(24)
212 #define TS_UNI_EN_SHIFT                         24
213
214 #define TS_TX_ANX_ALL_EN         \
215         (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
216
217 #define TS_RX_ANX_ALL_EN         \
218         (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
219
220 #define TS_CTL_DST_PORT                         TS_319
221 #define TS_CTL_DST_PORT_SHIFT                   21
222
223 #define TS_CTL_MADDR_ALL        \
224         (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
225
226 #define TS_CTL_MADDR_SHIFT                      16
227
228 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
229 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
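/* Bit n corresponds to PTP messageType n: Sync (0), Delay_Req (1),
 * Pdelay_Req (2) and Pdelay_Resp (3).
 */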
230 #endif /* CONFIG_TI_CPTS */
231
232 struct xgbe_ss_regs {
233         u32     id_ver;
234         u32     synce_count;
235         u32     synce_mux;
236         u32     control;
237 };
238
239 struct xgbe_switch_regs {
240         u32     id_ver;
241         u32     control;
242         u32     emcontrol;
243         u32     stat_port_en;
244         u32     ptype;
245         u32     soft_idle;
246         u32     thru_rate;
247         u32     gap_thresh;
248         u32     tx_start_wds;
249         u32     flow_control;
250         u32     cppi_thresh;
251 };
252
253 struct xgbe_port_regs {
254         u32     blk_cnt;
255         u32     port_vlan;
256         u32     tx_pri_map;
257         u32     sa_lo;
258         u32     sa_hi;
259         u32     ts_ctl;
260         u32     ts_seq_ltype;
261         u32     ts_vlan;
262         u32     ts_ctl_ltype2;
263         u32     ts_ctl2;
264         u32     control;
265 };
266
267 struct xgbe_host_port_regs {
268         u32     blk_cnt;
269         u32     port_vlan;
270         u32     tx_pri_map;
271         u32     src_id;
272         u32     rx_pri_map;
273         u32     rx_maxlen;
274 };
275
276 struct xgbe_emac_regs {
277         u32     id_ver;
278         u32     mac_control;
279         u32     mac_status;
280         u32     soft_reset;
281         u32     rx_maxlen;
282         u32     __reserved_0;
283         u32     rx_pause;
284         u32     tx_pause;
285         u32     em_control;
286         u32     __reserved_1;
287         u32     tx_gap;
288         u32     rsvd[4];
289 };
290
291 struct xgbe_host_hw_stats {
292         u32     rx_good_frames;
293         u32     rx_broadcast_frames;
294         u32     rx_multicast_frames;
295         u32     __rsvd_0[3];
296         u32     rx_oversized_frames;
297         u32     __rsvd_1;
298         u32     rx_undersized_frames;
299         u32     __rsvd_2;
300         u32     overrun_type4;
301         u32     overrun_type5;
302         u32     rx_bytes;
303         u32     tx_good_frames;
304         u32     tx_broadcast_frames;
305         u32     tx_multicast_frames;
306         u32     __rsvd_3[9];
307         u32     tx_bytes;
308         u32     tx_64byte_frames;
309         u32     tx_65_to_127byte_frames;
310         u32     tx_128_to_255byte_frames;
311         u32     tx_256_to_511byte_frames;
312         u32     tx_512_to_1023byte_frames;
313         u32     tx_1024byte_frames;
314         u32     net_bytes;
315         u32     rx_sof_overruns;
316         u32     rx_mof_overruns;
317         u32     rx_dma_overruns;
318 };
319
320 struct xgbe_hw_stats {
321         u32     rx_good_frames;
322         u32     rx_broadcast_frames;
323         u32     rx_multicast_frames;
324         u32     rx_pause_frames;
325         u32     rx_crc_errors;
326         u32     rx_align_code_errors;
327         u32     rx_oversized_frames;
328         u32     rx_jabber_frames;
329         u32     rx_undersized_frames;
330         u32     rx_fragments;
331         u32     overrun_type4;
332         u32     overrun_type5;
333         u32     rx_bytes;
334         u32     tx_good_frames;
335         u32     tx_broadcast_frames;
336         u32     tx_multicast_frames;
337         u32     tx_pause_frames;
338         u32     tx_deferred_frames;
339         u32     tx_collision_frames;
340         u32     tx_single_coll_frames;
341         u32     tx_mult_coll_frames;
342         u32     tx_excessive_collisions;
343         u32     tx_late_collisions;
344         u32     tx_underrun;
345         u32     tx_carrier_sense_errors;
346         u32     tx_bytes;
347         u32     tx_64byte_frames;
348         u32     tx_65_to_127byte_frames;
349         u32     tx_128_to_255byte_frames;
350         u32     tx_256_to_511byte_frames;
351         u32     tx_512_to_1023byte_frames;
352         u32     tx_1024byte_frames;
353         u32     net_bytes;
354         u32     rx_sof_overruns;
355         u32     rx_mof_overruns;
356         u32     rx_dma_overruns;
357 };
358
359 struct gbenu_ss_regs {
360         u32     id_ver;
361         u32     synce_count;            /* NU */
362         u32     synce_mux;              /* NU */
363         u32     control;                /* 2U */
364         u32     __rsvd_0[2];            /* 2U */
365         u32     rgmii_status;           /* 2U */
366         u32     ss_status;              /* 2U */
367 };
368
369 struct gbenu_switch_regs {
370         u32     id_ver;
371         u32     control;
372         u32     __rsvd_0[2];
373         u32     emcontrol;
374         u32     stat_port_en;
375         u32     ptype;                  /* NU */
376         u32     soft_idle;
377         u32     thru_rate;              /* NU */
378         u32     gap_thresh;             /* NU */
379         u32     tx_start_wds;           /* NU */
380         u32     eee_prescale;           /* 2U */
381         u32     tx_g_oflow_thresh_set;  /* NU */
382         u32     tx_g_oflow_thresh_clr;  /* NU */
383         u32     tx_g_buf_thresh_set_l;  /* NU */
384         u32     tx_g_buf_thresh_set_h;  /* NU */
385         u32     tx_g_buf_thresh_clr_l;  /* NU */
386         u32     tx_g_buf_thresh_clr_h;  /* NU */
387 };
388
389 struct gbenu_port_regs {
390         u32     __rsvd_0;
391         u32     control;
392         u32     max_blks;               /* 2U */
393         u32     mem_align1;
394         u32     blk_cnt;
395         u32     port_vlan;
396         u32     tx_pri_map;             /* NU */
397         u32     pri_ctl;                /* 2U */
398         u32     rx_pri_map;
399         u32     rx_maxlen;
400         u32     tx_blks_pri;            /* NU */
401         u32     __rsvd_1;
402         u32     idle2lpi;               /* 2U */
403         u32     lpi2idle;               /* 2U */
404         u32     eee_status;             /* 2U */
405         u32     __rsvd_2;
406         u32     __rsvd_3[176];          /* NU: more to add */
407         u32     __rsvd_4[2];
408         u32     sa_lo;
409         u32     sa_hi;
410         u32     ts_ctl;
411         u32     ts_seq_ltype;
412         u32     ts_vlan;
413         u32     ts_ctl_ltype2;
414         u32     ts_ctl2;
415 };
416
417 struct gbenu_host_port_regs {
418         u32     __rsvd_0;
419         u32     control;
420         u32     flow_id_offset;         /* 2U */
421         u32     __rsvd_1;
422         u32     blk_cnt;
423         u32     port_vlan;
424         u32     tx_pri_map;             /* NU */
425         u32     pri_ctl;
426         u32     rx_pri_map;
427         u32     rx_maxlen;
428         u32     tx_blks_pri;            /* NU */
429         u32     __rsvd_2;
430         u32     idle2lpi;               /* 2U */
431         u32     lpi2wake;               /* 2U */
432         u32     eee_status;             /* 2U */
433         u32     __rsvd_3;
434         u32     __rsvd_4[184];          /* NU */
435         u32     host_blks_pri;          /* NU */
436 };
437
438 struct gbenu_emac_regs {
439         u32     mac_control;
440         u32     mac_status;
441         u32     soft_reset;
442         u32     boff_test;
443         u32     rx_pause;
444         u32     __rsvd_0[11];           /* NU */
445         u32     tx_pause;
446         u32     __rsvd_1[11];           /* NU */
447         u32     em_control;
448         u32     tx_gap;
449 };
450
451 /* Some hw stat regs are applicable to slave ports only;
452  * this is handled by the gbenu_et_stats struct.  Also, some
453  * are for SS version NU and some are for 2U.
454  */
455 struct gbenu_hw_stats {
456         u32     rx_good_frames;
457         u32     rx_broadcast_frames;
458         u32     rx_multicast_frames;
459         u32     rx_pause_frames;                /* slave */
460         u32     rx_crc_errors;
461         u32     rx_align_code_errors;           /* slave */
462         u32     rx_oversized_frames;
463         u32     rx_jabber_frames;               /* slave */
464         u32     rx_undersized_frames;
465         u32     rx_fragments;                   /* slave */
466         u32     ale_drop;
467         u32     ale_overrun_drop;
468         u32     rx_bytes;
469         u32     tx_good_frames;
470         u32     tx_broadcast_frames;
471         u32     tx_multicast_frames;
472         u32     tx_pause_frames;                /* slave */
473         u32     tx_deferred_frames;             /* slave */
474         u32     tx_collision_frames;            /* slave */
475         u32     tx_single_coll_frames;          /* slave */
476         u32     tx_mult_coll_frames;            /* slave */
477         u32     tx_excessive_collisions;        /* slave */
478         u32     tx_late_collisions;             /* slave */
479         u32     rx_ipg_error;                   /* slave 10G only */
480         u32     tx_carrier_sense_errors;        /* slave */
481         u32     tx_bytes;
482         u32     tx_64B_frames;
483         u32     tx_65_to_127B_frames;
484         u32     tx_128_to_255B_frames;
485         u32     tx_256_to_511B_frames;
486         u32     tx_512_to_1023B_frames;
487         u32     tx_1024B_frames;
488         u32     net_bytes;
489         u32     rx_bottom_fifo_drop;
490         u32     rx_port_mask_drop;
491         u32     rx_top_fifo_drop;
492         u32     ale_rate_limit_drop;
493         u32     ale_vid_ingress_drop;
494         u32     ale_da_eq_sa_drop;
495         u32     __rsvd_0[3];
496         u32     ale_unknown_ucast;
497         u32     ale_unknown_ucast_bytes;
498         u32     ale_unknown_mcast;
499         u32     ale_unknown_mcast_bytes;
500         u32     ale_unknown_bcast;
501         u32     ale_unknown_bcast_bytes;
502         u32     ale_pol_match;
503         u32     ale_pol_match_red;              /* NU */
504         u32     ale_pol_match_yellow;           /* NU */
505         u32     __rsvd_1[44];
506         u32     tx_mem_protect_err;
507         /* following NU only */
508         u32     tx_pri0;
509         u32     tx_pri1;
510         u32     tx_pri2;
511         u32     tx_pri3;
512         u32     tx_pri4;
513         u32     tx_pri5;
514         u32     tx_pri6;
515         u32     tx_pri7;
516         u32     tx_pri0_bcnt;
517         u32     tx_pri1_bcnt;
518         u32     tx_pri2_bcnt;
519         u32     tx_pri3_bcnt;
520         u32     tx_pri4_bcnt;
521         u32     tx_pri5_bcnt;
522         u32     tx_pri6_bcnt;
523         u32     tx_pri7_bcnt;
524         u32     tx_pri0_drop;
525         u32     tx_pri1_drop;
526         u32     tx_pri2_drop;
527         u32     tx_pri3_drop;
528         u32     tx_pri4_drop;
529         u32     tx_pri5_drop;
530         u32     tx_pri6_drop;
531         u32     tx_pri7_drop;
532         u32     tx_pri0_drop_bcnt;
533         u32     tx_pri1_drop_bcnt;
534         u32     tx_pri2_drop_bcnt;
535         u32     tx_pri3_drop_bcnt;
536         u32     tx_pri4_drop_bcnt;
537         u32     tx_pri5_drop_bcnt;
538         u32     tx_pri6_drop_bcnt;
539         u32     tx_pri7_drop_bcnt;
540 };
541
542 #define GBENU_HW_STATS_REG_MAP_SZ       0x200
543
544 struct gbe_ss_regs {
545         u32     id_ver;
546         u32     synce_count;
547         u32     synce_mux;
548 };
549
550 struct gbe_ss_regs_ofs {
551         u16     id_ver;
552         u16     control;
553         u16     rgmii_status; /* 2U */
554 };
555
556 struct gbe_switch_regs {
557         u32     id_ver;
558         u32     control;
559         u32     soft_reset;
560         u32     stat_port_en;
561         u32     ptype;
562         u32     soft_idle;
563         u32     thru_rate;
564         u32     gap_thresh;
565         u32     tx_start_wds;
566         u32     flow_control;
567 };
568
569 struct gbe_switch_regs_ofs {
570         u16     id_ver;
571         u16     control;
572         u16     soft_reset;
573         u16     emcontrol;
574         u16     stat_port_en;
575         u16     ptype;
576         u16     flow_control;
577 };
578
579 struct gbe_port_regs {
580         u32     max_blks;
581         u32     blk_cnt;
582         u32     port_vlan;
583         u32     tx_pri_map;
584         u32     sa_lo;
585         u32     sa_hi;
586         u32     ts_ctl;
587         u32     ts_seq_ltype;
588         u32     ts_vlan;
589         u32     ts_ctl_ltype2;
590         u32     ts_ctl2;
591 };
592
593 struct gbe_port_regs_ofs {
594         u16     port_vlan;
595         u16     tx_pri_map;
596         u16     rx_pri_map;
597         u16     sa_lo;
598         u16     sa_hi;
599         u16     ts_ctl;
600         u16     ts_seq_ltype;
601         u16     ts_vlan;
602         u16     ts_ctl_ltype2;
603         u16     ts_ctl2;
604         u16     rx_maxlen;      /* 2U, NU */
605 };
606
607 struct gbe_host_port_regs {
608         u32     src_id;
609         u32     port_vlan;
610         u32     rx_pri_map;
611         u32     rx_maxlen;
612 };
613
614 struct gbe_host_port_regs_ofs {
615         u16     port_vlan;
616         u16     tx_pri_map;
617         u16     rx_maxlen;
618 };
619
620 struct gbe_emac_regs {
621         u32     id_ver;
622         u32     mac_control;
623         u32     mac_status;
624         u32     soft_reset;
625         u32     rx_maxlen;
626         u32     __reserved_0;
627         u32     rx_pause;
628         u32     tx_pause;
629         u32     __reserved_1;
630         u32     rx_pri_map;
631         u32     rsvd[6];
632 };
633
634 struct gbe_emac_regs_ofs {
635         u16     mac_control;
636         u16     soft_reset;
637         u16     rx_maxlen;
638 };
639
640 struct gbe_hw_stats {
641         u32     rx_good_frames;
642         u32     rx_broadcast_frames;
643         u32     rx_multicast_frames;
644         u32     rx_pause_frames;
645         u32     rx_crc_errors;
646         u32     rx_align_code_errors;
647         u32     rx_oversized_frames;
648         u32     rx_jabber_frames;
649         u32     rx_undersized_frames;
650         u32     rx_fragments;
651         u32     __pad_0[2];
652         u32     rx_bytes;
653         u32     tx_good_frames;
654         u32     tx_broadcast_frames;
655         u32     tx_multicast_frames;
656         u32     tx_pause_frames;
657         u32     tx_deferred_frames;
658         u32     tx_collision_frames;
659         u32     tx_single_coll_frames;
660         u32     tx_mult_coll_frames;
661         u32     tx_excessive_collisions;
662         u32     tx_late_collisions;
663         u32     tx_underrun;
664         u32     tx_carrier_sense_errors;
665         u32     tx_bytes;
666         u32     tx_64byte_frames;
667         u32     tx_65_to_127byte_frames;
668         u32     tx_128_to_255byte_frames;
669         u32     tx_256_to_511byte_frames;
670         u32     tx_512_to_1023byte_frames;
671         u32     tx_1024byte_frames;
672         u32     net_bytes;
673         u32     rx_sof_overruns;
674         u32     rx_mof_overruns;
675         u32     rx_dma_overruns;
676 };
677
678 #define GBE_MAX_HW_STAT_MODS                    9
679 #define GBE_HW_STATS_REG_MAP_SZ                 0x100
680
681 struct ts_ctl {
682         int     uni;
683         u8      dst_port_map;
684         u8      maddr_map;
685         u8      ts_mcast_type;
686 };
687
688 struct gbe_slave {
689         void __iomem                    *port_regs;
690         void __iomem                    *emac_regs;
691         struct gbe_port_regs_ofs        port_regs_ofs;
692         struct gbe_emac_regs_ofs        emac_regs_ofs;
693         int                             slave_num; /* 0 based logical number */
694         int                             port_num;  /* actual port number */
695         atomic_t                        link_state;
696         bool                            open;
697         struct phy_device               *phy;
698         u32                             link_interface;
699         u32                             mac_control;
700         u8                              phy_port_t;
701         struct device_node              *node;
702         struct device_node              *phy_node;
703         struct ts_ctl                   ts_ctl;
704         struct list_head                slave_list;
705 };
706
707 struct gbe_priv {
708         struct device                   *dev;
709         struct netcp_device             *netcp_device;
710         struct timer_list               timer;
711         u32                             num_slaves;
712         u32                             ale_ports;
713         bool                            enable_ale;
714         u8                              max_num_slaves;
715         u8                              max_num_ports; /* max_num_slaves + 1 */
716         u8                              num_stats_mods;
717         struct netcp_tx_pipe            tx_pipe;
718
719         int                             host_port;
720         u32                             rx_packet_max;
721         u32                             ss_version;
722         u32                             stats_en_mask;
723
724         void __iomem                    *ss_regs;
725         void __iomem                    *switch_regs;
726         void __iomem                    *host_port_regs;
727         void __iomem                    *ale_reg;
728         void __iomem                    *cpts_reg;
729         void __iomem                    *sgmii_port_regs;
730         void __iomem                    *sgmii_port34_regs;
731         void __iomem                    *xgbe_serdes_regs;
732         void __iomem                    *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
733
734         struct gbe_ss_regs_ofs          ss_regs_ofs;
735         struct gbe_switch_regs_ofs      switch_regs_ofs;
736         struct gbe_host_port_regs_ofs   host_port_regs_ofs;
737
738         struct cpsw_ale                 *ale;
739         unsigned int                    tx_queue_id;
740         const char                      *dma_chan_name;
741
742         struct list_head                gbe_intf_head;
743         struct list_head                secondary_slaves;
744         struct net_device               *dummy_ndev;
745
746         u64                             *hw_stats;
747         u32                             *hw_stats_prev;
748         const struct netcp_ethtool_stat *et_stats;
749         int                             num_et_stats;
750         /*  Lock for updating the hwstats */
751         spinlock_t                      hw_stats_lock;
752
753         int                             cpts_registered;
754         struct cpts                     *cpts;
755         int                             rx_ts_enabled;
756         int                             tx_ts_enabled;
757 };
758
759 struct gbe_intf {
760         struct net_device       *ndev;
761         struct device           *dev;
762         struct gbe_priv         *gbe_dev;
763         struct netcp_tx_pipe    tx_pipe;
764         struct gbe_slave        *slave;
765         struct list_head        gbe_intf_list;
766         unsigned long           active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
767 };
768
769 static struct netcp_module gbe_module;
770 static struct netcp_module xgbe_module;
771
772 /* Statistic management */
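/* Each entry describes one ethtool statistic: desc is the string reported by
 * "ethtool -S", type selects the hw stats module the counter lives in, and
 * size/offset locate the counter within that module's *_hw_stats layout.
 */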
773 struct netcp_ethtool_stat {
774         char desc[ETH_GSTRING_LEN];
775         int type;
776         u32 size;
777         int offset;
778 };
779
780 #define GBE_STATSA_INFO(field)                                          \
781 {                                                                       \
782         "GBE_A:"#field, GBE_STATSA_MODULE,                              \
783         sizeof_field(struct gbe_hw_stats, field),                       \
784         offsetof(struct gbe_hw_stats, field)                            \
785 }
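/* For illustration, GBE_STATSA_INFO(rx_good_frames) expands to
 *      { "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *        sizeof_field(struct gbe_hw_stats, rx_good_frames),
 *        offsetof(struct gbe_hw_stats, rx_good_frames) }
 */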
786
787 #define GBE_STATSB_INFO(field)                                          \
788 {                                                                       \
789         "GBE_B:"#field, GBE_STATSB_MODULE,                              \
790         sizeof_field(struct gbe_hw_stats, field),                       \
791         offsetof(struct gbe_hw_stats, field)                            \
792 }
793
794 #define GBE_STATSC_INFO(field)                                          \
795 {                                                                       \
796         "GBE_C:"#field, GBE_STATSC_MODULE,                              \
797         sizeof_field(struct gbe_hw_stats, field),                       \
798         offsetof(struct gbe_hw_stats, field)                            \
799 }
800
801 #define GBE_STATSD_INFO(field)                                          \
802 {                                                                       \
803         "GBE_D:"#field, GBE_STATSD_MODULE,                              \
804         sizeof_field(struct gbe_hw_stats, field),                       \
805         offsetof(struct gbe_hw_stats, field)                            \
806 }
807
808 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
809         /* GBE module A */
810         GBE_STATSA_INFO(rx_good_frames),
811         GBE_STATSA_INFO(rx_broadcast_frames),
812         GBE_STATSA_INFO(rx_multicast_frames),
813         GBE_STATSA_INFO(rx_pause_frames),
814         GBE_STATSA_INFO(rx_crc_errors),
815         GBE_STATSA_INFO(rx_align_code_errors),
816         GBE_STATSA_INFO(rx_oversized_frames),
817         GBE_STATSA_INFO(rx_jabber_frames),
818         GBE_STATSA_INFO(rx_undersized_frames),
819         GBE_STATSA_INFO(rx_fragments),
820         GBE_STATSA_INFO(rx_bytes),
821         GBE_STATSA_INFO(tx_good_frames),
822         GBE_STATSA_INFO(tx_broadcast_frames),
823         GBE_STATSA_INFO(tx_multicast_frames),
824         GBE_STATSA_INFO(tx_pause_frames),
825         GBE_STATSA_INFO(tx_deferred_frames),
826         GBE_STATSA_INFO(tx_collision_frames),
827         GBE_STATSA_INFO(tx_single_coll_frames),
828         GBE_STATSA_INFO(tx_mult_coll_frames),
829         GBE_STATSA_INFO(tx_excessive_collisions),
830         GBE_STATSA_INFO(tx_late_collisions),
831         GBE_STATSA_INFO(tx_underrun),
832         GBE_STATSA_INFO(tx_carrier_sense_errors),
833         GBE_STATSA_INFO(tx_bytes),
834         GBE_STATSA_INFO(tx_64byte_frames),
835         GBE_STATSA_INFO(tx_65_to_127byte_frames),
836         GBE_STATSA_INFO(tx_128_to_255byte_frames),
837         GBE_STATSA_INFO(tx_256_to_511byte_frames),
838         GBE_STATSA_INFO(tx_512_to_1023byte_frames),
839         GBE_STATSA_INFO(tx_1024byte_frames),
840         GBE_STATSA_INFO(net_bytes),
841         GBE_STATSA_INFO(rx_sof_overruns),
842         GBE_STATSA_INFO(rx_mof_overruns),
843         GBE_STATSA_INFO(rx_dma_overruns),
844         /* GBE module B */
845         GBE_STATSB_INFO(rx_good_frames),
846         GBE_STATSB_INFO(rx_broadcast_frames),
847         GBE_STATSB_INFO(rx_multicast_frames),
848         GBE_STATSB_INFO(rx_pause_frames),
849         GBE_STATSB_INFO(rx_crc_errors),
850         GBE_STATSB_INFO(rx_align_code_errors),
851         GBE_STATSB_INFO(rx_oversized_frames),
852         GBE_STATSB_INFO(rx_jabber_frames),
853         GBE_STATSB_INFO(rx_undersized_frames),
854         GBE_STATSB_INFO(rx_fragments),
855         GBE_STATSB_INFO(rx_bytes),
856         GBE_STATSB_INFO(tx_good_frames),
857         GBE_STATSB_INFO(tx_broadcast_frames),
858         GBE_STATSB_INFO(tx_multicast_frames),
859         GBE_STATSB_INFO(tx_pause_frames),
860         GBE_STATSB_INFO(tx_deferred_frames),
861         GBE_STATSB_INFO(tx_collision_frames),
862         GBE_STATSB_INFO(tx_single_coll_frames),
863         GBE_STATSB_INFO(tx_mult_coll_frames),
864         GBE_STATSB_INFO(tx_excessive_collisions),
865         GBE_STATSB_INFO(tx_late_collisions),
866         GBE_STATSB_INFO(tx_underrun),
867         GBE_STATSB_INFO(tx_carrier_sense_errors),
868         GBE_STATSB_INFO(tx_bytes),
869         GBE_STATSB_INFO(tx_64byte_frames),
870         GBE_STATSB_INFO(tx_65_to_127byte_frames),
871         GBE_STATSB_INFO(tx_128_to_255byte_frames),
872         GBE_STATSB_INFO(tx_256_to_511byte_frames),
873         GBE_STATSB_INFO(tx_512_to_1023byte_frames),
874         GBE_STATSB_INFO(tx_1024byte_frames),
875         GBE_STATSB_INFO(net_bytes),
876         GBE_STATSB_INFO(rx_sof_overruns),
877         GBE_STATSB_INFO(rx_mof_overruns),
878         GBE_STATSB_INFO(rx_dma_overruns),
879         /* GBE module C */
880         GBE_STATSC_INFO(rx_good_frames),
881         GBE_STATSC_INFO(rx_broadcast_frames),
882         GBE_STATSC_INFO(rx_multicast_frames),
883         GBE_STATSC_INFO(rx_pause_frames),
884         GBE_STATSC_INFO(rx_crc_errors),
885         GBE_STATSC_INFO(rx_align_code_errors),
886         GBE_STATSC_INFO(rx_oversized_frames),
887         GBE_STATSC_INFO(rx_jabber_frames),
888         GBE_STATSC_INFO(rx_undersized_frames),
889         GBE_STATSC_INFO(rx_fragments),
890         GBE_STATSC_INFO(rx_bytes),
891         GBE_STATSC_INFO(tx_good_frames),
892         GBE_STATSC_INFO(tx_broadcast_frames),
893         GBE_STATSC_INFO(tx_multicast_frames),
894         GBE_STATSC_INFO(tx_pause_frames),
895         GBE_STATSC_INFO(tx_deferred_frames),
896         GBE_STATSC_INFO(tx_collision_frames),
897         GBE_STATSC_INFO(tx_single_coll_frames),
898         GBE_STATSC_INFO(tx_mult_coll_frames),
899         GBE_STATSC_INFO(tx_excessive_collisions),
900         GBE_STATSC_INFO(tx_late_collisions),
901         GBE_STATSC_INFO(tx_underrun),
902         GBE_STATSC_INFO(tx_carrier_sense_errors),
903         GBE_STATSC_INFO(tx_bytes),
904         GBE_STATSC_INFO(tx_64byte_frames),
905         GBE_STATSC_INFO(tx_65_to_127byte_frames),
906         GBE_STATSC_INFO(tx_128_to_255byte_frames),
907         GBE_STATSC_INFO(tx_256_to_511byte_frames),
908         GBE_STATSC_INFO(tx_512_to_1023byte_frames),
909         GBE_STATSC_INFO(tx_1024byte_frames),
910         GBE_STATSC_INFO(net_bytes),
911         GBE_STATSC_INFO(rx_sof_overruns),
912         GBE_STATSC_INFO(rx_mof_overruns),
913         GBE_STATSC_INFO(rx_dma_overruns),
914         /* GBE module D */
915         GBE_STATSD_INFO(rx_good_frames),
916         GBE_STATSD_INFO(rx_broadcast_frames),
917         GBE_STATSD_INFO(rx_multicast_frames),
918         GBE_STATSD_INFO(rx_pause_frames),
919         GBE_STATSD_INFO(rx_crc_errors),
920         GBE_STATSD_INFO(rx_align_code_errors),
921         GBE_STATSD_INFO(rx_oversized_frames),
922         GBE_STATSD_INFO(rx_jabber_frames),
923         GBE_STATSD_INFO(rx_undersized_frames),
924         GBE_STATSD_INFO(rx_fragments),
925         GBE_STATSD_INFO(rx_bytes),
926         GBE_STATSD_INFO(tx_good_frames),
927         GBE_STATSD_INFO(tx_broadcast_frames),
928         GBE_STATSD_INFO(tx_multicast_frames),
929         GBE_STATSD_INFO(tx_pause_frames),
930         GBE_STATSD_INFO(tx_deferred_frames),
931         GBE_STATSD_INFO(tx_collision_frames),
932         GBE_STATSD_INFO(tx_single_coll_frames),
933         GBE_STATSD_INFO(tx_mult_coll_frames),
934         GBE_STATSD_INFO(tx_excessive_collisions),
935         GBE_STATSD_INFO(tx_late_collisions),
936         GBE_STATSD_INFO(tx_underrun),
937         GBE_STATSD_INFO(tx_carrier_sense_errors),
938         GBE_STATSD_INFO(tx_bytes),
939         GBE_STATSD_INFO(tx_64byte_frames),
940         GBE_STATSD_INFO(tx_65_to_127byte_frames),
941         GBE_STATSD_INFO(tx_128_to_255byte_frames),
942         GBE_STATSD_INFO(tx_256_to_511byte_frames),
943         GBE_STATSD_INFO(tx_512_to_1023byte_frames),
944         GBE_STATSD_INFO(tx_1024byte_frames),
945         GBE_STATSD_INFO(net_bytes),
946         GBE_STATSD_INFO(rx_sof_overruns),
947         GBE_STATSD_INFO(rx_mof_overruns),
948         GBE_STATSD_INFO(rx_dma_overruns),
949 };
950
951 /* This is the number of GBENU_STATS_HOST entries below */
952 #define GBENU_ET_STATS_HOST_SIZE        52
953
954 #define GBENU_STATS_HOST(field)                                 \
955 {                                                               \
956         "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
957         sizeof_field(struct gbenu_hw_stats, field),             \
958         offsetof(struct gbenu_hw_stats, field)                  \
959 }
960
961 /* This is the number of per-port (GBENU_STATS_Px) entries below */
962 #define GBENU_ET_STATS_PORT_SIZE        65
963
964 #define GBENU_STATS_P1(field)                                   \
965 {                                                               \
966         "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
967         sizeof_field(struct gbenu_hw_stats, field),             \
968         offsetof(struct gbenu_hw_stats, field)                  \
969 }
970
971 #define GBENU_STATS_P2(field)                                   \
972 {                                                               \
973         "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
974         sizeof_field(struct gbenu_hw_stats, field),             \
975         offsetof(struct gbenu_hw_stats, field)                  \
976 }
977
978 #define GBENU_STATS_P3(field)                                   \
979 {                                                               \
980         "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
981         sizeof_field(struct gbenu_hw_stats, field),             \
982         offsetof(struct gbenu_hw_stats, field)                  \
983 }
984
985 #define GBENU_STATS_P4(field)                                   \
986 {                                                               \
987         "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
988         sizeof_field(struct gbenu_hw_stats, field),             \
989         offsetof(struct gbenu_hw_stats, field)                  \
990 }
991
992 #define GBENU_STATS_P5(field)                                   \
993 {                                                               \
994         "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
995         sizeof_field(struct gbenu_hw_stats, field),             \
996         offsetof(struct gbenu_hw_stats, field)                  \
997 }
998
999 #define GBENU_STATS_P6(field)                                   \
1000 {                                                               \
1001         "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
1002         sizeof_field(struct gbenu_hw_stats, field),             \
1003         offsetof(struct gbenu_hw_stats, field)                  \
1004 }
1005
1006 #define GBENU_STATS_P7(field)                                   \
1007 {                                                               \
1008         "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
1009         sizeof_field(struct gbenu_hw_stats, field),             \
1010         offsetof(struct gbenu_hw_stats, field)                  \
1011 }
1012
1013 #define GBENU_STATS_P8(field)                                   \
1014 {                                                               \
1015         "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
1016         sizeof_field(struct gbenu_hw_stats, field),             \
1017         offsetof(struct gbenu_hw_stats, field)                  \
1018 }
1019
1020 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1021         /* GBENU Host Module */
1022         GBENU_STATS_HOST(rx_good_frames),
1023         GBENU_STATS_HOST(rx_broadcast_frames),
1024         GBENU_STATS_HOST(rx_multicast_frames),
1025         GBENU_STATS_HOST(rx_crc_errors),
1026         GBENU_STATS_HOST(rx_oversized_frames),
1027         GBENU_STATS_HOST(rx_undersized_frames),
1028         GBENU_STATS_HOST(ale_drop),
1029         GBENU_STATS_HOST(ale_overrun_drop),
1030         GBENU_STATS_HOST(rx_bytes),
1031         GBENU_STATS_HOST(tx_good_frames),
1032         GBENU_STATS_HOST(tx_broadcast_frames),
1033         GBENU_STATS_HOST(tx_multicast_frames),
1034         GBENU_STATS_HOST(tx_bytes),
1035         GBENU_STATS_HOST(tx_64B_frames),
1036         GBENU_STATS_HOST(tx_65_to_127B_frames),
1037         GBENU_STATS_HOST(tx_128_to_255B_frames),
1038         GBENU_STATS_HOST(tx_256_to_511B_frames),
1039         GBENU_STATS_HOST(tx_512_to_1023B_frames),
1040         GBENU_STATS_HOST(tx_1024B_frames),
1041         GBENU_STATS_HOST(net_bytes),
1042         GBENU_STATS_HOST(rx_bottom_fifo_drop),
1043         GBENU_STATS_HOST(rx_port_mask_drop),
1044         GBENU_STATS_HOST(rx_top_fifo_drop),
1045         GBENU_STATS_HOST(ale_rate_limit_drop),
1046         GBENU_STATS_HOST(ale_vid_ingress_drop),
1047         GBENU_STATS_HOST(ale_da_eq_sa_drop),
1048         GBENU_STATS_HOST(ale_unknown_ucast),
1049         GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1050         GBENU_STATS_HOST(ale_unknown_mcast),
1051         GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1052         GBENU_STATS_HOST(ale_unknown_bcast),
1053         GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1054         GBENU_STATS_HOST(ale_pol_match),
1055         GBENU_STATS_HOST(ale_pol_match_red),
1056         GBENU_STATS_HOST(ale_pol_match_yellow),
1057         GBENU_STATS_HOST(tx_mem_protect_err),
1058         GBENU_STATS_HOST(tx_pri0_drop),
1059         GBENU_STATS_HOST(tx_pri1_drop),
1060         GBENU_STATS_HOST(tx_pri2_drop),
1061         GBENU_STATS_HOST(tx_pri3_drop),
1062         GBENU_STATS_HOST(tx_pri4_drop),
1063         GBENU_STATS_HOST(tx_pri5_drop),
1064         GBENU_STATS_HOST(tx_pri6_drop),
1065         GBENU_STATS_HOST(tx_pri7_drop),
1066         GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1067         GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1068         GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1069         GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1070         GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1071         GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1072         GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1073         GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1074         /* GBENU Module 1 */
1075         GBENU_STATS_P1(rx_good_frames),
1076         GBENU_STATS_P1(rx_broadcast_frames),
1077         GBENU_STATS_P1(rx_multicast_frames),
1078         GBENU_STATS_P1(rx_pause_frames),
1079         GBENU_STATS_P1(rx_crc_errors),
1080         GBENU_STATS_P1(rx_align_code_errors),
1081         GBENU_STATS_P1(rx_oversized_frames),
1082         GBENU_STATS_P1(rx_jabber_frames),
1083         GBENU_STATS_P1(rx_undersized_frames),
1084         GBENU_STATS_P1(rx_fragments),
1085         GBENU_STATS_P1(ale_drop),
1086         GBENU_STATS_P1(ale_overrun_drop),
1087         GBENU_STATS_P1(rx_bytes),
1088         GBENU_STATS_P1(tx_good_frames),
1089         GBENU_STATS_P1(tx_broadcast_frames),
1090         GBENU_STATS_P1(tx_multicast_frames),
1091         GBENU_STATS_P1(tx_pause_frames),
1092         GBENU_STATS_P1(tx_deferred_frames),
1093         GBENU_STATS_P1(tx_collision_frames),
1094         GBENU_STATS_P1(tx_single_coll_frames),
1095         GBENU_STATS_P1(tx_mult_coll_frames),
1096         GBENU_STATS_P1(tx_excessive_collisions),
1097         GBENU_STATS_P1(tx_late_collisions),
1098         GBENU_STATS_P1(rx_ipg_error),
1099         GBENU_STATS_P1(tx_carrier_sense_errors),
1100         GBENU_STATS_P1(tx_bytes),
1101         GBENU_STATS_P1(tx_64B_frames),
1102         GBENU_STATS_P1(tx_65_to_127B_frames),
1103         GBENU_STATS_P1(tx_128_to_255B_frames),
1104         GBENU_STATS_P1(tx_256_to_511B_frames),
1105         GBENU_STATS_P1(tx_512_to_1023B_frames),
1106         GBENU_STATS_P1(tx_1024B_frames),
1107         GBENU_STATS_P1(net_bytes),
1108         GBENU_STATS_P1(rx_bottom_fifo_drop),
1109         GBENU_STATS_P1(rx_port_mask_drop),
1110         GBENU_STATS_P1(rx_top_fifo_drop),
1111         GBENU_STATS_P1(ale_rate_limit_drop),
1112         GBENU_STATS_P1(ale_vid_ingress_drop),
1113         GBENU_STATS_P1(ale_da_eq_sa_drop),
1114         GBENU_STATS_P1(ale_unknown_ucast),
1115         GBENU_STATS_P1(ale_unknown_ucast_bytes),
1116         GBENU_STATS_P1(ale_unknown_mcast),
1117         GBENU_STATS_P1(ale_unknown_mcast_bytes),
1118         GBENU_STATS_P1(ale_unknown_bcast),
1119         GBENU_STATS_P1(ale_unknown_bcast_bytes),
1120         GBENU_STATS_P1(ale_pol_match),
1121         GBENU_STATS_P1(ale_pol_match_red),
1122         GBENU_STATS_P1(ale_pol_match_yellow),
1123         GBENU_STATS_P1(tx_mem_protect_err),
1124         GBENU_STATS_P1(tx_pri0_drop),
1125         GBENU_STATS_P1(tx_pri1_drop),
1126         GBENU_STATS_P1(tx_pri2_drop),
1127         GBENU_STATS_P1(tx_pri3_drop),
1128         GBENU_STATS_P1(tx_pri4_drop),
1129         GBENU_STATS_P1(tx_pri5_drop),
1130         GBENU_STATS_P1(tx_pri6_drop),
1131         GBENU_STATS_P1(tx_pri7_drop),
1132         GBENU_STATS_P1(tx_pri0_drop_bcnt),
1133         GBENU_STATS_P1(tx_pri1_drop_bcnt),
1134         GBENU_STATS_P1(tx_pri2_drop_bcnt),
1135         GBENU_STATS_P1(tx_pri3_drop_bcnt),
1136         GBENU_STATS_P1(tx_pri4_drop_bcnt),
1137         GBENU_STATS_P1(tx_pri5_drop_bcnt),
1138         GBENU_STATS_P1(tx_pri6_drop_bcnt),
1139         GBENU_STATS_P1(tx_pri7_drop_bcnt),
1140         /* GBENU Module 2 */
1141         GBENU_STATS_P2(rx_good_frames),
1142         GBENU_STATS_P2(rx_broadcast_frames),
1143         GBENU_STATS_P2(rx_multicast_frames),
1144         GBENU_STATS_P2(rx_pause_frames),
1145         GBENU_STATS_P2(rx_crc_errors),
1146         GBENU_STATS_P2(rx_align_code_errors),
1147         GBENU_STATS_P2(rx_oversized_frames),
1148         GBENU_STATS_P2(rx_jabber_frames),
1149         GBENU_STATS_P2(rx_undersized_frames),
1150         GBENU_STATS_P2(rx_fragments),
1151         GBENU_STATS_P2(ale_drop),
1152         GBENU_STATS_P2(ale_overrun_drop),
1153         GBENU_STATS_P2(rx_bytes),
1154         GBENU_STATS_P2(tx_good_frames),
1155         GBENU_STATS_P2(tx_broadcast_frames),
1156         GBENU_STATS_P2(tx_multicast_frames),
1157         GBENU_STATS_P2(tx_pause_frames),
1158         GBENU_STATS_P2(tx_deferred_frames),
1159         GBENU_STATS_P2(tx_collision_frames),
1160         GBENU_STATS_P2(tx_single_coll_frames),
1161         GBENU_STATS_P2(tx_mult_coll_frames),
1162         GBENU_STATS_P2(tx_excessive_collisions),
1163         GBENU_STATS_P2(tx_late_collisions),
1164         GBENU_STATS_P2(rx_ipg_error),
1165         GBENU_STATS_P2(tx_carrier_sense_errors),
1166         GBENU_STATS_P2(tx_bytes),
1167         GBENU_STATS_P2(tx_64B_frames),
1168         GBENU_STATS_P2(tx_65_to_127B_frames),
1169         GBENU_STATS_P2(tx_128_to_255B_frames),
1170         GBENU_STATS_P2(tx_256_to_511B_frames),
1171         GBENU_STATS_P2(tx_512_to_1023B_frames),
1172         GBENU_STATS_P2(tx_1024B_frames),
1173         GBENU_STATS_P2(net_bytes),
1174         GBENU_STATS_P2(rx_bottom_fifo_drop),
1175         GBENU_STATS_P2(rx_port_mask_drop),
1176         GBENU_STATS_P2(rx_top_fifo_drop),
1177         GBENU_STATS_P2(ale_rate_limit_drop),
1178         GBENU_STATS_P2(ale_vid_ingress_drop),
1179         GBENU_STATS_P2(ale_da_eq_sa_drop),
1180         GBENU_STATS_P2(ale_unknown_ucast),
1181         GBENU_STATS_P2(ale_unknown_ucast_bytes),
1182         GBENU_STATS_P2(ale_unknown_mcast),
1183         GBENU_STATS_P2(ale_unknown_mcast_bytes),
1184         GBENU_STATS_P2(ale_unknown_bcast),
1185         GBENU_STATS_P2(ale_unknown_bcast_bytes),
1186         GBENU_STATS_P2(ale_pol_match),
1187         GBENU_STATS_P2(ale_pol_match_red),
1188         GBENU_STATS_P2(ale_pol_match_yellow),
1189         GBENU_STATS_P2(tx_mem_protect_err),
1190         GBENU_STATS_P2(tx_pri0_drop),
1191         GBENU_STATS_P2(tx_pri1_drop),
1192         GBENU_STATS_P2(tx_pri2_drop),
1193         GBENU_STATS_P2(tx_pri3_drop),
1194         GBENU_STATS_P2(tx_pri4_drop),
1195         GBENU_STATS_P2(tx_pri5_drop),
1196         GBENU_STATS_P2(tx_pri6_drop),
1197         GBENU_STATS_P2(tx_pri7_drop),
1198         GBENU_STATS_P2(tx_pri0_drop_bcnt),
1199         GBENU_STATS_P2(tx_pri1_drop_bcnt),
1200         GBENU_STATS_P2(tx_pri2_drop_bcnt),
1201         GBENU_STATS_P2(tx_pri3_drop_bcnt),
1202         GBENU_STATS_P2(tx_pri4_drop_bcnt),
1203         GBENU_STATS_P2(tx_pri5_drop_bcnt),
1204         GBENU_STATS_P2(tx_pri6_drop_bcnt),
1205         GBENU_STATS_P2(tx_pri7_drop_bcnt),
1206         /* GBENU Module 3 */
1207         GBENU_STATS_P3(rx_good_frames),
1208         GBENU_STATS_P3(rx_broadcast_frames),
1209         GBENU_STATS_P3(rx_multicast_frames),
1210         GBENU_STATS_P3(rx_pause_frames),
1211         GBENU_STATS_P3(rx_crc_errors),
1212         GBENU_STATS_P3(rx_align_code_errors),
1213         GBENU_STATS_P3(rx_oversized_frames),
1214         GBENU_STATS_P3(rx_jabber_frames),
1215         GBENU_STATS_P3(rx_undersized_frames),
1216         GBENU_STATS_P3(rx_fragments),
1217         GBENU_STATS_P3(ale_drop),
1218         GBENU_STATS_P3(ale_overrun_drop),
1219         GBENU_STATS_P3(rx_bytes),
1220         GBENU_STATS_P3(tx_good_frames),
1221         GBENU_STATS_P3(tx_broadcast_frames),
1222         GBENU_STATS_P3(tx_multicast_frames),
1223         GBENU_STATS_P3(tx_pause_frames),
1224         GBENU_STATS_P3(tx_deferred_frames),
1225         GBENU_STATS_P3(tx_collision_frames),
1226         GBENU_STATS_P3(tx_single_coll_frames),
1227         GBENU_STATS_P3(tx_mult_coll_frames),
1228         GBENU_STATS_P3(tx_excessive_collisions),
1229         GBENU_STATS_P3(tx_late_collisions),
1230         GBENU_STATS_P3(rx_ipg_error),
1231         GBENU_STATS_P3(tx_carrier_sense_errors),
1232         GBENU_STATS_P3(tx_bytes),
1233         GBENU_STATS_P3(tx_64B_frames),
1234         GBENU_STATS_P3(tx_65_to_127B_frames),
1235         GBENU_STATS_P3(tx_128_to_255B_frames),
1236         GBENU_STATS_P3(tx_256_to_511B_frames),
1237         GBENU_STATS_P3(tx_512_to_1023B_frames),
1238         GBENU_STATS_P3(tx_1024B_frames),
1239         GBENU_STATS_P3(net_bytes),
1240         GBENU_STATS_P3(rx_bottom_fifo_drop),
1241         GBENU_STATS_P3(rx_port_mask_drop),
1242         GBENU_STATS_P3(rx_top_fifo_drop),
1243         GBENU_STATS_P3(ale_rate_limit_drop),
1244         GBENU_STATS_P3(ale_vid_ingress_drop),
1245         GBENU_STATS_P3(ale_da_eq_sa_drop),
1246         GBENU_STATS_P3(ale_unknown_ucast),
1247         GBENU_STATS_P3(ale_unknown_ucast_bytes),
1248         GBENU_STATS_P3(ale_unknown_mcast),
1249         GBENU_STATS_P3(ale_unknown_mcast_bytes),
1250         GBENU_STATS_P3(ale_unknown_bcast),
1251         GBENU_STATS_P3(ale_unknown_bcast_bytes),
1252         GBENU_STATS_P3(ale_pol_match),
1253         GBENU_STATS_P3(ale_pol_match_red),
1254         GBENU_STATS_P3(ale_pol_match_yellow),
1255         GBENU_STATS_P3(tx_mem_protect_err),
1256         GBENU_STATS_P3(tx_pri0_drop),
1257         GBENU_STATS_P3(tx_pri1_drop),
1258         GBENU_STATS_P3(tx_pri2_drop),
1259         GBENU_STATS_P3(tx_pri3_drop),
1260         GBENU_STATS_P3(tx_pri4_drop),
1261         GBENU_STATS_P3(tx_pri5_drop),
1262         GBENU_STATS_P3(tx_pri6_drop),
1263         GBENU_STATS_P3(tx_pri7_drop),
1264         GBENU_STATS_P3(tx_pri0_drop_bcnt),
1265         GBENU_STATS_P3(tx_pri1_drop_bcnt),
1266         GBENU_STATS_P3(tx_pri2_drop_bcnt),
1267         GBENU_STATS_P3(tx_pri3_drop_bcnt),
1268         GBENU_STATS_P3(tx_pri4_drop_bcnt),
1269         GBENU_STATS_P3(tx_pri5_drop_bcnt),
1270         GBENU_STATS_P3(tx_pri6_drop_bcnt),
1271         GBENU_STATS_P3(tx_pri7_drop_bcnt),
1272         /* GBENU Module 4 */
1273         GBENU_STATS_P4(rx_good_frames),
1274         GBENU_STATS_P4(rx_broadcast_frames),
1275         GBENU_STATS_P4(rx_multicast_frames),
1276         GBENU_STATS_P4(rx_pause_frames),
1277         GBENU_STATS_P4(rx_crc_errors),
1278         GBENU_STATS_P4(rx_align_code_errors),
1279         GBENU_STATS_P4(rx_oversized_frames),
1280         GBENU_STATS_P4(rx_jabber_frames),
1281         GBENU_STATS_P4(rx_undersized_frames),
1282         GBENU_STATS_P4(rx_fragments),
1283         GBENU_STATS_P4(ale_drop),
1284         GBENU_STATS_P4(ale_overrun_drop),
1285         GBENU_STATS_P4(rx_bytes),
1286         GBENU_STATS_P4(tx_good_frames),
1287         GBENU_STATS_P4(tx_broadcast_frames),
1288         GBENU_STATS_P4(tx_multicast_frames),
1289         GBENU_STATS_P4(tx_pause_frames),
1290         GBENU_STATS_P4(tx_deferred_frames),
1291         GBENU_STATS_P4(tx_collision_frames),
1292         GBENU_STATS_P4(tx_single_coll_frames),
1293         GBENU_STATS_P4(tx_mult_coll_frames),
1294         GBENU_STATS_P4(tx_excessive_collisions),
1295         GBENU_STATS_P4(tx_late_collisions),
1296         GBENU_STATS_P4(rx_ipg_error),
1297         GBENU_STATS_P4(tx_carrier_sense_errors),
1298         GBENU_STATS_P4(tx_bytes),
1299         GBENU_STATS_P4(tx_64B_frames),
1300         GBENU_STATS_P4(tx_65_to_127B_frames),
1301         GBENU_STATS_P4(tx_128_to_255B_frames),
1302         GBENU_STATS_P4(tx_256_to_511B_frames),
1303         GBENU_STATS_P4(tx_512_to_1023B_frames),
1304         GBENU_STATS_P4(tx_1024B_frames),
1305         GBENU_STATS_P4(net_bytes),
1306         GBENU_STATS_P4(rx_bottom_fifo_drop),
1307         GBENU_STATS_P4(rx_port_mask_drop),
1308         GBENU_STATS_P4(rx_top_fifo_drop),
1309         GBENU_STATS_P4(ale_rate_limit_drop),
1310         GBENU_STATS_P4(ale_vid_ingress_drop),
1311         GBENU_STATS_P4(ale_da_eq_sa_drop),
1312         GBENU_STATS_P4(ale_unknown_ucast),
1313         GBENU_STATS_P4(ale_unknown_ucast_bytes),
1314         GBENU_STATS_P4(ale_unknown_mcast),
1315         GBENU_STATS_P4(ale_unknown_mcast_bytes),
1316         GBENU_STATS_P4(ale_unknown_bcast),
1317         GBENU_STATS_P4(ale_unknown_bcast_bytes),
1318         GBENU_STATS_P4(ale_pol_match),
1319         GBENU_STATS_P4(ale_pol_match_red),
1320         GBENU_STATS_P4(ale_pol_match_yellow),
1321         GBENU_STATS_P4(tx_mem_protect_err),
1322         GBENU_STATS_P4(tx_pri0_drop),
1323         GBENU_STATS_P4(tx_pri1_drop),
1324         GBENU_STATS_P4(tx_pri2_drop),
1325         GBENU_STATS_P4(tx_pri3_drop),
1326         GBENU_STATS_P4(tx_pri4_drop),
1327         GBENU_STATS_P4(tx_pri5_drop),
1328         GBENU_STATS_P4(tx_pri6_drop),
1329         GBENU_STATS_P4(tx_pri7_drop),
1330         GBENU_STATS_P4(tx_pri0_drop_bcnt),
1331         GBENU_STATS_P4(tx_pri1_drop_bcnt),
1332         GBENU_STATS_P4(tx_pri2_drop_bcnt),
1333         GBENU_STATS_P4(tx_pri3_drop_bcnt),
1334         GBENU_STATS_P4(tx_pri4_drop_bcnt),
1335         GBENU_STATS_P4(tx_pri5_drop_bcnt),
1336         GBENU_STATS_P4(tx_pri6_drop_bcnt),
1337         GBENU_STATS_P4(tx_pri7_drop_bcnt),
1338         /* GBENU Module 5 */
1339         GBENU_STATS_P5(rx_good_frames),
1340         GBENU_STATS_P5(rx_broadcast_frames),
1341         GBENU_STATS_P5(rx_multicast_frames),
1342         GBENU_STATS_P5(rx_pause_frames),
1343         GBENU_STATS_P5(rx_crc_errors),
1344         GBENU_STATS_P5(rx_align_code_errors),
1345         GBENU_STATS_P5(rx_oversized_frames),
1346         GBENU_STATS_P5(rx_jabber_frames),
1347         GBENU_STATS_P5(rx_undersized_frames),
1348         GBENU_STATS_P5(rx_fragments),
1349         GBENU_STATS_P5(ale_drop),
1350         GBENU_STATS_P5(ale_overrun_drop),
1351         GBENU_STATS_P5(rx_bytes),
1352         GBENU_STATS_P5(tx_good_frames),
1353         GBENU_STATS_P5(tx_broadcast_frames),
1354         GBENU_STATS_P5(tx_multicast_frames),
1355         GBENU_STATS_P5(tx_pause_frames),
1356         GBENU_STATS_P5(tx_deferred_frames),
1357         GBENU_STATS_P5(tx_collision_frames),
1358         GBENU_STATS_P5(tx_single_coll_frames),
1359         GBENU_STATS_P5(tx_mult_coll_frames),
1360         GBENU_STATS_P5(tx_excessive_collisions),
1361         GBENU_STATS_P5(tx_late_collisions),
1362         GBENU_STATS_P5(rx_ipg_error),
1363         GBENU_STATS_P5(tx_carrier_sense_errors),
1364         GBENU_STATS_P5(tx_bytes),
1365         GBENU_STATS_P5(tx_64B_frames),
1366         GBENU_STATS_P5(tx_65_to_127B_frames),
1367         GBENU_STATS_P5(tx_128_to_255B_frames),
1368         GBENU_STATS_P5(tx_256_to_511B_frames),
1369         GBENU_STATS_P5(tx_512_to_1023B_frames),
1370         GBENU_STATS_P5(tx_1024B_frames),
1371         GBENU_STATS_P5(net_bytes),
1372         GBENU_STATS_P5(rx_bottom_fifo_drop),
1373         GBENU_STATS_P5(rx_port_mask_drop),
1374         GBENU_STATS_P5(rx_top_fifo_drop),
1375         GBENU_STATS_P5(ale_rate_limit_drop),
1376         GBENU_STATS_P5(ale_vid_ingress_drop),
1377         GBENU_STATS_P5(ale_da_eq_sa_drop),
1378         GBENU_STATS_P5(ale_unknown_ucast),
1379         GBENU_STATS_P5(ale_unknown_ucast_bytes),
1380         GBENU_STATS_P5(ale_unknown_mcast),
1381         GBENU_STATS_P5(ale_unknown_mcast_bytes),
1382         GBENU_STATS_P5(ale_unknown_bcast),
1383         GBENU_STATS_P5(ale_unknown_bcast_bytes),
1384         GBENU_STATS_P5(ale_pol_match),
1385         GBENU_STATS_P5(ale_pol_match_red),
1386         GBENU_STATS_P5(ale_pol_match_yellow),
1387         GBENU_STATS_P5(tx_mem_protect_err),
1388         GBENU_STATS_P5(tx_pri0_drop),
1389         GBENU_STATS_P5(tx_pri1_drop),
1390         GBENU_STATS_P5(tx_pri2_drop),
1391         GBENU_STATS_P5(tx_pri3_drop),
1392         GBENU_STATS_P5(tx_pri4_drop),
1393         GBENU_STATS_P5(tx_pri5_drop),
1394         GBENU_STATS_P5(tx_pri6_drop),
1395         GBENU_STATS_P5(tx_pri7_drop),
1396         GBENU_STATS_P5(tx_pri0_drop_bcnt),
1397         GBENU_STATS_P5(tx_pri1_drop_bcnt),
1398         GBENU_STATS_P5(tx_pri2_drop_bcnt),
1399         GBENU_STATS_P5(tx_pri3_drop_bcnt),
1400         GBENU_STATS_P5(tx_pri4_drop_bcnt),
1401         GBENU_STATS_P5(tx_pri5_drop_bcnt),
1402         GBENU_STATS_P5(tx_pri6_drop_bcnt),
1403         GBENU_STATS_P5(tx_pri7_drop_bcnt),
1404         /* GBENU Module 6 */
1405         GBENU_STATS_P6(rx_good_frames),
1406         GBENU_STATS_P6(rx_broadcast_frames),
1407         GBENU_STATS_P6(rx_multicast_frames),
1408         GBENU_STATS_P6(rx_pause_frames),
1409         GBENU_STATS_P6(rx_crc_errors),
1410         GBENU_STATS_P6(rx_align_code_errors),
1411         GBENU_STATS_P6(rx_oversized_frames),
1412         GBENU_STATS_P6(rx_jabber_frames),
1413         GBENU_STATS_P6(rx_undersized_frames),
1414         GBENU_STATS_P6(rx_fragments),
1415         GBENU_STATS_P6(ale_drop),
1416         GBENU_STATS_P6(ale_overrun_drop),
1417         GBENU_STATS_P6(rx_bytes),
1418         GBENU_STATS_P6(tx_good_frames),
1419         GBENU_STATS_P6(tx_broadcast_frames),
1420         GBENU_STATS_P6(tx_multicast_frames),
1421         GBENU_STATS_P6(tx_pause_frames),
1422         GBENU_STATS_P6(tx_deferred_frames),
1423         GBENU_STATS_P6(tx_collision_frames),
1424         GBENU_STATS_P6(tx_single_coll_frames),
1425         GBENU_STATS_P6(tx_mult_coll_frames),
1426         GBENU_STATS_P6(tx_excessive_collisions),
1427         GBENU_STATS_P6(tx_late_collisions),
1428         GBENU_STATS_P6(rx_ipg_error),
1429         GBENU_STATS_P6(tx_carrier_sense_errors),
1430         GBENU_STATS_P6(tx_bytes),
1431         GBENU_STATS_P6(tx_64B_frames),
1432         GBENU_STATS_P6(tx_65_to_127B_frames),
1433         GBENU_STATS_P6(tx_128_to_255B_frames),
1434         GBENU_STATS_P6(tx_256_to_511B_frames),
1435         GBENU_STATS_P6(tx_512_to_1023B_frames),
1436         GBENU_STATS_P6(tx_1024B_frames),
1437         GBENU_STATS_P6(net_bytes),
1438         GBENU_STATS_P6(rx_bottom_fifo_drop),
1439         GBENU_STATS_P6(rx_port_mask_drop),
1440         GBENU_STATS_P6(rx_top_fifo_drop),
1441         GBENU_STATS_P6(ale_rate_limit_drop),
1442         GBENU_STATS_P6(ale_vid_ingress_drop),
1443         GBENU_STATS_P6(ale_da_eq_sa_drop),
1444         GBENU_STATS_P6(ale_unknown_ucast),
1445         GBENU_STATS_P6(ale_unknown_ucast_bytes),
1446         GBENU_STATS_P6(ale_unknown_mcast),
1447         GBENU_STATS_P6(ale_unknown_mcast_bytes),
1448         GBENU_STATS_P6(ale_unknown_bcast),
1449         GBENU_STATS_P6(ale_unknown_bcast_bytes),
1450         GBENU_STATS_P6(ale_pol_match),
1451         GBENU_STATS_P6(ale_pol_match_red),
1452         GBENU_STATS_P6(ale_pol_match_yellow),
1453         GBENU_STATS_P6(tx_mem_protect_err),
1454         GBENU_STATS_P6(tx_pri0_drop),
1455         GBENU_STATS_P6(tx_pri1_drop),
1456         GBENU_STATS_P6(tx_pri2_drop),
1457         GBENU_STATS_P6(tx_pri3_drop),
1458         GBENU_STATS_P6(tx_pri4_drop),
1459         GBENU_STATS_P6(tx_pri5_drop),
1460         GBENU_STATS_P6(tx_pri6_drop),
1461         GBENU_STATS_P6(tx_pri7_drop),
1462         GBENU_STATS_P6(tx_pri0_drop_bcnt),
1463         GBENU_STATS_P6(tx_pri1_drop_bcnt),
1464         GBENU_STATS_P6(tx_pri2_drop_bcnt),
1465         GBENU_STATS_P6(tx_pri3_drop_bcnt),
1466         GBENU_STATS_P6(tx_pri4_drop_bcnt),
1467         GBENU_STATS_P6(tx_pri5_drop_bcnt),
1468         GBENU_STATS_P6(tx_pri6_drop_bcnt),
1469         GBENU_STATS_P6(tx_pri7_drop_bcnt),
1470         /* GBENU Module 7 */
1471         GBENU_STATS_P7(rx_good_frames),
1472         GBENU_STATS_P7(rx_broadcast_frames),
1473         GBENU_STATS_P7(rx_multicast_frames),
1474         GBENU_STATS_P7(rx_pause_frames),
1475         GBENU_STATS_P7(rx_crc_errors),
1476         GBENU_STATS_P7(rx_align_code_errors),
1477         GBENU_STATS_P7(rx_oversized_frames),
1478         GBENU_STATS_P7(rx_jabber_frames),
1479         GBENU_STATS_P7(rx_undersized_frames),
1480         GBENU_STATS_P7(rx_fragments),
1481         GBENU_STATS_P7(ale_drop),
1482         GBENU_STATS_P7(ale_overrun_drop),
1483         GBENU_STATS_P7(rx_bytes),
1484         GBENU_STATS_P7(tx_good_frames),
1485         GBENU_STATS_P7(tx_broadcast_frames),
1486         GBENU_STATS_P7(tx_multicast_frames),
1487         GBENU_STATS_P7(tx_pause_frames),
1488         GBENU_STATS_P7(tx_deferred_frames),
1489         GBENU_STATS_P7(tx_collision_frames),
1490         GBENU_STATS_P7(tx_single_coll_frames),
1491         GBENU_STATS_P7(tx_mult_coll_frames),
1492         GBENU_STATS_P7(tx_excessive_collisions),
1493         GBENU_STATS_P7(tx_late_collisions),
1494         GBENU_STATS_P7(rx_ipg_error),
1495         GBENU_STATS_P7(tx_carrier_sense_errors),
1496         GBENU_STATS_P7(tx_bytes),
1497         GBENU_STATS_P7(tx_64B_frames),
1498         GBENU_STATS_P7(tx_65_to_127B_frames),
1499         GBENU_STATS_P7(tx_128_to_255B_frames),
1500         GBENU_STATS_P7(tx_256_to_511B_frames),
1501         GBENU_STATS_P7(tx_512_to_1023B_frames),
1502         GBENU_STATS_P7(tx_1024B_frames),
1503         GBENU_STATS_P7(net_bytes),
1504         GBENU_STATS_P7(rx_bottom_fifo_drop),
1505         GBENU_STATS_P7(rx_port_mask_drop),
1506         GBENU_STATS_P7(rx_top_fifo_drop),
1507         GBENU_STATS_P7(ale_rate_limit_drop),
1508         GBENU_STATS_P7(ale_vid_ingress_drop),
1509         GBENU_STATS_P7(ale_da_eq_sa_drop),
1510         GBENU_STATS_P7(ale_unknown_ucast),
1511         GBENU_STATS_P7(ale_unknown_ucast_bytes),
1512         GBENU_STATS_P7(ale_unknown_mcast),
1513         GBENU_STATS_P7(ale_unknown_mcast_bytes),
1514         GBENU_STATS_P7(ale_unknown_bcast),
1515         GBENU_STATS_P7(ale_unknown_bcast_bytes),
1516         GBENU_STATS_P7(ale_pol_match),
1517         GBENU_STATS_P7(ale_pol_match_red),
1518         GBENU_STATS_P7(ale_pol_match_yellow),
1519         GBENU_STATS_P7(tx_mem_protect_err),
1520         GBENU_STATS_P7(tx_pri0_drop),
1521         GBENU_STATS_P7(tx_pri1_drop),
1522         GBENU_STATS_P7(tx_pri2_drop),
1523         GBENU_STATS_P7(tx_pri3_drop),
1524         GBENU_STATS_P7(tx_pri4_drop),
1525         GBENU_STATS_P7(tx_pri5_drop),
1526         GBENU_STATS_P7(tx_pri6_drop),
1527         GBENU_STATS_P7(tx_pri7_drop),
1528         GBENU_STATS_P7(tx_pri0_drop_bcnt),
1529         GBENU_STATS_P7(tx_pri1_drop_bcnt),
1530         GBENU_STATS_P7(tx_pri2_drop_bcnt),
1531         GBENU_STATS_P7(tx_pri3_drop_bcnt),
1532         GBENU_STATS_P7(tx_pri4_drop_bcnt),
1533         GBENU_STATS_P7(tx_pri5_drop_bcnt),
1534         GBENU_STATS_P7(tx_pri6_drop_bcnt),
1535         GBENU_STATS_P7(tx_pri7_drop_bcnt),
1536         /* GBENU Module 8 */
1537         GBENU_STATS_P8(rx_good_frames),
1538         GBENU_STATS_P8(rx_broadcast_frames),
1539         GBENU_STATS_P8(rx_multicast_frames),
1540         GBENU_STATS_P8(rx_pause_frames),
1541         GBENU_STATS_P8(rx_crc_errors),
1542         GBENU_STATS_P8(rx_align_code_errors),
1543         GBENU_STATS_P8(rx_oversized_frames),
1544         GBENU_STATS_P8(rx_jabber_frames),
1545         GBENU_STATS_P8(rx_undersized_frames),
1546         GBENU_STATS_P8(rx_fragments),
1547         GBENU_STATS_P8(ale_drop),
1548         GBENU_STATS_P8(ale_overrun_drop),
1549         GBENU_STATS_P8(rx_bytes),
1550         GBENU_STATS_P8(tx_good_frames),
1551         GBENU_STATS_P8(tx_broadcast_frames),
1552         GBENU_STATS_P8(tx_multicast_frames),
1553         GBENU_STATS_P8(tx_pause_frames),
1554         GBENU_STATS_P8(tx_deferred_frames),
1555         GBENU_STATS_P8(tx_collision_frames),
1556         GBENU_STATS_P8(tx_single_coll_frames),
1557         GBENU_STATS_P8(tx_mult_coll_frames),
1558         GBENU_STATS_P8(tx_excessive_collisions),
1559         GBENU_STATS_P8(tx_late_collisions),
1560         GBENU_STATS_P8(rx_ipg_error),
1561         GBENU_STATS_P8(tx_carrier_sense_errors),
1562         GBENU_STATS_P8(tx_bytes),
1563         GBENU_STATS_P8(tx_64B_frames),
1564         GBENU_STATS_P8(tx_65_to_127B_frames),
1565         GBENU_STATS_P8(tx_128_to_255B_frames),
1566         GBENU_STATS_P8(tx_256_to_511B_frames),
1567         GBENU_STATS_P8(tx_512_to_1023B_frames),
1568         GBENU_STATS_P8(tx_1024B_frames),
1569         GBENU_STATS_P8(net_bytes),
1570         GBENU_STATS_P8(rx_bottom_fifo_drop),
1571         GBENU_STATS_P8(rx_port_mask_drop),
1572         GBENU_STATS_P8(rx_top_fifo_drop),
1573         GBENU_STATS_P8(ale_rate_limit_drop),
1574         GBENU_STATS_P8(ale_vid_ingress_drop),
1575         GBENU_STATS_P8(ale_da_eq_sa_drop),
1576         GBENU_STATS_P8(ale_unknown_ucast),
1577         GBENU_STATS_P8(ale_unknown_ucast_bytes),
1578         GBENU_STATS_P8(ale_unknown_mcast),
1579         GBENU_STATS_P8(ale_unknown_mcast_bytes),
1580         GBENU_STATS_P8(ale_unknown_bcast),
1581         GBENU_STATS_P8(ale_unknown_bcast_bytes),
1582         GBENU_STATS_P8(ale_pol_match),
1583         GBENU_STATS_P8(ale_pol_match_red),
1584         GBENU_STATS_P8(ale_pol_match_yellow),
1585         GBENU_STATS_P8(tx_mem_protect_err),
1586         GBENU_STATS_P8(tx_pri0_drop),
1587         GBENU_STATS_P8(tx_pri1_drop),
1588         GBENU_STATS_P8(tx_pri2_drop),
1589         GBENU_STATS_P8(tx_pri3_drop),
1590         GBENU_STATS_P8(tx_pri4_drop),
1591         GBENU_STATS_P8(tx_pri5_drop),
1592         GBENU_STATS_P8(tx_pri6_drop),
1593         GBENU_STATS_P8(tx_pri7_drop),
1594         GBENU_STATS_P8(tx_pri0_drop_bcnt),
1595         GBENU_STATS_P8(tx_pri1_drop_bcnt),
1596         GBENU_STATS_P8(tx_pri2_drop_bcnt),
1597         GBENU_STATS_P8(tx_pri3_drop_bcnt),
1598         GBENU_STATS_P8(tx_pri4_drop_bcnt),
1599         GBENU_STATS_P8(tx_pri5_drop_bcnt),
1600         GBENU_STATS_P8(tx_pri6_drop_bcnt),
1601         GBENU_STATS_P8(tx_pri7_drop_bcnt),
1602 };
1603
1604 #define XGBE_STATS0_INFO(field)                         \
1605 {                                                       \
1606         "GBE_0:"#field, XGBE_STATS0_MODULE,             \
1607         sizeof_field(struct xgbe_hw_stats, field),      \
1608         offsetof(struct xgbe_hw_stats, field)           \
1609 }
1610
1611 #define XGBE_STATS1_INFO(field)                         \
1612 {                                                       \
1613         "GBE_1:"#field, XGBE_STATS1_MODULE,             \
1614         sizeof_field(struct xgbe_hw_stats, field),      \
1615         offsetof(struct xgbe_hw_stats, field)           \
1616 }
1617
1618 #define XGBE_STATS2_INFO(field)                         \
1619 {                                                       \
1620         "GBE_2:"#field, XGBE_STATS2_MODULE,             \
1621         sizeof_field(struct xgbe_hw_stats, field),      \
1622         offsetof(struct xgbe_hw_stats, field)           \
1623 }
1624
1625 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1626         /* GBE module 0 */
1627         XGBE_STATS0_INFO(rx_good_frames),
1628         XGBE_STATS0_INFO(rx_broadcast_frames),
1629         XGBE_STATS0_INFO(rx_multicast_frames),
1630         XGBE_STATS0_INFO(rx_oversized_frames),
1631         XGBE_STATS0_INFO(rx_undersized_frames),
1632         XGBE_STATS0_INFO(overrun_type4),
1633         XGBE_STATS0_INFO(overrun_type5),
1634         XGBE_STATS0_INFO(rx_bytes),
1635         XGBE_STATS0_INFO(tx_good_frames),
1636         XGBE_STATS0_INFO(tx_broadcast_frames),
1637         XGBE_STATS0_INFO(tx_multicast_frames),
1638         XGBE_STATS0_INFO(tx_bytes),
1639         XGBE_STATS0_INFO(tx_64byte_frames),
1640         XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1641         XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1642         XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1643         XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1644         XGBE_STATS0_INFO(tx_1024byte_frames),
1645         XGBE_STATS0_INFO(net_bytes),
1646         XGBE_STATS0_INFO(rx_sof_overruns),
1647         XGBE_STATS0_INFO(rx_mof_overruns),
1648         XGBE_STATS0_INFO(rx_dma_overruns),
1649         /* XGBE module 1 */
1650         XGBE_STATS1_INFO(rx_good_frames),
1651         XGBE_STATS1_INFO(rx_broadcast_frames),
1652         XGBE_STATS1_INFO(rx_multicast_frames),
1653         XGBE_STATS1_INFO(rx_pause_frames),
1654         XGBE_STATS1_INFO(rx_crc_errors),
1655         XGBE_STATS1_INFO(rx_align_code_errors),
1656         XGBE_STATS1_INFO(rx_oversized_frames),
1657         XGBE_STATS1_INFO(rx_jabber_frames),
1658         XGBE_STATS1_INFO(rx_undersized_frames),
1659         XGBE_STATS1_INFO(rx_fragments),
1660         XGBE_STATS1_INFO(overrun_type4),
1661         XGBE_STATS1_INFO(overrun_type5),
1662         XGBE_STATS1_INFO(rx_bytes),
1663         XGBE_STATS1_INFO(tx_good_frames),
1664         XGBE_STATS1_INFO(tx_broadcast_frames),
1665         XGBE_STATS1_INFO(tx_multicast_frames),
1666         XGBE_STATS1_INFO(tx_pause_frames),
1667         XGBE_STATS1_INFO(tx_deferred_frames),
1668         XGBE_STATS1_INFO(tx_collision_frames),
1669         XGBE_STATS1_INFO(tx_single_coll_frames),
1670         XGBE_STATS1_INFO(tx_mult_coll_frames),
1671         XGBE_STATS1_INFO(tx_excessive_collisions),
1672         XGBE_STATS1_INFO(tx_late_collisions),
1673         XGBE_STATS1_INFO(tx_underrun),
1674         XGBE_STATS1_INFO(tx_carrier_sense_errors),
1675         XGBE_STATS1_INFO(tx_bytes),
1676         XGBE_STATS1_INFO(tx_64byte_frames),
1677         XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1678         XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1679         XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1680         XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1681         XGBE_STATS1_INFO(tx_1024byte_frames),
1682         XGBE_STATS1_INFO(net_bytes),
1683         XGBE_STATS1_INFO(rx_sof_overruns),
1684         XGBE_STATS1_INFO(rx_mof_overruns),
1685         XGBE_STATS1_INFO(rx_dma_overruns),
1686         /* XGBE module 2 */
1687         XGBE_STATS2_INFO(rx_good_frames),
1688         XGBE_STATS2_INFO(rx_broadcast_frames),
1689         XGBE_STATS2_INFO(rx_multicast_frames),
1690         XGBE_STATS2_INFO(rx_pause_frames),
1691         XGBE_STATS2_INFO(rx_crc_errors),
1692         XGBE_STATS2_INFO(rx_align_code_errors),
1693         XGBE_STATS2_INFO(rx_oversized_frames),
1694         XGBE_STATS2_INFO(rx_jabber_frames),
1695         XGBE_STATS2_INFO(rx_undersized_frames),
1696         XGBE_STATS2_INFO(rx_fragments),
1697         XGBE_STATS2_INFO(overrun_type4),
1698         XGBE_STATS2_INFO(overrun_type5),
1699         XGBE_STATS2_INFO(rx_bytes),
1700         XGBE_STATS2_INFO(tx_good_frames),
1701         XGBE_STATS2_INFO(tx_broadcast_frames),
1702         XGBE_STATS2_INFO(tx_multicast_frames),
1703         XGBE_STATS2_INFO(tx_pause_frames),
1704         XGBE_STATS2_INFO(tx_deferred_frames),
1705         XGBE_STATS2_INFO(tx_collision_frames),
1706         XGBE_STATS2_INFO(tx_single_coll_frames),
1707         XGBE_STATS2_INFO(tx_mult_coll_frames),
1708         XGBE_STATS2_INFO(tx_excessive_collisions),
1709         XGBE_STATS2_INFO(tx_late_collisions),
1710         XGBE_STATS2_INFO(tx_underrun),
1711         XGBE_STATS2_INFO(tx_carrier_sense_errors),
1712         XGBE_STATS2_INFO(tx_bytes),
1713         XGBE_STATS2_INFO(tx_64byte_frames),
1714         XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1715         XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1716         XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1717         XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1718         XGBE_STATS2_INFO(tx_1024byte_frames),
1719         XGBE_STATS2_INFO(net_bytes),
1720         XGBE_STATS2_INFO(rx_sof_overruns),
1721         XGBE_STATS2_INFO(rx_mof_overruns),
1722         XGBE_STATS2_INFO(rx_dma_overruns),
1723 };
1724
1725 #define for_each_intf(i, priv) \
1726         list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1727
1728 #define for_each_sec_slave(slave, priv) \
1729         list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1730
1731 #define first_sec_slave(priv)                                   \
1732         list_first_entry(&priv->secondary_slaves, \
1733                         struct gbe_slave, slave_list)
1734
1735 static void keystone_get_drvinfo(struct net_device *ndev,
1736                                  struct ethtool_drvinfo *info)
1737 {
1738         strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1739         strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1740 }
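/* Note on keystone_get_drvinfo() above: strncpy() does not NUL-terminate
 * when the source fills the destination, but NETCP_DRIVER_NAME and
 * NETCP_DRIVER_VERSION comfortably fit the fixed-size ethtool_drvinfo
 * fields, so the copies stay terminated.  Userspace reads these strings
 * with e.g. "ethtool -i eth0" (interface name used only as an example).
 */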
1741
1742 static u32 keystone_get_msglevel(struct net_device *ndev)
1743 {
1744         struct netcp_intf *netcp = netdev_priv(ndev);
1745
1746         return netcp->msg_enable;
1747 }
1748
1749 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1750 {
1751         struct netcp_intf *netcp = netdev_priv(ndev);
1752
1753         netcp->msg_enable = value;
1754 }
1755
1756 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1757 {
1758         struct gbe_intf *gbe_intf;
1759
1760         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1761         if (!gbe_intf)
1762                 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1763
1764         return gbe_intf;
1765 }
1766
1767 static void keystone_get_stat_strings(struct net_device *ndev,
1768                                       uint32_t stringset, uint8_t *data)
1769 {
1770         struct netcp_intf *netcp = netdev_priv(ndev);
1771         struct gbe_intf *gbe_intf;
1772         struct gbe_priv *gbe_dev;
1773         int i;
1774
1775         gbe_intf = keystone_get_intf_data(netcp);
1776         if (!gbe_intf)
1777                 return;
1778         gbe_dev = gbe_intf->gbe_dev;
1779
1780         switch (stringset) {
1781         case ETH_SS_STATS:
1782                 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1783                         memcpy(data, gbe_dev->et_stats[i].desc,
1784                                ETH_GSTRING_LEN);
1785                         data += ETH_GSTRING_LEN;
1786                 }
1787                 break;
1788         case ETH_SS_TEST:
1789                 break;
1790         }
1791 }
1792
1793 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1794 {
1795         struct netcp_intf *netcp = netdev_priv(ndev);
1796         struct gbe_intf *gbe_intf;
1797         struct gbe_priv *gbe_dev;
1798
1799         gbe_intf = keystone_get_intf_data(netcp);
1800         if (!gbe_intf)
1801                 return -EINVAL;
1802         gbe_dev = gbe_intf->gbe_dev;
1803
1804         switch (stringset) {
1805         case ETH_SS_TEST:
1806                 return 0;
1807         case ETH_SS_STATS:
1808                 return gbe_dev->num_et_stats;
1809         default:
1810                 return -EINVAL;
1811         }
1812 }
1813
1814 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1815 {
1816         void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1817         u32  __iomem *p_stats_entry;
1818         int i;
1819
1820         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1821                 if (gbe_dev->et_stats[i].type == stats_mod) {
1822                         p_stats_entry = base + gbe_dev->et_stats[i].offset;
1823                         gbe_dev->hw_stats[i] = 0;
1824                         gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1825                 }
1826         }
1827 }
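/* gbe_reset_mod_stats() above is a software-only reset: the accumulated
 * counter is zeroed and the current hardware register value is latched as
 * the new baseline in hw_stats_prev, so the next delta starts from zero.
 * The hardware counters themselves are left untouched.
 */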
1828
1829 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1830                                              int et_stats_entry)
1831 {
1832         void __iomem *base = NULL;
1833         u32  __iomem *p_stats_entry;
1834         u32 curr, delta;
1835
1836         /* The hw_stats_regs pointers are already
1837          * properly set to point to the right base:
1838          */
1839         base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1840         p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1841         curr = readl(p_stats_entry);
1842         delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1843         gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1844         gbe_dev->hw_stats[et_stats_entry] += delta;
1845 }
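/* The delta above is computed in unsigned 32-bit arithmetic, so a single
 * wrap of the hardware counter between two reads still yields the correct
 * increment; the running total accumulates in the software hw_stats[]
 * array across wraps.
 */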
1846
1847 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1848 {
1849         int i;
1850
1851         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1852                 gbe_update_hw_stats_entry(gbe_dev, i);
1853
1854                 if (data)
1855                         data[i] = gbe_dev->hw_stats[i];
1856         }
1857 }
1858
1859 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1860                                                int stats_mod)
1861 {
1862         u32 val;
1863
1864         val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1865
1866         switch (stats_mod) {
1867         case GBE_STATSA_MODULE:
1868         case GBE_STATSB_MODULE:
1869                 val &= ~GBE_STATS_CD_SEL;
1870                 break;
1871         case GBE_STATSC_MODULE:
1872         case GBE_STATSD_MODULE:
1873                 val |= GBE_STATS_CD_SEL;
1874                 break;
1875         default:
1876                 return;
1877         }
1878
1879         /* make the stat module visible */
1880         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1881 }
1882
1883 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1884 {
1885         gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1886         gbe_reset_mod_stats(gbe_dev, stats_mod);
1887 }
1888
1889 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1890 {
1891         u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1892         int et_entry, j, pair;
1893
1894         for (pair = 0; pair < 2; pair++) {
1895                 gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1896                                                       GBE_STATSC_MODULE :
1897                                                       GBE_STATSA_MODULE));
1898
1899                 for (j = 0; j < half_num_et_stats; j++) {
1900                         et_entry = pair * half_num_et_stats + j;
1901                         gbe_update_hw_stats_entry(gbe_dev, et_entry);
1902
1903                         if (data)
1904                                 data[et_entry] = gbe_dev->hw_stats[et_entry];
1905                 }
1906         }
1907 }
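/* On version 1.4 hardware only half of the statistics modules are visible
 * through the register space at a time (selected via GBE_STATS_CD_SEL in
 * stat_port_en), so the table is read in two passes: modules A/B first,
 * then C/D.
 */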
1908
1909 static void keystone_get_ethtool_stats(struct net_device *ndev,
1910                                        struct ethtool_stats *stats,
1911                                        uint64_t *data)
1912 {
1913         struct netcp_intf *netcp = netdev_priv(ndev);
1914         struct gbe_intf *gbe_intf;
1915         struct gbe_priv *gbe_dev;
1916
1917         gbe_intf = keystone_get_intf_data(netcp);
1918         if (!gbe_intf)
1919                 return;
1920
1921         gbe_dev = gbe_intf->gbe_dev;
1922         spin_lock_bh(&gbe_dev->hw_stats_lock);
1923         if (IS_SS_ID_VER_14(gbe_dev))
1924                 gbe_update_stats_ver14(gbe_dev, data);
1925         else
1926                 gbe_update_stats(gbe_dev, data);
1927         spin_unlock_bh(&gbe_dev->hw_stats_lock);
1928 }
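/* The bh-safe spinlock serializes this on-demand refresh of the cached
 * counters with any other updater of hw_stats/hw_stats_prev, so each
 * entry's baseline and running total stay consistent.
 */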
1929
1930 static int keystone_get_link_ksettings(struct net_device *ndev,
1931                                        struct ethtool_link_ksettings *cmd)
1932 {
1933         struct netcp_intf *netcp = netdev_priv(ndev);
1934         struct phy_device *phy = ndev->phydev;
1935         struct gbe_intf *gbe_intf;
1936
1937         if (!phy)
1938                 return -EINVAL;
1939
1940         gbe_intf = keystone_get_intf_data(netcp);
1941         if (!gbe_intf)
1942                 return -EINVAL;
1943
1944         if (!gbe_intf->slave)
1945                 return -EINVAL;
1946
1947         phy_ethtool_ksettings_get(phy, cmd);
1948         cmd->base.port = gbe_intf->slave->phy_port_t;
1949
1950         return 0;
1951 }
1952
1953 static int keystone_set_link_ksettings(struct net_device *ndev,
1954                                        const struct ethtool_link_ksettings *cmd)
1955 {
1956         struct netcp_intf *netcp = netdev_priv(ndev);
1957         struct phy_device *phy = ndev->phydev;
1958         struct gbe_intf *gbe_intf;
1959         u8 port = cmd->base.port;
1960         u32 advertising, supported;
1961         u32 features;
1962
1963         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1964                                                 cmd->link_modes.advertising);
1965         ethtool_convert_link_mode_to_legacy_u32(&supported,
1966                                                 cmd->link_modes.supported);
1967         features = advertising & supported;
1968
1969         if (!phy)
1970                 return -EINVAL;
1971
1972         gbe_intf = keystone_get_intf_data(netcp);
1973         if (!gbe_intf)
1974                 return -EINVAL;
1975
1976         if (!gbe_intf->slave)
1977                 return -EINVAL;
1978
1979         if (port != gbe_intf->slave->phy_port_t) {
1980                 if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1981                         return -EINVAL;
1982
1983                 if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1984                         return -EINVAL;
1985
1986                 if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1987                         return -EINVAL;
1988
1989                 if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1990                         return -EINVAL;
1991
1992                 if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1993                         return -EINVAL;
1994         }
1995
1996         gbe_intf->slave->phy_port_t = port;
1997         return phy_ethtool_ksettings_set(phy, cmd);
1998 }
1999
2000 #if IS_ENABLED(CONFIG_TI_CPTS)
2001 static int keystone_get_ts_info(struct net_device *ndev,
2002                                 struct ethtool_ts_info *info)
2003 {
2004         struct netcp_intf *netcp = netdev_priv(ndev);
2005         struct gbe_intf *gbe_intf;
2006
2007         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2008         if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2009                 return -EINVAL;
2010
2011         info->so_timestamping =
2012                 SOF_TIMESTAMPING_TX_HARDWARE |
2013                 SOF_TIMESTAMPING_TX_SOFTWARE |
2014                 SOF_TIMESTAMPING_RX_HARDWARE |
2015                 SOF_TIMESTAMPING_RX_SOFTWARE |
2016                 SOF_TIMESTAMPING_SOFTWARE |
2017                 SOF_TIMESTAMPING_RAW_HARDWARE;
2018         info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2019         info->tx_types =
2020                 (1 << HWTSTAMP_TX_OFF) |
2021                 (1 << HWTSTAMP_TX_ON);
2022         info->rx_filters =
2023                 (1 << HWTSTAMP_FILTER_NONE) |
2024                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2025                 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2026         return 0;
2027 }
2028 #else
2029 static int keystone_get_ts_info(struct net_device *ndev,
2030                                 struct ethtool_ts_info *info)
2031 {
2032         info->so_timestamping =
2033                 SOF_TIMESTAMPING_TX_SOFTWARE |
2034                 SOF_TIMESTAMPING_RX_SOFTWARE |
2035                 SOF_TIMESTAMPING_SOFTWARE;
2036         info->phc_index = -1;
2037         info->tx_types = 0;
2038         info->rx_filters = 0;
2039         return 0;
2040 }
2041 #endif /* CONFIG_TI_CPTS */
2042
2043 static const struct ethtool_ops keystone_ethtool_ops = {
2044         .get_drvinfo            = keystone_get_drvinfo,
2045         .get_link               = ethtool_op_get_link,
2046         .get_msglevel           = keystone_get_msglevel,
2047         .set_msglevel           = keystone_set_msglevel,
2048         .get_strings            = keystone_get_stat_strings,
2049         .get_sset_count         = keystone_get_sset_count,
2050         .get_ethtool_stats      = keystone_get_ethtool_stats,
2051         .get_link_ksettings     = keystone_get_link_ksettings,
2052         .set_link_ksettings     = keystone_set_link_ksettings,
2053         .get_ts_info            = keystone_get_ts_info,
2054 };
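/* Typical userspace usage of the ops above (interface name "eth0" is only
 * an example):
 *
 *   ethtool -i eth0      # driver name/version (get_drvinfo)
 *   ethtool -S eth0      # per-module hardware counters (tables above)
 *   ethtool -T eth0      # CPTS timestamping capabilities (get_ts_info)
 *   ethtool -s eth0 ...  # link settings, forwarded to the attached PHY
 */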
2055
2056 static void gbe_set_slave_mac(struct gbe_slave *slave,
2057                               struct gbe_intf *gbe_intf)
2058 {
2059         struct net_device *ndev = gbe_intf->ndev;
2060
2061         writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2062         writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2063 }
2064
2065 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2066 {
2067         if (priv->host_port == 0)
2068                 return slave_num + 1;
2069
2070         return slave_num;
2071 }
2072
2073 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2074                                           struct net_device *ndev,
2075                                           struct gbe_slave *slave,
2076                                           int up)
2077 {
2078         struct phy_device *phy = slave->phy;
2079         u32 mac_control = 0;
2080
2081         if (up) {
2082                 mac_control = slave->mac_control;
2083                 if (phy && (phy->speed == SPEED_1000)) {
2084                         mac_control |= MACSL_GIG_MODE;
2085                         mac_control &= ~MACSL_XGIG_MODE;
2086                 } else if (phy && (phy->speed == SPEED_10000)) {
2087                         mac_control |= MACSL_XGIG_MODE;
2088                         mac_control &= ~MACSL_GIG_MODE;
2089                 }
2090
2091                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2092                                                  mac_control));
2093
2094                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2095                                      ALE_PORT_STATE,
2096                                      ALE_PORT_STATE_FORWARD);
2097
2098                 if (ndev && slave->open &&
2099                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2100                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2101                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2102                         netif_carrier_on(ndev);
2103         } else {
2104                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2105                                                  mac_control));
2106                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2107                                      ALE_PORT_STATE,
2108                                      ALE_PORT_STATE_DISABLE);
2109                 if (ndev &&
2110                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2111                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2112                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2113                         netif_carrier_off(ndev);
2114         }
2115
2116         if (phy)
2117                 phy_print_status(phy);
2118 }
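/* Carrier is only toggled directly above for link types without a PHY
 * attached (i.e. not the *_LINK_MAC_PHY modes); when a PHY is present,
 * phylib is expected to drive the carrier state itself.
 */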
2119
2120 static bool gbe_phy_link_status(struct gbe_slave *slave)
2121 {
2122         return !slave->phy || slave->phy->link;
2123 }
2124
2125 #define RGMII_REG_STATUS_LINK   BIT(0)
2126
2127 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2128 {
2129         u32 val = 0;
2130
2131         val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2132         *status = !!(val & RGMII_REG_STATUS_LINK);
2133 }
2134
2135 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2136                                           struct gbe_slave *slave,
2137                                           struct net_device *ndev)
2138 {
2139         bool sw_link_state = true, phy_link_state;
2140         int sp = slave->slave_num, link_state;
2141
2142         if (!slave->open)
2143                 return;
2144
2145         if (SLAVE_LINK_IS_RGMII(slave))
2146                 netcp_2u_rgmii_get_port_link(gbe_dev,
2147                                              &sw_link_state);
2148         if (SLAVE_LINK_IS_SGMII(slave))
2149                 sw_link_state =
2150                 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2151
2152         phy_link_state = gbe_phy_link_status(slave);
2153         link_state = phy_link_state & sw_link_state;
2154
2155         if (atomic_xchg(&slave->link_state, link_state) != link_state)
2156                 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2157                                               link_state);
2158 }
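/* The effective link state is the AND of the PHY link (if a PHY is
 * attached) and the in-band SGMII/RGMII status read from the subsystem;
 * atomic_xchg() ensures the up/down action runs only on an actual change.
 */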
2159
2160 static void xgbe_adjust_link(struct net_device *ndev)
2161 {
2162         struct netcp_intf *netcp = netdev_priv(ndev);
2163         struct gbe_intf *gbe_intf;
2164
2165         gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2166         if (!gbe_intf)
2167                 return;
2168
2169         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2170                                       ndev);
2171 }
2172
2173 static void gbe_adjust_link(struct net_device *ndev)
2174 {
2175         struct netcp_intf *netcp = netdev_priv(ndev);
2176         struct gbe_intf *gbe_intf;
2177
2178         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2179         if (!gbe_intf)
2180                 return;
2181
2182         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2183                                       ndev);
2184 }
2185
2186 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2187 {
2188         struct gbe_priv *gbe_dev = netdev_priv(ndev);
2189         struct gbe_slave *slave;
2190
2191         for_each_sec_slave(slave, gbe_dev)
2192                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2193 }
2194
2195 /* Reset EMAC
2196  * Soft reset is set and polled until clear, or until a timeout occurs
2197  */
2198 static int gbe_port_reset(struct gbe_slave *slave)
2199 {
2200         u32 i, v;
2201
2202         /* Set the soft reset bit */
2203         writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2204
2205         /* Wait for the bit to clear */
2206         for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2207                 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2208                 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2209                         return 0;
2210         }
2211
2212         /* Timeout on the reset */
2213         return GMACSL_RET_WARN_RESET_INCOMPLETE;
2214 }
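/* The reset bit is polled back-to-back (no delay between reads) up to
 * DEVICE_EMACSL_RESET_POLL_COUNT times; if it never clears, the function
 * returns GMACSL_RET_WARN_RESET_INCOMPLETE rather than a hard error.
 */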
2215
2216 /* Configure EMAC */
2217 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2218                             int max_rx_len)
2219 {
2220         void __iomem *rx_maxlen_reg;
2221         u32 xgmii_mode;
2222
2223         if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2224                 max_rx_len = NETCP_MAX_FRAME_SIZE;
2225
2226         /* Enable correct MII mode at SS level */
2227         if (IS_SS_ID_XGBE(gbe_dev) &&
2228             (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2229                 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2230                 xgmii_mode |= (1 << slave->slave_num);
2231                 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2232         }
2233
2234         if (IS_SS_ID_MU(gbe_dev))
2235                 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2236         else
2237                 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2238
2239         writel(max_rx_len, rx_maxlen_reg);
2240         writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2241 }
2242
2243 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2244                               struct gbe_slave *slave, bool set)
2245 {
2246         if (SLAVE_LINK_IS_XGMII(slave))
2247                 return;
2248
2249         netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2250                             slave->slave_num, set);
2251 }
2252
2253 static void gbe_slave_stop(struct gbe_intf *intf)
2254 {
2255         struct gbe_priv *gbe_dev = intf->gbe_dev;
2256         struct gbe_slave *slave = intf->slave;
2257
2258         if (!IS_SS_ID_2U(gbe_dev))
2259                 gbe_sgmii_rtreset(gbe_dev, slave, true);
2260         gbe_port_reset(slave);
2261         /* Disable forwarding */
2262         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2263                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2264         cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2265                            1 << slave->port_num, 0, 0);
2266
2267         if (!slave->phy)
2268                 return;
2269
2270         phy_stop(slave->phy);
2271         phy_disconnect(slave->phy);
2272         slave->phy = NULL;
2273 }
2274
2275 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2276 {
2277         if (SLAVE_LINK_IS_XGMII(slave))
2278                 return;
2279
2280         netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2281         netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2282                            slave->link_interface);
2283 }
2284
2285 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2286 {
2287         struct gbe_priv *priv = gbe_intf->gbe_dev;
2288         struct gbe_slave *slave = gbe_intf->slave;
2289         phy_interface_t phy_mode;
2290         bool has_phy = false;
2291         int err;
2292
2293         void (*hndlr)(struct net_device *) = gbe_adjust_link;
2294
2295         if (!IS_SS_ID_2U(priv))
2296                 gbe_sgmii_config(priv, slave);
2297         gbe_port_reset(slave);
2298         if (!IS_SS_ID_2U(priv))
2299                 gbe_sgmii_rtreset(priv, slave, false);
2300         gbe_port_config(priv, slave, priv->rx_packet_max);
2301         gbe_set_slave_mac(slave, gbe_intf);
2302         /* For NU & 2U switches, map the VLAN priorities to zero
2303          * as we only configure to use priority 0
2304          */
2305         if (IS_SS_ID_MU(priv))
2306                 writel(HOST_TX_PRI_MAP_DEFAULT,
2307                        GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2308
2309         /* enable forwarding */
2310         cpsw_ale_control_set(priv->ale, slave->port_num,
2311                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2312         cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2313                            1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2314
2315         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2316                 has_phy = true;
2317                 phy_mode = PHY_INTERFACE_MODE_SGMII;
2318                 slave->phy_port_t = PORT_MII;
2319         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2320                 has_phy = true;
2321                 err = of_get_phy_mode(slave->node, &phy_mode);
2322                 /* if phy-mode is not present, default to
2323                  * PHY_INTERFACE_MODE_RGMII
2324                  */
2325                 if (err)
2326                         phy_mode = PHY_INTERFACE_MODE_RGMII;
2327
2328                 if (!phy_interface_mode_is_rgmii(phy_mode)) {
2329                         dev_err(priv->dev,
2330                                 "Unsupported phy mode %d\n", phy_mode);
2331                         return -EINVAL;
2332                 }
2333                 slave->phy_port_t = PORT_MII;
2334         } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2335                 has_phy = true;
2336                 phy_mode = PHY_INTERFACE_MODE_NA;
2337                 slave->phy_port_t = PORT_FIBRE;
2338         }
2339
2340         if (has_phy) {
2341                 if (IS_SS_ID_XGBE(priv))
2342                         hndlr = xgbe_adjust_link;
2343
2344                 slave->phy = of_phy_connect(gbe_intf->ndev,
2345                                             slave->phy_node,
2346                                             hndlr, 0,
2347                                             phy_mode);
2348                 if (!slave->phy) {
2349                         dev_err(priv->dev, "phy not found on slave %d\n",
2350                                 slave->slave_num);
2351                         return -ENODEV;
2352                 }
2353                 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2354                         phydev_name(slave->phy));
2355                 phy_start(slave->phy);
2356         }
2357         return 0;
2358 }
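/* Slave open sequence above, in short: (re)configure the SGMII lane unless
 * this is a 2U subsystem, reset and configure the MAC port, program the
 * port MAC address, enable ALE forwarding plus broadcast membership, and
 * finally connect and start a PHY for the *_LINK_MAC_PHY link types.
 */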
2359
2360 static void gbe_init_host_port(struct gbe_priv *priv)
2361 {
2362         int bypass_en = 1;
2363
2364         /* Host Tx Pri */
2365         if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2366                 writel(HOST_TX_PRI_MAP_DEFAULT,
2367                        GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2368
2369         /* Max length register */
2370         writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2371                                                   rx_maxlen));
2372
2373         cpsw_ale_start(priv->ale);
2374
2375         if (priv->enable_ale)
2376                 bypass_en = 0;
2377
2378         cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2379
2380         cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2381
2382         cpsw_ale_control_set(priv->ale, priv->host_port,
2383                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2384
2385         cpsw_ale_control_set(priv->ale, 0,
2386                              ALE_PORT_UNKNOWN_VLAN_MEMBER,
2387                              GBE_PORT_MASK(priv->ale_ports));
2388
2389         cpsw_ale_control_set(priv->ale, 0,
2390                              ALE_PORT_UNKNOWN_MCAST_FLOOD,
2391                              GBE_PORT_MASK(priv->ale_ports - 1));
2392
2393         cpsw_ale_control_set(priv->ale, 0,
2394                              ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2395                              GBE_PORT_MASK(priv->ale_ports));
2396
2397         cpsw_ale_control_set(priv->ale, 0,
2398                              ALE_PORT_UNTAGGED_EGRESS,
2399                              GBE_PORT_MASK(priv->ale_ports));
2400 }
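/* Host-port bring-up above, roughly: start the ALE, run it in bypass mode
 * unless ALE processing was requested (enable_ale), disable per-port VLANs,
 * put the host port into forwarding, and open the unknown-VLAN membership,
 * flood and untagged-egress masks to the switch ports.
 */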
2401
2402 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2403 {
2404         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2405         u16 vlan_id;
2406
2407         cpsw_ale_add_mcast(gbe_dev->ale, addr,
2408                            GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2409                            ALE_MCAST_FWD_2);
2410         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2411                 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2412                                    GBE_PORT_MASK(gbe_dev->ale_ports),
2413                                    ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2414         }
2415 }
2416
2417 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2418 {
2419         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2420         u16 vlan_id;
2421
2422         cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2423
2424         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2425                 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2426                                    ALE_VLAN, vlan_id);
2427 }
2428
2429 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2430 {
2431         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2432         u16 vlan_id;
2433
2434         cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2435
2436         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2437                 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2438         }
2439 }
2440
2441 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2442 {
2443         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2444         u16 vlan_id;
2445
2446         cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2447
2448         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2449                 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2450                                    ALE_VLAN, vlan_id);
2451         }
2452 }
2453
2454 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2455 {
2456         struct gbe_intf *gbe_intf = intf_priv;
2457         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2458
2459         dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2460                 naddr->addr, naddr->type);
2461
2462         switch (naddr->type) {
2463         case ADDR_MCAST:
2464         case ADDR_BCAST:
2465                 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2466                 break;
2467         case ADDR_UCAST:
2468         case ADDR_DEV:
2469                 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2470                 break;
2471         case ADDR_ANY:
2472                 /* nothing to do for promiscuous */
2473         default:
2474                 break;
2475         }
2476
2477         return 0;
2478 }
2479
2480 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2481 {
2482         struct gbe_intf *gbe_intf = intf_priv;
2483         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2484
2485         dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2486                 naddr->addr, naddr->type);
2487
2488         switch (naddr->type) {
2489         case ADDR_MCAST:
2490         case ADDR_BCAST:
2491                 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2492                 break;
2493         case ADDR_UCAST:
2494         case ADDR_DEV:
2495                 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2496                 break;
2497         case ADDR_ANY:
2498                 /* nothing to do for promiscuous */
2499         default:
2500                 break;
2501         }
2502
2503         return 0;
2504 }
2505
2506 static int gbe_add_vid(void *intf_priv, int vid)
2507 {
2508         struct gbe_intf *gbe_intf = intf_priv;
2509         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2510
2511         set_bit(vid, gbe_intf->active_vlans);
2512
2513         cpsw_ale_add_vlan(gbe_dev->ale, vid,
2514                           GBE_PORT_MASK(gbe_dev->ale_ports),
2515                           GBE_MASK_NO_PORTS,
2516                           GBE_PORT_MASK(gbe_dev->ale_ports),
2517                           GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2518
2519         return 0;
2520 }
2521
2522 static int gbe_del_vid(void *intf_priv, int vid)
2523 {
2524         struct gbe_intf *gbe_intf = intf_priv;
2525         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2526
2527         cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2528         clear_bit(vid, gbe_intf->active_vlans);
2529         return 0;
2530 }
2531
2532 #if IS_ENABLED(CONFIG_TI_CPTS)
2533
2534 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2535 {
2536         struct gbe_intf *gbe_intf = context;
2537         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2538
2539         cpts_tx_timestamp(gbe_dev->cpts, skb);
2540 }
2541
2542 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2543                               const struct netcp_packet *p_info)
2544 {
2545         struct sk_buff *skb = p_info->skb;
2546
2547         return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2548 }
2549
2550 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2551                                  struct netcp_packet *p_info)
2552 {
2553         struct phy_device *phydev = p_info->skb->dev->phydev;
2554         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2555
2556         if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2557             !gbe_dev->tx_ts_enabled)
2558                 return 0;
2559
2560         /* If the phy provides a txtstamp API, assume it will do the timestamping.
2561          * We mark it here because skb_tx_timestamp() is called
2562          * after all the txhooks are called.
2563          */
2564         if (phy_has_txtstamp(phydev)) {
2565                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2566                 return 0;
2567         }
2568
2569         if (gbe_need_txtstamp(gbe_intf, p_info)) {
2570                 p_info->txtstamp = gbe_txtstamp;
2571                 p_info->ts_context = (void *)gbe_intf;
2572                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2573         }
2574
2575         return 0;
2576 }
2577
2578 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2579 {
2580         struct phy_device *phydev = p_info->skb->dev->phydev;
2581         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2582
2583         if (p_info->rxtstamp_complete)
2584                 return 0;
2585
2586         if (phy_has_rxtstamp(phydev)) {
2587                 p_info->rxtstamp_complete = true;
2588                 return 0;
2589         }
2590
2591         if (gbe_dev->rx_ts_enabled)
2592                 cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2593
2594         p_info->rxtstamp_complete = true;
2595
2596         return 0;
2597 }
2598
2599 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2600 {
2601         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2602         struct cpts *cpts = gbe_dev->cpts;
2603         struct hwtstamp_config cfg;
2604
2605         if (!cpts)
2606                 return -EOPNOTSUPP;
2607
2608         cfg.flags = 0;
2609         cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2610         cfg.rx_filter = gbe_dev->rx_ts_enabled;
2611
2612         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2613 }
2614
2615 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2616 {
2617         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2618         struct gbe_slave *slave = gbe_intf->slave;
2619         u32 ts_en, seq_id, ctl;
2620
2621         if (!gbe_dev->rx_ts_enabled &&
2622             !gbe_dev->tx_ts_enabled) {
2623                 writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2624                 return;
2625         }
2626
2627         seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2628         ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2629         ctl = ETH_P_1588 | TS_TTL_NONZERO |
2630                 (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2631                 (slave->ts_ctl.uni ?  TS_UNI_EN :
2632                         slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2633
2634         if (gbe_dev->tx_ts_enabled)
2635                 ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2636
2637         if (gbe_dev->rx_ts_enabled)
2638                 ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2639
2640         writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2641         writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2642         writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2643 }
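/* gbe_hwtstamp() above programs the per-port timestamping registers:
 * annotate PTP event messages (EVENT_MSG_BITS) for whichever of TX/RX is
 * enabled, match the PTP EtherType (ETH_P_1588), with 30 presumably the
 * byte offset of the sequence-id field within the PTP header, and apply
 * the per-slave destination-port/multicast-address mapping from ts_ctl.
 * When both directions are off, ts_ctl is simply zeroed.
 */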
2644
2645 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2646 {
2647         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2648         struct cpts *cpts = gbe_dev->cpts;
2649         struct hwtstamp_config cfg;
2650
2651         if (!cpts)
2652                 return -EOPNOTSUPP;
2653
2654         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2655                 return -EFAULT;
2656
2657         /* reserved for future extensions */
2658         if (cfg.flags)
2659                 return -EINVAL;
2660
2661         switch (cfg.tx_type) {
2662         case HWTSTAMP_TX_OFF:
2663                 gbe_dev->tx_ts_enabled = 0;
2664                 break;
2665         case HWTSTAMP_TX_ON:
2666                 gbe_dev->tx_ts_enabled = 1;
2667                 break;
2668         default:
2669                 return -ERANGE;
2670         }
2671
2672         switch (cfg.rx_filter) {
2673         case HWTSTAMP_FILTER_NONE:
2674                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2675                 break;
2676         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2677         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2678         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2679                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2680                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2681                 break;
2682         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2683         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2684         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2685         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2686         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2687         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2688         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2689         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2690         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2691                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2692                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2693                 break;
2694         default:
2695                 return -ERANGE;
2696         }
2697
2698         gbe_hwtstamp(gbe_intf);
2699
2700         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2701 }
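/* The requested RX filter is collapsed to the two classes supported here:
 * PTP v1 over UDP/IPv4 events, or PTP v2 events on any transport.  This
 * mirrors the rx_filters advertised by keystone_get_ts_info().
 */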
2702
2703 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2704 {
2705         if (!gbe_dev->cpts)
2706                 return;
2707
2708         if (gbe_dev->cpts_registered > 0)
2709                 goto done;
2710
2711         if (cpts_register(gbe_dev->cpts)) {
2712                 dev_err(gbe_dev->dev, "error registering cpts device\n");
2713                 return;
2714         }
2715
2716 done:
2717         ++gbe_dev->cpts_registered;
2718 }
2719
2720 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2721 {
2722         if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2723                 return;
2724
2725         if (--gbe_dev->cpts_registered)
2726                 return;
2727
2728         cpts_unregister(gbe_dev->cpts);
2729 }
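/* cpts_registered acts as a reference count on what is presumably a CPTS
 * block shared by several interfaces: the first caller registers it, and
 * it is only unregistered once the count drops back to zero.
 */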
2730 #else
2731 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2732                                         struct netcp_packet *p_info)
2733 {
2734         return 0;
2735 }
2736
2737 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2738                                struct netcp_packet *p_info)
2739 {
2740         return 0;
2741 }
2742
2743 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2744                                struct ifreq *ifr, int cmd)
2745 {
2746         return -EOPNOTSUPP;
2747 }
2748
2749 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2750 {
2751 }
2752
2753 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2754 {
2755 }
2756
2757 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2758 {
2759         return -EOPNOTSUPP;
2760 }
2761
2762 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2763 {
2764         return -EOPNOTSUPP;
2765 }
2766 #endif /* CONFIG_TI_CPTS */
2767
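     /*
      * gbe_set_rx_mode() - switch the ALE between normal and promiscuous
      * operation.  Entering promiscuous mode disables address learning on
      * all ports, ages out stale entries, flushes multicast entries and
      * floods all unicast traffic to the host port; leaving it re-enables
      * learning and turns unicast flooding to the host off again.
      */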
2768 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2769 {
2770         struct gbe_intf *gbe_intf = intf_priv;
2771         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2772         struct cpsw_ale *ale = gbe_dev->ale;
2773         unsigned long timeout;
2774         int i, ret = -ETIMEDOUT;
2775
2776         /* Disable (promisc = 1) or enable (promisc = 0) address learning
2777          * on all ports (host is port 0, slaves are ports 1 and up).
2778          */
2779         for (i = 0; i <= gbe_dev->num_slaves; i++) {
2780                 cpsw_ale_control_set(ale, i,
2781                                      ALE_PORT_NOLEARN, !!promisc);
2782                 cpsw_ale_control_set(ale, i,
2783                                      ALE_PORT_NO_SA_UPDATE, !!promisc);
2784         }
2785
2786         if (!promisc) {
2787                 /* Don't Flood All Unicast Packets to Host port */
2788                 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2789                 dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2790                 return 0;
2791         }
2792
2793         timeout = jiffies + HZ;
2794
2795         /* Age out all untouched (unused) ALE entries */
2796         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2797         do {
2798                 cpu_relax();
2799                 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2800                         ret = 0;
2801                         break;
2802                 }
2803
2804         } while (time_after(timeout, jiffies));
2805
2806         /* Make sure it is not a false timeout */
2807         if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2808                 return ret;
2809
2810         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2811
2812         /* Clear all mcast from ALE */
2813         cpsw_ale_flush_multicast(ale,
2814                                  GBE_PORT_MASK(gbe_dev->ale_ports),
2815                                  -1);
2816
2817         /* Flood All Unicast Packets to Host port */
2818         cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2819         dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2820         return ret;
2821 }
2822
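     /*
      * Hardware timestamping ioctls are handled by the switch (CPTS) only
      * when the attached PHY does not implement its own timestamping;
      * everything else is passed through to the PHY via phy_mii_ioctl().
      */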
2823 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2824 {
2825         struct gbe_intf *gbe_intf = intf_priv;
2826         struct phy_device *phy = gbe_intf->slave->phy;
2827
2828         if (!phy_has_hwtstamp(phy)) {
2829                 switch (cmd) {
2830                 case SIOCGHWTSTAMP:
2831                         return gbe_hwtstamp_get(gbe_intf, req);
2832                 case SIOCSHWTSTAMP:
2833                         return gbe_hwtstamp_set(gbe_intf, req);
2834                 }
2835         }
2836
2837         if (phy)
2838                 return phy_mii_ioctl(phy, req, cmd);
2839
2840         return -EOPNOTSUPP;
2841 }
2842
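     /*
      * Periodic housekeeping: poll and update the link state of every open
      * interface and of the secondary slave ports, then fold the hardware
      * statistics counters into the 64-bit software accumulators before
      * rearming the timer.
      */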
2843 static void netcp_ethss_timer(struct timer_list *t)
2844 {
2845         struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2846         struct gbe_intf *gbe_intf;
2847         struct gbe_slave *slave;
2848
2849         /* Check & update SGMII link state of interfaces */
2850         for_each_intf(gbe_intf, gbe_dev) {
2851                 if (!gbe_intf->slave->open)
2852                         continue;
2853                 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2854                                               gbe_intf->ndev);
2855         }
2856
2857         /* Check & update SGMII link state of secondary ports */
2858         for_each_sec_slave(slave, gbe_dev) {
2859                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2860         }
2861
2862         /* The timer callback runs in softirq (BH) context; plain spin_lock() suffices */
2863         spin_lock(&gbe_dev->hw_stats_lock);
2864
2865         if (IS_SS_ID_VER_14(gbe_dev))
2866                 gbe_update_stats_ver14(gbe_dev, NULL);
2867         else
2868                 gbe_update_stats(gbe_dev, NULL);
2869
2870         spin_unlock(&gbe_dev->hw_stats_lock);
2871
2872         gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
2873         add_timer(&gbe_dev->timer);
2874 }
2875
2876 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2877 {
2878         struct gbe_intf *gbe_intf = data;
2879
2880         p_info->tx_pipe = &gbe_intf->tx_pipe;
2881
2882         return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2883 }
2884
2885 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2886 {
2887         struct gbe_intf *gbe_intf = data;
2888
2889         return gbe_rxtstamp(gbe_intf, p_info);
2890 }
2891
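     /*
      * gbe_open() - bring up one slave interface: select the TX pipe
      * destination, program the switch control and statistics registers,
      * start the slave port, register the TX/RX hooks with netcp and,
      * when available, register CPTS.
      */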
2892 static int gbe_open(void *intf_priv, struct net_device *ndev)
2893 {
2894         struct gbe_intf *gbe_intf = intf_priv;
2895         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2896         struct netcp_intf *netcp = netdev_priv(ndev);
2897         struct gbe_slave *slave = gbe_intf->slave;
2898         int port_num = slave->port_num;
2899         u32 reg, val;
2900         int ret;
2901
2902         reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2903         dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d), GBE identification value 0x%x\n",
2904                 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2905                 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2906
2907         /* For 10G (XGBE) and NetCP 1.5 (NU/2U), direct packets to the port via tag info */
2908         if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2909                 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2910
2911         if (gbe_dev->enable_ale)
2912                 gbe_intf->tx_pipe.switch_to_port = 0;
2913         else
2914                 gbe_intf->tx_pipe.switch_to_port = port_num;
2915
2916         dev_dbg(gbe_dev->dev,
2917                 "opened TX channel %s: %p, switch to port %d, flags %d\n",
2918                 gbe_intf->tx_pipe.dma_chan_name,
2919                 gbe_intf->tx_pipe.dma_channel,
2920                 gbe_intf->tx_pipe.switch_to_port,
2921                 gbe_intf->tx_pipe.flags);
2922
2923         gbe_slave_stop(gbe_intf);
2924
2925         /* disable priority elevation (per-port stats are enabled below via stat_port_en) */
2926         writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2927
2928         /* Control register */
2929         val = GBE_CTL_P0_ENABLE;
2930         if (IS_SS_ID_MU(gbe_dev)) {
2931                 val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2932                 netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2933         }
2934         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2935
2936         /* All statistics enabled and STAT AB visible by default */
2937         writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2938                                                     stat_port_en));
2939
2940         ret = gbe_slave_open(gbe_intf);
2941         if (ret)
2942                 goto fail;
2943
2944         netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2945         netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2946
2947         slave->open = true;
2948         netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2949
2950         gbe_register_cpts(gbe_dev);
2951
2952         return 0;
2953
2954 fail:
2955         gbe_slave_stop(gbe_intf);
2956         return ret;
2957 }
2958
2959 static int gbe_close(void *intf_priv, struct net_device *ndev)
2960 {
2961         struct gbe_intf *gbe_intf = intf_priv;
2962         struct netcp_intf *netcp = netdev_priv(ndev);
2963         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2964
2965         gbe_unregister_cpts(gbe_dev);
2966
2967         gbe_slave_stop(gbe_intf);
2968
2969         netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2970         netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2971
2972         gbe_intf->slave->open = false;
2973         atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2974         return 0;
2975 }
2976
2977 #if IS_ENABLED(CONFIG_TI_CPTS)
2978 static void init_slave_ts_ctl(struct gbe_slave *slave)
2979 {
2980         slave->ts_ctl.uni = 1;
2981         slave->ts_ctl.dst_port_map =
2982                 (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2983         slave->ts_ctl.maddr_map =
2984                 (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2985 }
2986
2987 #else
2988 static void init_slave_ts_ctl(struct gbe_slave *slave)
2989 {
2990 }
2991 #endif /* CONFIG_TI_CPTS */
2992
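     /*
      * init_slave() - parse the "slave-port", "link-interface" and
      * "phy-handle" properties of one slave DT node and derive the slave's
      * port/EMAC register addresses and register offset tables, which
      * differ between the GBE 1.4, GBENU and XGBE subsystems.
      */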
2993 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2994                       struct device_node *node)
2995 {
2996         int port_reg_num;
2997         u32 port_reg_ofs, emac_reg_ofs;
2998         u32 port_reg_blk_sz, emac_reg_blk_sz;
2999
3000         if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
3001                 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
3002                 return -EINVAL;
3003         }
3004
3005         if (of_property_read_u32(node, "link-interface",
3006                                  &slave->link_interface)) {
3007                 dev_warn(gbe_dev->dev,
3008                          "missing link-interface value defaulting to 1G mac-phy link\n");
3009                 slave->link_interface = SGMII_LINK_MAC_PHY;
3010         }
3011
3012         slave->node = node;
3013         slave->open = false;
3014         if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3015             (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3016             (slave->link_interface == XGMII_LINK_MAC_PHY))
3017                 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3018         slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3019
3020         if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3021                 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3022         else
3023                 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3024
3025         /* EMAC register blocks are mapped contiguously, but port register blocks are not */
3026         port_reg_num = slave->slave_num;
3027         if (IS_SS_ID_VER_14(gbe_dev)) {
3028                 if (slave->slave_num > 1) {
3029                         port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3030                         port_reg_num -= 2;
3031                 } else {
3032                         port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3033                 }
3034                 emac_reg_ofs = GBE13_EMAC_OFFSET;
3035                 port_reg_blk_sz = 0x30;
3036                 emac_reg_blk_sz = 0x40;
3037         } else if (IS_SS_ID_MU(gbe_dev)) {
3038                 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3039                 emac_reg_ofs = GBENU_EMAC_OFFSET;
3040                 port_reg_blk_sz = 0x1000;
3041                 emac_reg_blk_sz = 0x1000;
3042         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3043                 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3044                 emac_reg_ofs = XGBE10_EMAC_OFFSET;
3045                 port_reg_blk_sz = 0x30;
3046                 emac_reg_blk_sz = 0x40;
3047         } else {
3048                 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3049                         gbe_dev->ss_version);
3050                 return -EINVAL;
3051         }
3052
3053         slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3054                                 (port_reg_blk_sz * port_reg_num);
3055         slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3056                                 (emac_reg_blk_sz * slave->slave_num);
3057
3058         if (IS_SS_ID_VER_14(gbe_dev)) {
3059                 /* Initialize  slave port register offsets */
3060                 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3061                 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3062                 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3063                 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3064                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3065                 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3066                 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3067                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3068                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3069
3070                 /* Initialize EMAC register offsets */
3071                 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3072                 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3073                 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3074
3075         } else if (IS_SS_ID_MU(gbe_dev)) {
3076                 /* Initialize  slave port register offsets */
3077                 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3078                 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3079                 GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3080                 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3081                 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3082                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3083                 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3084                 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3085                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3086                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3087                 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3088
3089                 /* Initialize EMAC register offsets */
3090                 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3091                 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3092
3093         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3094                 /* Initialize  slave port register offsets */
3095                 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3096                 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3097                 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3098                 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3099                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3100                 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3101                 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3102                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3103                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3104
3105                 /* Initialize EMAC register offsets */
3106                 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3107                 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3108                 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3109         }
3110
3111         atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3112
3113         init_slave_ts_ctl(slave);
3114         return 0;
3115 }
3116
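     /*
      * Secondary slave ports are switch ports without a network interface
      * of their own.  They are configured and opened once at probe time;
      * MAC-to-PHY links are attached to a dummy net_device so that phylib
      * can drive them.
      */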
3117 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3118                                  struct device_node *node)
3119 {
3120         struct device *dev = gbe_dev->dev;
3121         phy_interface_t phy_mode;
3122         struct gbe_priv **priv;
3123         struct device_node *port;
3124         struct gbe_slave *slave;
3125         bool mac_phy_link = false;
3126
3127         for_each_child_of_node(node, port) {
3128                 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3129                 if (!slave) {
3130                         dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3131                                 port);
3132                         continue;
3133                 }
3134
3135                 if (init_slave(gbe_dev, slave, port)) {
3136                         dev_err(dev,
3137                                 "Failed to initialize secondary port(%pOFn), skipping...\n",
3138                                 port);
3139                         devm_kfree(dev, slave);
3140                         continue;
3141                 }
3142
3143                 if (!IS_SS_ID_2U(gbe_dev))
3144                         gbe_sgmii_config(gbe_dev, slave);
3145                 gbe_port_reset(slave);
3146                 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3147                 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3148                 gbe_dev->num_slaves++;
3149                 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3150                     (slave->link_interface == XGMII_LINK_MAC_PHY))
3151                         mac_phy_link = true;
3152
3153                 slave->open = true;
3154                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3155                         of_node_put(port);
3156                         break;
3157                 }
3158         }
3159
3160         /* of_phy_connect() is needed only for MAC-PHY interface */
3161         if (!mac_phy_link)
3162                 return;
3163
3164         /* Allocate dummy netdev device for attaching to phy device */
3165         gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3166                                         NET_NAME_UNKNOWN, ether_setup);
3167         if (!gbe_dev->dummy_ndev) {
3168                 dev_err(dev,
3169                         "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3170                 return;
3171         }
3172         priv = netdev_priv(gbe_dev->dummy_ndev);
3173         *priv = gbe_dev;
3174
3175         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3176                 phy_mode = PHY_INTERFACE_MODE_SGMII;
3177                 slave->phy_port_t = PORT_MII;
3178         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3179                 phy_mode = PHY_INTERFACE_MODE_RGMII;
3180                 slave->phy_port_t = PORT_MII;
3181         } else {
3182                 phy_mode = PHY_INTERFACE_MODE_NA;
3183                 slave->phy_port_t = PORT_FIBRE;
3184         }
3185
3186         for_each_sec_slave(slave, gbe_dev) {
3187                 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3188                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3189                     (slave->link_interface != XGMII_LINK_MAC_PHY))
3190                         continue;
3191                 slave->phy =
3192                         of_phy_connect(gbe_dev->dummy_ndev,
3193                                        slave->phy_node,
3194                                        gbe_adjust_link_sec_slaves,
3195                                        0, phy_mode);
3196                 if (!slave->phy) {
3197                         dev_err(dev, "phy not found for slave %d\n",
3198                                 slave->slave_num);
3199                 } else {
3200                         dev_dbg(dev, "phy found: id is: 0x%s\n",
3201                                 phydev_name(slave->phy));
3202                         phy_start(slave->phy);
3203                 }
3204         }
3205 }
3206
3207 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3208 {
3209         struct gbe_slave *slave;
3210
3211         while (!list_empty(&gbe_dev->secondary_slaves)) {
3212                 slave = first_sec_slave(gbe_dev);
3213
3214                 if (slave->phy)
3215                         phy_disconnect(slave->phy);
3216                 list_del(&slave->slave_list);
3217         }
3218         if (gbe_dev->dummy_ndev)
3219                 free_netdev(gbe_dev->dummy_ndev);
3220 }
3221
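     /*
      * Map the XGBE subsystem, switch module and SerDes register regions,
      * allocate the statistics buffers and fill in the XGBE-specific
      * register offsets and defaults.
      */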
3222 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3223                                  struct device_node *node)
3224 {
3225         struct resource res;
3226         void __iomem *regs;
3227         int ret, i;
3228
3229         ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3230         if (ret) {
3231                 dev_err(gbe_dev->dev,
3232                         "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3233                         node, XGBE_SS_REG_INDEX);
3234                 return ret;
3235         }
3236
3237         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3238         if (IS_ERR(regs)) {
3239                 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3240                 return PTR_ERR(regs);
3241         }
3242         gbe_dev->ss_regs = regs;
3243
3244         ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3245         if (ret) {
3246                 dev_err(gbe_dev->dev,
3247                         "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3248                         node, XGBE_SM_REG_INDEX);
3249                 return ret;
3250         }
3251
3252         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3253         if (IS_ERR(regs)) {
3254                 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3255                 return PTR_ERR(regs);
3256         }
3257         gbe_dev->switch_regs = regs;
3258
3259         ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3260         if (ret) {
3261                 dev_err(gbe_dev->dev,
3262                         "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3263                         node, XGBE_SERDES_REG_INDEX);
3264                 return ret;
3265         }
3266
3267         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3268         if (IS_ERR(regs)) {
3269                 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3270                 return PTR_ERR(regs);
3271         }
3272         gbe_dev->xgbe_serdes_regs = regs;
3273
3274         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3275         gbe_dev->et_stats = xgbe10_et_stats;
3276         gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3277
3278         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3279                                          gbe_dev->num_et_stats, sizeof(u64),
3280                                          GFP_KERNEL);
3281         if (!gbe_dev->hw_stats) {
3282                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3283                 return -ENOMEM;
3284         }
3285
3286         gbe_dev->hw_stats_prev =
3287                 devm_kcalloc(gbe_dev->dev,
3288                              gbe_dev->num_et_stats, sizeof(u32),
3289                              GFP_KERNEL);
3290         if (!gbe_dev->hw_stats_prev) {
3291                 dev_err(gbe_dev->dev,
3292                         "hw_stats_prev memory allocation failed\n");
3293                 return -ENOMEM;
3294         }
3295
3296         gbe_dev->ss_version = XGBE_SS_VERSION_10;
3297         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3298                                         XGBE10_SGMII_MODULE_OFFSET;
3299         gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3300
3301         for (i = 0; i < gbe_dev->max_num_ports; i++)
3302                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3303                         XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3304
3305         gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3306         gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3307         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3308         gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3309         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3310
3311         /* Subsystem registers */
3312         XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3313         XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3314
3315         /* Switch module registers */
3316         XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3317         XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3318         XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3319         XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3320         XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3321
3322         /* Host port registers */
3323         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3324         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3325         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3326         return 0;
3327 }
3328
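     /*
      * Map the GBE subsystem register region and read the ID/version
      * register, which selects between the 1.4 (GBE13) and NU/2U register
      * layouts during probe.
      */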
3329 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3330                                     struct device_node *node)
3331 {
3332         struct resource res;
3333         void __iomem *regs;
3334         int ret;
3335
3336         ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3337         if (ret) {
3338                 dev_err(gbe_dev->dev,
3339                         "Can't translate of node(%pOFn) of gbe ss address at %d\n",
3340                         node, GBE_SS_REG_INDEX);
3341                 return ret;
3342         }
3343
3344         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3345         if (IS_ERR(regs)) {
3346                 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3347                 return PTR_ERR(regs);
3348         }
3349         gbe_dev->ss_regs = regs;
3350         gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3351         return 0;
3352 }
3353
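     /*
      * GBE 1.4 (K2HK-style) variant of the resource setup: maps the SGMII
      * port 3/4 and switch module regions, allocates the statistics
      * buffers and fills in the GBE13 register offsets.
      */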
3354 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3355                                 struct device_node *node)
3356 {
3357         struct resource res;
3358         void __iomem *regs;
3359         int i, ret;
3360
3361         ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3362         if (ret) {
3363                 dev_err(gbe_dev->dev,
3364                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3365                         node, GBE_SGMII34_REG_INDEX);
3366                 return ret;
3367         }
3368
3369         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3370         if (IS_ERR(regs)) {
3371                 dev_err(gbe_dev->dev,
3372                         "Failed to map gbe sgmii port34 register base\n");
3373                 return PTR_ERR(regs);
3374         }
3375         gbe_dev->sgmii_port34_regs = regs;
3376
3377         ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3378         if (ret) {
3379                 dev_err(gbe_dev->dev,
3380                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3381                         node, GBE_SM_REG_INDEX);
3382                 return ret;
3383         }
3384
3385         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3386         if (IS_ERR(regs)) {
3387                 dev_err(gbe_dev->dev,
3388                         "Failed to map gbe switch module register base\n");
3389                 return PTR_ERR(regs);
3390         }
3391         gbe_dev->switch_regs = regs;
3392
3393         gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3394         gbe_dev->et_stats = gbe13_et_stats;
3395         gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3396
3397         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3398                                          gbe_dev->num_et_stats, sizeof(u64),
3399                                          GFP_KERNEL);
3400         if (!gbe_dev->hw_stats) {
3401                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3402                 return -ENOMEM;
3403         }
3404
3405         gbe_dev->hw_stats_prev =
3406                 devm_kcalloc(gbe_dev->dev,
3407                              gbe_dev->num_et_stats, sizeof(u32),
3408                              GFP_KERNEL);
3409         if (!gbe_dev->hw_stats_prev) {
3410                 dev_err(gbe_dev->dev,
3411                         "hw_stats_prev memory allocation failed\n");
3412                 return -ENOMEM;
3413         }
3414
3415         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3416         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3417
3418         /* K2HK has only 2 hw stats modules visible at a time, so
3419          * modules 0 & 2 point to one base and
3420          * modules 1 & 3 point to the other base
3421          */
3422         for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3423                 gbe_dev->hw_stats_regs[i] =
3424                         gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3425                         (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3426         }
3427
3428         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3429         gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3430         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3431         gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3432         gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3433
3434         /* Subsystem registers */
3435         GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3436
3437         /* Switch module registers */
3438         GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3439         GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3440         GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3441         GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3442         GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3443         GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3444
3445         /* Host port registers */
3446         GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3447         GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3448         return 0;
3449 }
3450
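     /*
      * GBENU (NU/2U) variant of the resource setup: allocates the
      * statistics buffers, maps the switch module region and fills in the
      * GBENU register offsets.
      */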
3451 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3452                                 struct device_node *node)
3453 {
3454         struct resource res;
3455         void __iomem *regs;
3456         int i, ret;
3457
3458         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3459         gbe_dev->et_stats = gbenu_et_stats;
3460
3461         if (IS_SS_ID_MU(gbe_dev))
3462                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3463                         (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3464         else
3465                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3466                                         GBENU_ET_STATS_PORT_SIZE;
3467
3468         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3469                                          gbe_dev->num_et_stats, sizeof(u64),
3470                                          GFP_KERNEL);
3471         if (!gbe_dev->hw_stats) {
3472                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3473                 return -ENOMEM;
3474         }
3475
3476         gbe_dev->hw_stats_prev =
3477                 devm_kcalloc(gbe_dev->dev,
3478                              gbe_dev->num_et_stats, sizeof(u32),
3479                              GFP_KERNEL);
3480         if (!gbe_dev->hw_stats_prev) {
3481                 dev_err(gbe_dev->dev,
3482                         "hw_stats_prev memory allocation failed\n");
3483                 return -ENOMEM;
3484         }
3485
3486         ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3487         if (ret) {
3488                 dev_err(gbe_dev->dev,
3489                         "Can't translate of gbenu node(%pOFn) addr at index %d\n",
3490                         node, GBENU_SM_REG_INDEX);
3491                 return ret;
3492         }
3493
3494         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3495         if (IS_ERR(regs)) {
3496                 dev_err(gbe_dev->dev,
3497                         "Failed to map gbenu switch module register base\n");
3498                 return PTR_ERR(regs);
3499         }
3500         gbe_dev->switch_regs = regs;
3501
3502         if (!IS_SS_ID_2U(gbe_dev))
3503                 gbe_dev->sgmii_port_regs =
3504                        gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3505
3506         /* Although the SGMII modules are memory-mapped as one contiguous
3507          * region on GBENU devices, setting sgmii_port34_regs keeps the
3508          * SGMII accessor code consistent across subsystem types.
3509          */
3510         gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3511                                      (2 * GBENU_SGMII_MODULE_SIZE);
3512
3513         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3514
3515         for (i = 0; i < (gbe_dev->max_num_ports); i++)
3516                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3517                         GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3518
3519         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3520         gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3521         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3522         gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3523         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3524
3525         /* Subsystem registers */
3526         GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3527         /* harmless to set for NU, but only 2U uses rgmii_status */
3528         GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3529
3530         /* Switch module registers */
3531         GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3532         GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3533         GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3534         GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3535
3536         /* Host port registers */
3537         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3538         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3539
3540         /* For NU only; 2U does not need tx_pri_map.
3541          * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
3542          * egress threads, while 2U has only one such thread.
3543          */
3544         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3545         return 0;
3546 }
3547
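     /*
      * gbe_probe() - instantiate one GBE/XGBE subsystem from its DT node:
      * identify the variant, map its registers, open the TX pipe, count
      * the "interfaces" children, bring up any secondary slave ports,
      * create the ALE and CPTS instances, initialize the host port and
      * start the statistics/link timer.
      */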
3548 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3549                      struct device_node *node, void **inst_priv)
3550 {
3551         struct device_node *interfaces, *interface, *cpts_node;
3552         struct device_node *secondary_ports;
3553         struct cpsw_ale_params ale_params;
3554         struct gbe_priv *gbe_dev;
3555         u32 slave_num;
3556         int i, ret = 0;
3557
3558         if (!node) {
3559                 dev_err(dev, "device tree info unavailable\n");
3560                 return -ENODEV;
3561         }
3562
3563         gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3564         if (!gbe_dev)
3565                 return -ENOMEM;
3566
3567         if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3568             of_device_is_compatible(node, "ti,netcp-gbe")) {
3569                 gbe_dev->max_num_slaves = 4;
3570         } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3571                 gbe_dev->max_num_slaves = 8;
3572         } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3573                 gbe_dev->max_num_slaves = 1;
3574                 gbe_module.set_rx_mode = gbe_set_rx_mode;
3575         } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3576                 gbe_dev->max_num_slaves = 2;
3577         } else {
3578                 dev_err(dev, "device tree node for unknown device\n");
3579                 return -EINVAL;
3580         }
3581         gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3582
3583         gbe_dev->dev = dev;
3584         gbe_dev->netcp_device = netcp_device;
3585         gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3586
3587         /* init the hw stats lock */
3588         spin_lock_init(&gbe_dev->hw_stats_lock);
3589
3590         if (of_find_property(node, "enable-ale", NULL)) {
3591                 gbe_dev->enable_ale = true;
3592                 dev_info(dev, "ALE enabled\n");
3593         } else {
3594                 gbe_dev->enable_ale = false;
3595                 dev_dbg(dev, "ALE bypass enabled\n");
3596         }
3597
3598         ret = of_property_read_u32(node, "tx-queue",
3599                                    &gbe_dev->tx_queue_id);
3600         if (ret < 0) {
3601                 dev_err(dev, "missing \"tx-queue\" parameter, using default\n");
3602                 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3603         }
3604
3605         ret = of_property_read_string(node, "tx-channel",
3606                                       &gbe_dev->dma_chan_name);
3607         if (ret < 0) {
3608                 dev_err(dev, "missing \"tx-channel\" parameter\n");
3609                 return -EINVAL;
3610         }
3611
3612         if (of_node_name_eq(node, "gbe")) {
3613                 ret = get_gbe_resource_version(gbe_dev, node);
3614                 if (ret)
3615                         return ret;
3616
3617                 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3618
3619                 if (IS_SS_ID_VER_14(gbe_dev))
3620                         ret = set_gbe_ethss14_priv(gbe_dev, node);
3621                 else if (IS_SS_ID_MU(gbe_dev))
3622                         ret = set_gbenu_ethss_priv(gbe_dev, node);
3623                 else
3624                         ret = -ENODEV;
3625
3626         } else if (of_node_name_eq(node, "xgbe")) {
3627                 ret = set_xgbe_ethss10_priv(gbe_dev, node);
3628                 if (ret)
3629                         return ret;
3630                 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3631                                              gbe_dev->ss_regs);
3632         } else {
3633                 dev_err(dev, "unknown GBE node(%pOFn)\n", node);
3634                 ret = -ENODEV;
3635         }
3636
3637         if (ret)
3638                 return ret;
3639
3640         interfaces = of_get_child_by_name(node, "interfaces");
3641         if (!interfaces)
3642                 dev_err(dev, "could not find interfaces\n");
3643
3644         ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3645                                 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3646         if (ret) {
3647                 of_node_put(interfaces);
3648                 return ret;
3649         }
3650
3651         ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3652         if (ret) {
3653                 of_node_put(interfaces);
3654                 return ret;
3655         }
3656
3657         /* Create network interfaces */
3658         INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3659         for_each_child_of_node(interfaces, interface) {
3660                 ret = of_property_read_u32(interface, "slave-port", &slave_num);
3661                 if (ret) {
3662                         dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
3663                                 interface);
3664                         continue;
3665                 }
3666                 gbe_dev->num_slaves++;
3667                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3668                         of_node_put(interface);
3669                         break;
3670                 }
3671         }
3672         of_node_put(interfaces);
3673
3674         if (!gbe_dev->num_slaves)
3675                 dev_warn(dev, "No network interface configured\n");
3676
3677         /* Initialize Secondary slave ports */
3678         secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3679         INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3680         if (secondary_ports && (gbe_dev->num_slaves <  gbe_dev->max_num_slaves))
3681                 init_secondary_ports(gbe_dev, secondary_ports);
3682         of_node_put(secondary_ports);
3683
3684         if (!gbe_dev->num_slaves) {
3685                 dev_err(dev,
3686                         "No network interface or secondary ports configured\n");
3687                 ret = -ENODEV;
3688                 goto free_sec_ports;
3689         }
3690
3691         memset(&ale_params, 0, sizeof(ale_params));
3692         ale_params.dev          = gbe_dev->dev;
3693         ale_params.ale_regs     = gbe_dev->ale_reg;
3694         ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
3695         ale_params.ale_ports    = gbe_dev->ale_ports;
3696         ale_params.dev_id       = "cpsw";
3697         if (IS_SS_ID_NU(gbe_dev))
3698                 ale_params.dev_id = "66ak2el";
3699         else if (IS_SS_ID_2U(gbe_dev))
3700                 ale_params.dev_id = "66ak2g";
3701         else if (IS_SS_ID_XGBE(gbe_dev))
3702                 ale_params.dev_id = "66ak2h-xgbe";
3703
3704         gbe_dev->ale = cpsw_ale_create(&ale_params);
3705         if (IS_ERR(gbe_dev->ale)) {
3706                 dev_err(gbe_dev->dev, "error initializing ale engine\n");
3707                 ret = PTR_ERR(gbe_dev->ale);
3708                 goto free_sec_ports;
3709         } else {
3710                 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3711         }
3712
3713         cpts_node = of_get_child_by_name(node, "cpts");
3714         if (!cpts_node)
3715                 cpts_node = of_node_get(node);
3716
3717         gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
3718                                     cpts_node, 0);
3719         of_node_put(cpts_node);
3720         if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
3721                 ret = PTR_ERR(gbe_dev->cpts);
3722                 goto free_sec_ports;
3723         }
3724
3725         /* initialize host port */
3726         gbe_init_host_port(gbe_dev);
3727
3728         spin_lock_bh(&gbe_dev->hw_stats_lock);
3729         for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3730                 if (IS_SS_ID_VER_14(gbe_dev))
3731                         gbe_reset_mod_stats_ver14(gbe_dev, i);
3732                 else
3733                         gbe_reset_mod_stats(gbe_dev, i);
3734         }
3735         spin_unlock_bh(&gbe_dev->hw_stats_lock);
3736
3737         timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
3738         gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
3739         add_timer(&gbe_dev->timer);
3740         *inst_priv = gbe_dev;
3741         return 0;
3742
3743 free_sec_ports:
3744         free_secondary_ports(gbe_dev);
3745         return ret;
3746 }
3747
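     /*
      * gbe_attach() - bind a netcp interface to one slave port described
      * by its DT node and hook up the keystone ethtool ops; undone by
      * gbe_release().
      */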
3748 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3749                       struct device_node *node, void **intf_priv)
3750 {
3751         struct gbe_priv *gbe_dev = inst_priv;
3752         struct gbe_intf *gbe_intf;
3753         int ret;
3754
3755         if (!node) {
3756                 dev_err(gbe_dev->dev, "interface node not available\n");
3757                 return -ENODEV;
3758         }
3759
3760         gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3761         if (!gbe_intf)
3762                 return -ENOMEM;
3763
3764         gbe_intf->ndev = ndev;
3765         gbe_intf->dev = gbe_dev->dev;
3766         gbe_intf->gbe_dev = gbe_dev;
3767
3768         gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3769                                         sizeof(*gbe_intf->slave),
3770                                         GFP_KERNEL);
3771         if (!gbe_intf->slave) {
3772                 ret = -ENOMEM;
3773                 goto fail;
3774         }
3775
3776         if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3777                 ret = -ENODEV;
3778                 goto fail;
3779         }
3780
3781         gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3782         ndev->ethtool_ops = &keystone_ethtool_ops;
3783         list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3784         *intf_priv = gbe_intf;
3785         return 0;
3786
3787 fail:
3788         if (gbe_intf->slave)
3789                 devm_kfree(gbe_dev->dev, gbe_intf->slave);
3790         if (gbe_intf)
3791                 devm_kfree(gbe_dev->dev, gbe_intf);
3792         return ret;
3793 }
3794
3795 static int gbe_release(void *intf_priv)
3796 {
3797         struct gbe_intf *gbe_intf = intf_priv;
3798
3799         gbe_intf->ndev->ethtool_ops = NULL;
3800         list_del(&gbe_intf->gbe_intf_list);
3801         devm_kfree(gbe_intf->dev, gbe_intf->slave);
3802         devm_kfree(gbe_intf->dev, gbe_intf);
3803         return 0;
3804 }
3805
3806 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3807 {
3808         struct gbe_priv *gbe_dev = inst_priv;
3809
3810         del_timer_sync(&gbe_dev->timer);
3811         cpts_release(gbe_dev->cpts);
3812         cpsw_ale_stop(gbe_dev->ale);
3813         netcp_txpipe_close(&gbe_dev->tx_pipe);
3814         free_secondary_ports(gbe_dev);
3815
3816         if (!list_empty(&gbe_dev->gbe_intf_head))
3817                 dev_alert(gbe_dev->dev,
3818                           "unreleased ethss interfaces present\n");
3819
3820         return 0;
3821 }
3822
3823 static struct netcp_module gbe_module = {
3824         .name           = GBE_MODULE_NAME,
3825         .owner          = THIS_MODULE,
3826         .primary        = true,
3827         .probe          = gbe_probe,
3828         .open           = gbe_open,
3829         .close          = gbe_close,
3830         .remove         = gbe_remove,
3831         .attach         = gbe_attach,
3832         .release        = gbe_release,
3833         .add_addr       = gbe_add_addr,
3834         .del_addr       = gbe_del_addr,
3835         .add_vid        = gbe_add_vid,
3836         .del_vid        = gbe_del_vid,
3837         .ioctl          = gbe_ioctl,
3838 };
3839
3840 static struct netcp_module xgbe_module = {
3841         .name           = XGBE_MODULE_NAME,
3842         .owner          = THIS_MODULE,
3843         .primary        = true,
3844         .probe          = gbe_probe,
3845         .open           = gbe_open,
3846         .close          = gbe_close,
3847         .remove         = gbe_remove,
3848         .attach         = gbe_attach,
3849         .release        = gbe_release,
3850         .add_addr       = gbe_add_addr,
3851         .del_addr       = gbe_del_addr,
3852         .add_vid        = gbe_add_vid,
3853         .del_vid        = gbe_del_vid,
3854         .ioctl          = gbe_ioctl,
3855 };
3856
3857 static int __init keystone_gbe_init(void)
3858 {
3859         int ret;
3860
3861         ret = netcp_register_module(&gbe_module);
3862         if (ret)
3863                 return ret;
3864
3865         ret = netcp_register_module(&xgbe_module);
3866         if (ret)        /* keep module registrations consistent on failure */
3867                 netcp_unregister_module(&gbe_module);
3868
3869         return ret;
3870 }
3871 module_init(keystone_gbe_init);
3872
3873 static void __exit keystone_gbe_exit(void)
3874 {
3875         netcp_unregister_module(&gbe_module);
3876         netcp_unregister_module(&xgbe_module);
3877 }
3878 module_exit(keystone_gbe_exit);
3879
3880 MODULE_LICENSE("GPL v2");
3881 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SoCs");
3882 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");